Dataset schema (column: type, observed range/values):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2–616
- content_id: string, length 40
- detected_licenses: list, length 0–69
- license_type: string, 2 classes
- repo_name: string, length 5–118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4–63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k–686M, nullable (⌀)
- star_events_count: int64, 0–209k
- fork_events_count: int64, 0–110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2–10.3M
- extension: string, 246 classes
- content: string, length 2–10.3M
- authors: list, length 1–1
- author_id: string, length 0–212
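Each block below is one row of this schema, rendered as labeled fields followed by the file content. As a minimal sketch (added for illustration), rows with this schema could be consumed with the Hugging Face `datasets` library; the dataset id used here is a hypothetical placeholder, not the real one:

```python
from datasets import load_dataset

# "org/stack-like-corpus" is a hypothetical placeholder dataset id.
ds = load_dataset("org/stack-like-corpus", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```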
---

blob_id: a11b6df5b3eab6c6aa3df119ccb976ed561e0356
directory_id: a1be38a8108f03dfef02a471bdd3daad6f3ae9af
path: /graph_garden/cli.py
content_id: 9f594e027175196a2af23cbf038462aaefa16b1e
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: ldtoolkit/graph-garden
snapshot_id: 1bd4882d150d7ded6dbe583559071eb848b41375
revision_id: 335761ef1c0b98c644b61236561f5ac93eb5a54e
branch_name: refs/heads/master
visit_date: 2022-12-15T13:54:06.068520
revision_date: 2020-09-19T12:23:25
committer_date: 2020-09-19T12:23:25
github_id: 296,370,201
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,673
extension: py
content:
from graph_garden import arangodb
from pathlib import Path
from typing import Optional
import sys
import typer

arangodb_app = typer.Typer()
app = typer.Typer()
app.add_typer(arangodb_app, name="arangodb")


@arangodb_app.command()
def list_versions(clear_cache: bool = False):
    for version in arangodb.list_versions(clear_cache=clear_cache):
        print(version)


@arangodb_app.command()
def install(path: Path = arangodb.DEFAULT_INSTALL_PATH, version: Optional[str] = None):
    arangodb.install(path=path, version=version)


@arangodb_app.command()
def start(
    exe_path: Path = arangodb.DEFAULT_INSTALL_PATH,
    data_path: Path = arangodb.DEFAULT_DATA_PATH,
    connection_uri: str = arangodb.DEFAULT_CONNECTION_URI,
    database: str = arangodb.SYSTEM_DATABASE,
    username: str = arangodb.DEFAULT_USERNAME,
    password: str = arangodb.DEFAULT_PASSWORD,
):
    arangodb.start(
        exe_path=exe_path,
        data_path=data_path,
        connection_uri=connection_uri,
        database=database,
        username=username,
        password=password,
    )


@arangodb_app.command()
def stop():
    arangodb.stop()


@arangodb_app.command()
def is_running(
    connection_uri: str = arangodb.DEFAULT_CONNECTION_URI,
    database: str = arangodb.SYSTEM_DATABASE,
    username: str = arangodb.DEFAULT_USERNAME,
    password: str = arangodb.DEFAULT_PASSWORD,
):
    status_code = (
        0
        if arangodb.is_running(
            connection_uri=connection_uri,
            database=database,
            username=username,
            password=password,
        ) else
        1
    )
    sys.exit(status_code)
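
# Usage sketch (added for illustration; not in the original file): Typer apps
# can be exercised in-process with typer's CliRunner helper; "list-versions"
# assumes Typer's default kebab-case conversion of list_versions.
#
#     from typer.testing import CliRunner
#     runner = CliRunner()
#     result = runner.invoke(app, ["arangodb", "list-versions"])
#     print(result.output)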
authors: ["infroma@gmail.com"]
author_id: infroma@gmail.com

---

blob_id: caff9b449e11c9ecb37a05412c24abcb58c8f83e
directory_id: 6ebe740bfeee7d0782ce95d9b4256ebb430599ac
path: /hitcount/migrations/0005_auto_20180511_1446.py
content_id: 4c3f1ff9121d0615e568bfa2f87b6ddd41f80ca8
detected_licenses: ["MIT"]
license_type: permissive
repo_name: fsymonenko/django-hitcount
snapshot_id: 61635f6d00ee02788cae78470dd6e71d4136ae2d
revision_id: cb7690a9a6e9a59da2a63abd6d915855963f8c20
branch_name: refs/heads/master
visit_date: 2021-05-25T16:09:52.125038
revision_date: 2018-12-21T08:59:47
committer_date: 2018-12-21T08:59:47
github_id: 253,820,160
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 658
extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-05-11 11:46
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('hitcount', '0004_auto_20180427_1254'),
    ]

    operations = [
        migrations.AlterField(
            model_name='hit',
            name='ip',
            field=models.CharField(db_index=True, editable=False, max_length=40),
        ),
        migrations.AlterField(
            model_name='hit',
            name='session',
            field=models.CharField(db_index=True, editable=False, max_length=40),
        ),
    ]
authors: ["megoloman@ukr.net"]
author_id: megoloman@ukr.net

---

blob_id: ab949e014ed3ce90f3961aac4caac1c65008e47d
directory_id: f5a62d76dcb1534ff3100d14a03856ca630ea40f
path: /src/yamlu/__init__.py
content_id: 20b5316a8e3a6cec15beff51bc225c55675c6af7
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: bernhardschaefer/yamlu
snapshot_id: 8a9b8cbee9ecccc423edce9e18379b5dacf34201
revision_id: c4e4aa09f3be41eef5cec6372f01dccb78c435fd
branch_name: refs/heads/main
visit_date: 2022-11-12T07:35:03.832266
revision_date: 2022-10-26T19:11:25
committer_date: 2022-10-26T19:11:25
github_id: 169,756,110
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 999
extension: py
content:
import sys

from yamlu.img import read_img, plot_img, plot_imgs, plot_img_paths, plot_anns
from yamlu.misc import flatten
from yamlu.np_utils import bin_stats
from yamlu.path import ls, glob

if sys.version_info[:2] >= (3, 8):
    # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
    from importlib.metadata import PackageNotFoundError, version  # pragma: no cover
else:
    from importlib_metadata import PackageNotFoundError, version  # pragma: no cover

try:
    # Change here if project is renamed and does not equal the package name
    dist_name = __name__
    __version__ = version(dist_name)
except PackageNotFoundError:  # pragma: no cover
    __version__ = "unknown"
finally:
    del version, PackageNotFoundError

all = [
    "read_img", "plot_img", "plot_imgs", "plot_img_paths", "plot_anns",
    "flatten",
    "bin_stats",
    "ls", "glob",
]
try:
    from yamlu.pytorch import isin
    all.append("isin")
except ImportError:
    pass
__all__ = all
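
# The try/except above keeps the torch-backed helper optional: "isin" is only
# exported when yamlu.pytorch (and therefore torch) is importable.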
authors: ["bernhard.schaefer@sap.com"]
author_id: bernhard.schaefer@sap.com

---

blob_id: 038378c030394f3b2bf97bb64ff1ad870e004712
directory_id: cc415b328a443937016ede691badddc6ac116055
path: /Python/List Comprehensions.py
content_id: b303aa8b0ea992b5bbe9670aa390181498415fc6
detected_licenses: ["MIT"]
license_type: permissive
repo_name: hakerarena/HackerRank-Solutions
snapshot_id: 0480278c5174f0676fd8ef5c78681fa6e624f823
revision_id: 4159711286231fd4eb45b65c5a44bd72f7352889
branch_name: refs/heads/master
visit_date: 2022-04-16T18:03:24.686060
revision_date: 2020-04-13T22:16:56
committer_date: 2020-04-13T22:16:56
github_id: 189,284,284
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2020-04-13T22:16:57
gha_created_at: 2019-05-29T19:14:16
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 261
extension: py
content:
# Code provided by hakerarenatutorials.wordpress.com
if __name__ == '__main__':
    x = int(input())
    y = int(input())
    z = int(input())
    n = int(input())
    print([[i,j,k] for i in range(x+1) for j in range(y+1) for k in range(z+1) if(i+j+k!=n)])
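# Example (HackerRank's sample case, assuming inputs 1, 1, 1, 2):
# [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]]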
authors: ["noreply@github.com"]
author_id: hakerarena.noreply@github.com

---

blob_id: 22532dae8e0d9b5ab6b14479bf3acb78f77c966f
directory_id: 6689007e1eec7644a236dead507d2209cf34609d
path: /accretionStructures/linearAccDisk.py
content_id: 6573e5d36b45f8452d49694542fb116e59106fa8
detected_licenses: []
license_type: no_license
repo_name: jomen93/RayTrace
snapshot_id: daf3109902b5e4568800c3a6f2b50b3f4ab84126
revision_id: 0a2b23400ab76b18660f5dd6d147ff82fe9b3a23
branch_name: refs/heads/master
visit_date: 2020-03-26T06:44:07.995683
revision_date: 2018-08-27T18:12:50
committer_date: 2018-08-27T18:12:50
github_id: 144,619,342
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2018-08-13T18:37:45
gha_created_at: 2018-08-13T18:37:44
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 571
extension: py
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 19 23:16:05 2018

@author: ashcat
"""

import numpy as np


class Disk:
    def __init__(self, rData, R_in=3., R_out=5.):
        self.rData = rData
        self.Shape = np.shape(self.rData)
        self.R_out = R_out*np.ones(self.Shape)
        self.R_in = R_in*np.ones(self.Shape)
        self.m = (1.-0.)/(self.R_in - self.R_out)
        self.intens = self.m * (self.rData - self.R_out)
        self.image = self.intens*(self.R_in <= self.rData) * (self.rData <= self.R_out)
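
# Usage sketch (added for illustration; not in the original file): intensity
# rises linearly from 0 at R_out to 1 at R_in and is masked outside the disk.
#
#     r = np.linspace(0., 8., 9)
#     disk = Disk(r)        # R_in=3, R_out=5 by default
#     print(disk.image)     # non-zero only where 3 <= r <= 5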
authors: ["eduardalexis@gmail.com"]
author_id: eduardalexis@gmail.com

---

blob_id: c7d76c19005b0196b147820d46386e2c1d922bb2
directory_id: 1d528bb263879737eae6f483fac3590ed65f32f9
path: /Homework/Lesson 7/Homework Lesson 7 - 1.py
content_id: de5b5a9c1666bcfab04e9dd32402e9bf4ff369e9
detected_licenses: []
license_type: no_license
repo_name: DAMaas/V1D-Dion_Maas
snapshot_id: 323417aae2222b13f6f98c5dec8945782b1d1569
revision_id: 0de4859656a9ac9757a1cfc052623c6b7749481d
branch_name: refs/heads/master
visit_date: 2020-03-29T12:16:18.212101
revision_date: 2019-09-29T17:11:06
committer_date: 2019-09-29T17:11:06
github_id: 149,891,403
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 408
extension: py
content:
# Homework Lesson 7 - 1


def convert(tempCelsius):
    res = ((tempCelsius*1.8)+32)
    return res


def table(vanafTemp, totTemp, gradenPerStap):
    print('{:5} {:5}'.format(" F ", " C "))
    for tempCelsius in range(vanafTemp, totTemp, gradenPerStap):
        tempFahrenheit = convert(tempCelsius)
        print('{:5} {:5}'.format(tempFahrenheit, float(tempCelsius)))


table(-30, 50, 10)  # table() prints and returns None, so wrapping it in print() would only add "None"
authors: ["dion.maas@gmail.com"]
author_id: dion.maas@gmail.com

---

blob_id: 985547429de5e26f0c8cd7b885546ad0e5d927f0
directory_id: f7288d0406579863364129b588cd24a6ae7b327c
path: /manage.py
content_id: 0440701e39fd72af63b495efe776416cd65287d7
detected_licenses: []
license_type: no_license
repo_name: tcosta84/sample_djangoapp
snapshot_id: 6a3d98903730f2f2c4aafb887dfa026d2e95ed3a
revision_id: e57e9969e8f74cd3e256825ed0cedc9c6c83bb1f
branch_name: refs/heads/master
visit_date: 2020-04-21T14:34:28.999094
revision_date: 2014-11-23T16:04:31
committer_date: 2014-11-23T16:04:31
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 259
extension: py
content:
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sample_djangoapp.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
authors: ["thiagodacosta@gmail.com"]
author_id: thiagodacosta@gmail.com

---

blob_id: 955892170707a2c8a470295357132c1efdd22686
directory_id: 3d4a3bebf614086cce8a22510d8c27c0bea52f92
path: /untitled0.py
content_id: 5bd8d5dbe7f74619061831dab5515e2402fe2e77
detected_licenses: []
license_type: no_license
repo_name: caelus95/MantaOcean
snapshot_id: dc031518051daac9b718b4c7664a057a956475f8
revision_id: dbc5774f6ecd949a8d8f58c66d0101f816b90dc9
branch_name: refs/heads/master
visit_date: 2023-06-18T22:00:26.353952
revision_date: 2021-06-29T13:25:48
committer_date: 2021-06-29T13:25:48
github_id: 365,965,350
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 9,731
extension: py
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 13:57:54 2021

@author: caelus
"""

PKG_path = '/home/caelus/dock_1/Working_hub/LGnDC_dep/python_cent/MantaPKG/'
import sys
sys.path.append(PKG_path)
from Manta_Signals.procc_index import sig_pro, linearRegress4Cube
from Manta_Signals.utility import nc2npy
import os
import numpy as np
import pandas as pd
import xarray as xr
from netCDF4 import Dataset
from tqdm import tqdm
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable

r_path4 = '/home/caelus/dock_1/Working_hub/DATA_dep/Kuroshio/ALL/analysis_sigs/'
Sig_set,Corr_map,Annual_mean = sig_pro(r_path4,['1993-01-01',324,300])
Sig_set['dates'] = pd.to_datetime(Sig_set.index).strftime('%Y-%m')

minlon,maxlon = 112,180
minlat,maxlat = 5,30

# data_a_6M = data_a_6M.mean(dim='latitude')


def MantaCurl2D(u,v,dx=28400.0,dy=28400.0):
    import numpy as np
    '''
    dx = 28400.0 # meters calculated from the 0.25 degree spatial gridding
    dy = 28400.0 # meters calculated from the 0.25 degree spatial gridding
    '''
    u_T = u.transpose([1,0])
    v_T = v.transpose([1,0])
    du_dx, du_dy = np.gradient(u_T, dx,dy)
    dv_dx, dv_dy = np.gradient(v_T, dx,dy)
    curl = dv_dx - du_dy
    return curl.transpose([1,0])
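
# Sanity-check sketch (added for illustration; not in the original file):
# for solid-body rotation u = -w*y, v = w*x the curl should be 2*w everywhere.
#
#     w = 1e-5
#     yy, xx = np.mgrid[0:50, 0:60] * 28400.0
#     assert np.allclose(MantaCurl2D(-w * yy, w * xx), 2 * w)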
KVTe = Sig_set.KVTe_index_2Y_Rm

ADT_t = xr.open_dataset('/home/caelus/dock_1/Working_hub/DATA_dep/CDS/T_CDS_monthly_199301_201912.nc',decode_times=True)
ADT_t = ADT_t.drop(['crs','lat_bnds','lon_bnds','err','sla','ugosa','vgosa'])
ADT_t = ADT_t.loc[dict(latitude=slice(minlat,maxlat),longitude=slice(minlon,maxlon),nv=1)]

# Calculating Vorticity (Curl)
tmp_ugos = ADT_t.ugos.values
tmp_vgos = ADT_t.vgos.values
t,at,on = tmp_ugos.shape
Curl = np.zeros_like(tmp_ugos)
for i in range(t):
    Curl[i,:,:] = MantaCurl2D(tmp_ugos[i,:,:],tmp_vgos[i,:,:])

CURL = xr.Dataset(
    {
        'curl': (["time","latitude", "longitude"], Curl)#,
        # "mask": (["y","x"],mask)
    },
    coords={
        "longitude": (["longitude"], ADT_t.longitude),
        "latitude": (["latitude"], ADT_t.latitude),
        "time": (['time'], ADT_t.time),
        # "reference_time": pd.Timestamp("2014-09-05"),
    },)

# Calculating EKE = (u**2 + v**2) / 2
ADT_t['EKE'] = (ADT_t.ugos**2 + ADT_t.vgos**2)/2

# Merge data
ADT_t = xr.merge([ADT_t,CURL])

# Data 2 anomaly
ADT_t = ADT_t - ADT_t.mean(dim='time')

# ------Masking (Extension) --------
import copy

mask_mlon,mask_Mlon = 130,190
mask_mlat,mask_Mlat = 30, 40

tmp_data = copy.deepcopy(ADT_t)
tmp_data = tmp_data.where( (tmp_data.longitude<mask_mlon) | (tmp_data.longitude>mask_Mlon) |\
                           (tmp_data.latitude<mask_mlat) | (tmp_data.latitude>mask_Mlat),drop=False)
tmp_data = tmp_data.drop(['EKE','adt','curl'])
ADT_t = ADT_t.drop(['ugos','vgos'])
ADT_t = xr.merge([tmp_data,ADT_t])

# --------------
Time=[['1994-12','1999-01'],['2003-01','2005-03'],['2006-12','2009-07']]
# Time_p1 = ['1994-12','1999-01']
# Time_p2 = ['2003-01','2005-03']
# Time_p3 = ['2006-12','2009-07']

for t in Time:
    PN_data = ADT_t.loc[dict(time=slice(t[0],t[1]))]
    PN_data = PN_data.mean(dim='time')

# period means P1/P2/P3 used by the plotting code below
P1_data = ADT_t.loc[dict(time=slice(Time[0][0],Time[0][1]))]
P2_data = ADT_t.loc[dict(time=slice(Time[1][0],Time[1][1]))]
P3_data = ADT_t.loc[dict(time=slice(Time[2][0],Time[2][1]))]
P1 = P1_data.mean(dim='time')
P2 = P2_data.mean(dim='time')
P3 = P3_data.mean(dim='time')

# =============================================================================
# =============================================================================
# =============================================================================
# # #
# =============================================================================
# =============================================================================
# =============================================================================
# -------------


def r_vector4cube(x,y,data1,data2,factor):
    xx = x.values.shape[0]
    yy = y.values.shape[0]
    a = np.arange(0,xx,factor[0])
    b = np.arange(0,yy,factor[1])
    r_x, r_y = x[a], y[b]
    r_data1 = data1.where( (data1.longitude==r_x) & (data1.latitude==r_y), drop=True )
    r_data2 = data2.where( (data2.longitude==r_x) & (data2.latitude==r_y), drop=True )
    return r_x, r_y, r_data1, r_data2


figdata11 = P2.adt.values
lon11 = P1.adt.longitude
lat11 = P1.adt.latitude

r_x12, r_y12, r_data121,r_data122 = r_vector4cube(P1.ugos.longitude,P1.ugos.latitude,
                                                  P2.ugos,P2.vgos,[4,4])
figdata121 = r_data121
figdata122 = r_data122
lon12 = r_x12
lat12 = r_y12

figdata13 = P2.EKE.values
# lon13 = P1.adt.longitude
# lat13 = P1.adt.latitude

lon_m11, lat_m11 = np.meshgrid(lon11,lat11)
lon_m12, lat_m12 = np.meshgrid(lon12,lat12)

#-------------
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams['axes.linewidth'] = 3
# plt.rcParams['axes.grid'] = False
plt.rcParams['xtick.labeltop'] = False
plt.rcParams['xtick.labelbottom'] = True
plt.rcParams['ytick.labelright'] = False
plt.rcParams['ytick.labelleft'] = True

# w_path_sig = '/home/caelus/dock_1/Working_hub/DATA_dep/Kuroshio/ALL/task_adt/1/'

fig, ax = plt.subplots(figsize=(18,5),linewidth=1)
ax = plt.gca()
m = Basemap(projection='cyl',llcrnrlat=minlat,urcrnrlat=maxlat,\
            llcrnrlon=minlon,urcrnrlon=maxlon,resolution='i')
# lon_m, lat_m = np.meshgrid(lon_00,lat_00)
# x, y = m(lon, lat)
m.fillcontinents(color='black',lake_color='black')
m.drawcoastlines()
m.drawparallels(np.arange(-80.,81.,5.),labels=[True,False,False,False],
                dashes=[2,2],fontsize=22,fontweight='bold',color='k')
m.drawmeridians(np.arange(-180.,181.,10.),labels=[False,False,False,True],
                dashes=[2,2],fontsize=22,fontweight='bold',color='k')
plt.title('b) ADTa UVa Mean [2003-01, 2005-03] ', fontproperties='',loc='left',pad=15,fontsize=28,fontweight='regular')
#plt.suptitle(' UV (mean flow) & speed (anomaly) ',fontstyle='italic',position=(0.5, .92),fontsize=20)
# cs1 = m.contour(lon_m11,lat_m11,np.flipud(figdata111[n,:,:]),colors='grey',linewidths=2.5,levels=10)
# plt.clim(-3.3,3.3)
# plt.clabel(cs1,fontsize=10,fmt='%1.1f',colors='k')
cs2 = m.pcolormesh(lon_m11,lat_m11,figdata11*10,cmap=plt.cm.get_cmap('seismic'),shading='gouraud')
plt.clim(-1.5,1.5) # plt.clim(-max_figdata02,max_figdata02)
q = m.quiver(lon_m12,lat_m12,figdata121,figdata122,
             scale=2.5,headwidth=7.5,headaxislength=10,headlength=13,color='k',
             minlength=1,edgecolor='y',minshaft=1.3,alpha=.7)
# plt.axis('equal')
# Unit vector
p = plt.quiverkey(q,115.,29,.1,"0.1 m/s",coordinates='data',color='r',
                  labelpos='S',alpha=1,labelcolor='w',fontproperties={'size':16},
                  labelsep=0.13)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2.5%", pad=0.1)
cax.tick_params(labelsize=15)
cax.set_ylabel('',{'fontsize':20,'fontweight':'bold','style':'italic'})
#label
# h = plt.colorbar(label='',cax=cax);
h = plt.colorbar(label='$10^{-1} [m]$',cax=cax);
# plt.savefig(w_path01+'Climatology_WSC_Press',bbox_inches='tight')
plt.tight_layout()
# plt.savefig(w_path_sig+'ADTa_GeoUVa_'+Sig_set.dates[n])
plt.show()
# n+=1

figdata11 = P2.adt.values
# r_x12, r_y12, r_data121,r_data122 = r_vector4cube(P1.ugos.longitude,P1.ugos.latitude,P3.ugos,P3.vgos,[3,3])
# figdata121 = r_data121
# figdata122 = r_data122
lon12 = r_x12
lat12 = r_y12
figdata13 = P2.EKE.values

# ----------EKE -------------
fig, ax = plt.subplots(figsize=(18,5),linewidth=1)
ax = plt.gca()
m = Basemap(projection='cyl',llcrnrlat=minlat,urcrnrlat=maxlat,\
            llcrnrlon=minlon,urcrnrlon=maxlon,resolution='i')
# lon_m, lat_m = np.meshgrid(lon_00,lat_00)
# x, y = m(lon, lat)
m.fillcontinents(color='black',lake_color='black')
m.drawcoastlines()
m.drawparallels(np.arange(-80.,81.,5.),labels=[True,False,False,False],
                dashes=[2,2],fontsize=22,fontweight='bold',color='k')
m.drawmeridians(np.arange(-180.,181.,10.),labels=[False,False,False,True],
                dashes=[2,2],fontsize=22,fontweight='bold',color='k')
plt.title('[Positive] EKEa Mean [2003-01, 2005-03] ', fontproperties='',loc='left',pad=15,fontsize=28,fontweight='regular')
#plt.suptitle(' UV (mean flow) & speed (anomaly) ',fontstyle='italic',position=(0.5, .92),fontsize=20)
# cs1 = m.contour(lon_m11,lat_m11,figdata11*100,colors='k',linewidths=2.5,levels=10,alpha=.45)
# plt.clim(-300.3,300.3)
# plt.clabel(cs1,fontsize=10,fmt='%1.1f',colors='k')
cs2 = m.pcolormesh(lon_m11,lat_m11,figdata13*10,cmap=plt.cm.get_cmap('seismic'),shading='gouraud')
plt.clim(-1.5,1.5) # plt.clim(-max_figdata02,max_figdata02)
m.plot([120,180,180,120,120],[18,18,28,28,18],color='k',linestyle='--',linewidth=4,alpha=.8)
# q = m.quiver(lon_m12,lat_m12,figdata121,figdata122,
#              scale=2.5,headwidth=7.5,headaxislength=10,headlength=13,color='k',
#              minlength=1,edgecolor='y',minshaft=1.3,alpha=.7)
# # plt.axis('equal')
# # Unit vector
# p = plt.quiverkey(q,115.,29,.1,"0.1 m/s",coordinates='data',color='r',
#                   labelpos='S',alpha=1,labelcolor='w',fontproperties={'size':16},
#                   labelsep=0.13)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2.5%", pad=0.1)
cax.tick_params(labelsize=15)
cax.set_ylabel('',{'fontsize':20,'fontweight':'bold','style':'italic'})
#label
# h = plt.colorbar(label='',cax=cax);
h = plt.colorbar(label='$\mathit{10^{-1}[(m/s)^{2}]}$',cax=cax);
# plt.savefig(w_path01+'Climatology_WSC_Press',bbox_inches='tight')
plt.tight_layout()
# plt.savefig(w_path_sig+'ADTa_GeoUVa_'+Sig_set.dates[n])
plt.show()
# n+=1
authors: ["caelus9536@gmail.com"]
author_id: caelus9536@gmail.com

---

blob_id: 93a9b3ba43545d0eb32bdf3285680ef5b4023855
directory_id: 22b70235f43be24186fcac6b7563d41493563ee0
path: /tests/test_examples.py
content_id: f1c093d6522ae70eadba50aec4503a48940d2e26
detected_licenses: []
license_type: no_license
repo_name: zeta1999/jaxlie
snapshot_id: d469da8c5f2c1f7a9644609a377b10f00ceaa9ce
revision_id: ea6aa6fe914fc6709bc11852f736a99e5a498055
branch_name: refs/heads/master
visit_date: 2023-02-13T04:16:15.800416
revision_date: 2021-01-16T08:59:06
committer_date: 2021-01-16T08:59:06
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,926
extension: py
content:
"""Tests with explicit examples.
"""
import numpy as onp
from hypothesis import given, settings
from hypothesis import strategies as st
from utils import assert_arrays_close, assert_transforms_close, sample_transform
import jaxlie
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_so2_from_to_radians_bijective(_random_module):
"""Check that we can convert from and to radians."""
radians = onp.random.uniform(low=-onp.pi, high=onp.pi)
assert_arrays_close(jaxlie.SO2.from_radians(radians).to_radians(), radians)
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se2_translation(_random_module):
"""Simple test for SE(2) translation terms."""
translation = onp.random.randn(2)
T = jaxlie.SE2.from_xy_theta(*translation, theta=0.0)
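    # With theta=0 the rotation is the identity, so T @ x == x + translation;
    # here x equals the translation itself, hence the expected value 2x.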
    assert_arrays_close(T @ translation, translation * 2)


@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se3_translation(_random_module):
    """Simple test for SE(3) translation terms."""
    translation = onp.random.randn(3)
    T = jaxlie.SE3.from_rotation_and_translation(
        rotation=jaxlie.SO3.identity(),
        translation=translation,
    )
    assert_arrays_close(T @ translation, translation * 2)


def test_se2_rotation():
    """Simple test for SE(2) rotation terms."""
    T_w_b = jaxlie.SE2.from_rotation_and_translation(
        rotation=jaxlie.SO2.from_radians(onp.pi / 2.0),
        translation=onp.zeros(2),
    )
    p_b = onp.array([1.0, 0.0])
    p_w = onp.array([0.0, 1.0])
    assert_arrays_close(T_w_b @ p_b, p_w)


def test_se3_rotation():
    """Simple test for SE(3) rotation terms."""
    T_w_b = jaxlie.SE3.from_rotation_and_translation(
        rotation=jaxlie.SO3.from_rpy_radians(onp.pi / 2.0, 0.0, 0.0),
        translation=onp.zeros(3),
    )
    p_b = onp.array([0.0, 1.0, 0.0])
    p_w = onp.array([0.0, 0.0, 1.0])
    assert_arrays_close(T_w_b @ p_b, p_w)


def test_so3_xyzw_basic():
    """Check that we can create an SO3 object from an xyzw quaternion."""
    assert_transforms_close(
        jaxlie.SO3.from_quaternion_xyzw(onp.array([0, 0, 0, 1])),
        jaxlie.SO3.identity(),
    )


@settings(deadline=None)
@given(_random_module=st.random_module())
def test_so3_xyzw_bijective(_random_module):
    """Check that we can convert between xyzw and wxyz quaternions."""
    T = sample_transform(jaxlie.SO3)
    assert_transforms_close(T, jaxlie.SO3.from_quaternion_xyzw(T.as_quaternion_xyzw()))


@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se3_compose(_random_module):
    """Compare SE3 composition in matrix form vs compact form."""
    T1 = sample_transform(jaxlie.SE3)
    T2 = sample_transform(jaxlie.SE3)
    assert_arrays_close(T1.as_matrix() @ T2.as_matrix(), (T1 @ T2).as_matrix())
    assert_transforms_close(
        jaxlie.SE3.from_matrix(T1.as_matrix() @ T2.as_matrix()), T1 @ T2
    )
authors: ["yibrenth@gmail.com"]
author_id: yibrenth@gmail.com

---

blob_id: 752cda865c0dbb20a9dfd40052cb59e8af24741b
directory_id: d1aefe862050b56314d766939dea97133b84132f
path: /modules/core/private_channel/private_channel_controller.py
content_id: 8b07c623715b47ac7a39bb0d9daaa1aefb34fc21
detected_licenses: []
license_type: no_license
repo_name: Nepherius/Mangopie
snapshot_id: 3ec348ba030385984e2c713481dcc6aab5cd926a
revision_id: f5293b902bc7217ae053f94c5b5fa85e9cd3cc4d
branch_name: refs/heads/master
visit_date: 2020-03-11T21:01:04.803139
revision_date: 2018-05-21T10:08:02
committer_date: 2018-05-21T10:08:02
github_id: 130,253,173
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2018-04-27T19:14:44
gha_created_at: 2018-04-19T17:58:38
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,522
extension: py
content:
from core.decorators import instance, command, event
from tools.command_param_types import Any
from core.private_channel_manager import PrivateChannelManager


@instance()
class PrivateChannelController:
    def __init__(self):
        pass

    def inject(self, registry):
        self.bot = registry.get_instance("mangopie")
        self.private_channel_manager = registry.get_instance("private_channel_manager")
        self.character_manager = registry.get_instance("character_manager")

    @command(command="join", params=[], access_level="member",
             description="Join the private channel")
    def join_cmd(self, channel, sender, reply, args):
        self.private_channel_manager.invite(sender.char_id)

    @command(command="leave", params=[], access_level="all",
             description="Leave the private channel")
    def leave_cmd(self, channel, sender, reply, args):
        self.private_channel_manager.kick(sender.char_id)

    @command(command="invite", params=[Any("character")], access_level="member",
             description="Invite a character to the private channel")
    def invite_cmd(self, channel, sender, reply, args):
        char = args[0].capitalize()
        char_id = self.character_manager.resolve_char_to_id(char)
        if sender.char_id == char_id:
            self.private_channel_manager.invite(sender.char_id)
        elif char_id:
            self.bot.send_private_message(char_id,
                                          "You have been invited to the private channel by <highlight>%s<end>." % sender.name)
            self.private_channel_manager.invite(char_id)
            reply("You have invited <highlight>%s<end> to the private channel." % char)
        else:
            reply("Could not find character <highlight>%s<end>." % char)

    @event(PrivateChannelManager.JOINED_PRIVATE_CHANNEL_EVENT, "Notify private channel when someone joins")
    def private_channel_joined_event(self, event_type, event_data):
        char_name = self.character_manager.get_char_name(event_data.char_id)
        self.bot.send_private_channel_message("<highlight>%s<end> has joined the private channel." % char_name)

    @event(PrivateChannelManager.LEFT_PRIVATE_CHANNEL_EVENT, "Notify private channel when someone leaves")
    def private_channel_left_event(self, event_type, event_data):
        char_name = self.character_manager.get_char_name(event_data.char_id)
        self.bot.send_private_channel_message("<highlight>%s<end> has left the private channel." % char_name)
authors: ["nepherius@live.com"]
author_id: nepherius@live.com

---

blob_id: cca4433d0c9ae3754b43c7408071a1e60e271914
directory_id: 6bed3db6f1682134631e106c1653397ca7478d2d
path: /build/ros_controllers/joint_trajectory_controller/catkin_generated/pkg.installspace.context.pc.py
content_id: 86857d5f44df2c4221f03e8e2152d71d47d432c5
detected_licenses: []
license_type: no_license
repo_name: 158774581/carbot_ws
snapshot_id: f09fbd405850c20169b6593ab19920446d049716
revision_id: 8a4681c4c4a66e0943ec4bbd5cd8cdffe8381fa0
branch_name: refs/heads/master
visit_date: 2022-01-11T13:50:25.024901
revision_date: 2019-07-21T14:03:04
committer_date: 2019-07-21T14:03:04
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 648
extension: py
content:
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/imhs/carbot_ws/install/include".split(';') if "/home/imhs/carbot_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "actionlib;angles;roscpp;urdf;control_toolbox;controller_interface;hardware_interface;realtime_tools;control_msgs;trajectory_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ljoint_trajectory_controller".split(';') if "-ljoint_trajectory_controller" != "" else []
PROJECT_NAME = "joint_trajectory_controller"
PROJECT_SPACE_DIR = "/home/imhs/carbot_ws/install"
PROJECT_VERSION = "0.15.0"
authors: ["rb4609@rit.edu"]
author_id: rb4609@rit.edu

---

blob_id: 7b0d2ebbf06bacd3bc9759378933e33c4c134c72
directory_id: 1b9eb5ba155285500877fd73faab0ae901f0e874
path: /zotnote/__init__.py
content_id: 96618daced5b50add19ef98fd41a116f81ab415c
detected_licenses: []
license_type: no_license
repo_name: sdaza/zotnote
snapshot_id: a097c39e655c23c76363b662d2d5b9447efae5cc
revision_id: 01428e96fe962d792fcbdda14f419bba3b2b95ad
branch_name: refs/heads/main
visit_date: 2023-06-17T08:11:22.049813
revision_date: 2021-07-06T08:10:39
committer_date: 2021-07-06T08:10:39
github_id: 372,754,875
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 98
extension: py
content:
from .functions import extractNotes
from .functions import exportNotes
from pyzotero import zotero
authors: ["sebastian.daza@gmail.com"]
author_id: sebastian.daza@gmail.com

---

blob_id: 08e691d2006ed65d94012585946e12fffff4ee0b
directory_id: 601ac0c9f7138b3e506c0511d4a3e7f60a499305
path: /src/pykeen/nn/init.py
content_id: 9b1110588b84ebf7380f76235524e0e61daefbff
detected_licenses: ["MIT"]
license_type: permissive
repo_name: cdpierse/pykeen
snapshot_id: 9aa551adc05c9e609353d473db1d3da1b92f4ab0
revision_id: e8225c066b56bcdd3180ba895ce3e153808e7e38
branch_name: refs/heads/master
visit_date: 2023-09-02T06:30:25.849873
revision_date: 2021-11-09T17:32:15
committer_date: 2021-11-09T17:32:15
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,348
extension: py
content:
# -*- coding: utf-8 -*-

"""Embedding weight initialization routines."""

import math

import numpy as np
import torch
import torch.nn
import torch.nn.init
from torch.nn import functional

from ..utils import compose

__all__ = [
    "xavier_uniform_",
    "xavier_uniform_norm_",
    "xavier_normal_",
    "xavier_normal_norm_",
    "uniform_norm_",
    "normal_norm_",
    "init_phases",
]


def xavier_uniform_(tensor, gain: float = 1.0):
    r"""Initialize weights of the tensor similarly to Glorot/Xavier initialization.

    Proceed as if it was a linear layer with fan_in of zero and Xavier uniform
    initialization is used, i.e. fill the weight of input `embedding` with values
    sampled from :math:`\mathcal{U}(-a, a)` where

    .. math::

        a = \text{gain} \times \sqrt{\frac{6}{\text{embedding_dim}}}

    :param tensor: A tensor
    :param gain: An optional scaling factor, defaults to 1.0.
    :return: Embedding with weights by the Xavier uniform initializer.
    """
    bound = gain * 6 / math.sqrt(tensor.shape[-1])
    torch.nn.init.uniform_(tensor, -bound, bound)
    return tensor


def xavier_normal_(tensor: torch.Tensor, gain: float = 1.0) -> torch.Tensor:
    r"""Initialize weights of the tensor similarly to Glorot/Xavier initialization.

    Proceed as if it was a linear layer with fan_in of zero and Xavier normal
    initialization is used. Fill the weight of input `embedding` with values
    sampled from :math:`\mathcal{N}(0, a^2)` where

    .. math::

        a = \text{gain} \times \sqrt{\frac{2}{\text{embedding_dim}}}

    :param tensor: A tensor
    :param gain: An optional scaling factor, defaults to 1.0.
    :return: Embedding with weights by the Xavier normal initializer.
    """
    std = gain * 2 / math.sqrt(tensor.shape[-1])
    torch.nn.init.normal_(tensor, mean=0.0, std=std)
    return tensor


def init_phases(x: torch.Tensor) -> torch.Tensor:
    r"""Generate random phases between 0 and :math:`2\pi`."""
    phases = 2 * np.pi * torch.rand_like(x[..., : x.shape[-1] // 2])
    return torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1).detach()


xavier_uniform_norm_ = compose(
    torch.nn.init.xavier_uniform_,
    functional.normalize,
)
xavier_normal_norm_ = compose(
    torch.nn.init.xavier_normal_,
    functional.normalize,
)
uniform_norm_ = compose(
    torch.nn.init.uniform_,
    functional.normalize,
)
normal_norm_ = compose(
    torch.nn.init.normal_,
    functional.normalize,
)


def init_quaternions(
    x: torch.FloatTensor,
) -> torch.FloatTensor:
    """Initialize quaternions."""
    num_elements, dim = x.shape
    if dim % 4 != 0:
        raise ValueError(f"Quaternions have four components, but dimension {dim} is not divisible by four.")
    dim //= 4
    # scaling factor
    s = 1.0 / math.sqrt(2 * num_elements)
    # modulus ~ Uniform[-s, s]
    modulus = 2 * s * torch.rand(num_elements, dim) - s
    # phase ~ Uniform[0, 2*pi]
    phase = 2 * math.pi * torch.rand(num_elements, dim)
    # real part
    real = (modulus * phase.cos()).unsqueeze(dim=-1)
    # purely imaginary quaternions unitary
    imag = torch.rand(num_elements, dim, 3)
    imag = functional.normalize(imag, p=2, dim=-1)
    imag = imag * (modulus * phase.sin()).unsqueeze(dim=-1)
    x = torch.cat([real, imag], dim=-1)
    return x.view(num_elements, 4 * dim)
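
# Usage sketch (added for illustration; not in the original file):
#
#     emb = torch.empty(14, 64)
#     xavier_uniform_(emb)                          # in-place uniform init
#     q = init_quaternions(torch.empty(14, 64))     # 64 = 4 * 16 components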
authors: ["noreply@github.com"]
author_id: cdpierse.noreply@github.com

---

blob_id: 18138586a59aaaf1e29e0069c865e664e73e1bfa
directory_id: 0f2cd75b6712ad4b4c5508fd30467f9a812357a5
path: /tripadvisorScrap.py
content_id: b635cdfca89ab7fd1ba8dd9af9668df852b30646
detected_licenses: []
license_type: no_license
repo_name: kartikb7/We-Do-Ticket
snapshot_id: ac3c4e8dd5b13308714f7c16132eaa888a074029
revision_id: 549d121789027f9fc9d554dfada77881daaa375f
branch_name: refs/heads/master
visit_date: 2022-03-28T22:17:02.793893
revision_date: 2020-01-05T21:02:06
committer_date: 2020-01-05T21:02:06
github_id: 213,017,183
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,548
extension: py
content:
# reference code: https://www.scrapehero.com/how-to-scrape-tripadvisor/
from datetime import datetime
from time import time
from lxml import html
import requests,re
import unicodecsv as csv
from operator import itemgetter
import pandas as pd
from prettytable import PrettyTable


def hotel_scrap(MatchCity):
    locality=MatchCity
    userInputCheckin=input("Please enter check-in date as YYYY/MM/DD:")
    userInputCheckout=input("Please enter check-out date as YYYY/MM/DD:")
    checkin_date = datetime.strptime(userInputCheckin,"%Y/%m/%d")
    checkout_date = datetime.strptime(userInputCheckout,"%Y/%m/%d")
    checkIn = checkin_date.strftime("%Y/%m/%d")
    checkOut = checkout_date.strftime("%Y/%m/%d")
    sort="popularity"

    print ("Scraper initiated for locality: %s"%locality)
    # TA renders the autocomplete list using this API
    print ("Finding search result page URL")
    geo_url = 'https://www.tripadvisor.com/TypeAheadJson?action=API&startTime='+str(int(time()))+'&uiOrigin=GEOSCOPE&source=GEOSCOPE&interleaved=true&types=geo,theme_park&neighborhood_geos=true&link_type=hotel&details=true&max=12&injectNeighborhoods=true&query='+locality
    api_response = requests.get(geo_url, verify=False).json()
    # getting the TA url for the query from the autocomplete response
    url_from_autocomplete = "http://www.tripadvisor.com"+api_response['results'][0]['url']
    print ('URL found %s'%url_from_autocomplete)
    geo = api_response['results'][0]['value']

    # Formatting date for writing to file
    date = checkin_date.strftime("%Y_%m_%d")+"_"+checkout_date.strftime("%Y_%m_%d")
    # form data to get the hotels list from TA for the selected date
    form_data = {'changeSet': 'TRAVEL_INFO',
                 'showSnippets': 'false',
                 'staydates':date,
                 'uguests': '2',
                 'sortOrder':sort
                 }
    # Referer is necessary to get the correct response from TA; if not provided they will redirect to the home page
    headers = {
        'Accept': 'text/javascript, text/html, application/xml, text/xml, */*',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'en-US,en;q=0.5',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
        'Host': 'www.tripadvisor.com',
        'Pragma': 'no-cache',
        'Referer': url_from_autocomplete,
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0',
        'X-Requested-With': 'XMLHttpRequest'
    }
    cookies= {"SetCurrency":"USD"}
    print ("Downloading search results page")
    page_response = requests.post(url = url_from_autocomplete,data=form_data,headers = headers, cookies = cookies, verify=False)
    print ("Parsing results ")
    parser = html.fromstring(page_response.text)
    hotel_lists = parser.xpath('//div[contains(@class,"listItem")]//div[contains(@class,"listing collapsed")]')
    hotel_data = []
    if not hotel_lists:
        hotel_lists = parser.xpath('//div[contains(@class,"listItem")]//div[@class="listing "]')

    for hotel in hotel_lists:
        XPATH_HOTEL_LINK = './/a[contains(@class,"property_title")]/@href'
        XPATH_REVIEWS = './/a[@class="review_count"]//text()'
        XPATH_RANK = './/div[@class="popRanking"]//text()'
        XPATH_RATING = './/a[contains(@class,"rating")]/@alt'
        XPATH_HOTEL_NAME = './/a[contains(@class,"property_title")]//text()'
        XPATH_HOTEL_FEATURES = './/div[contains(@class,"common_hotel_icons_list")]//li//text()'
        XPATH_HOTEL_PRICE = './/div[contains(@data-sizegroup,"mini-meta-price")]/text()'
        XPATH_VIEW_DEALS = './/div[contains(@data-ajax-preserve,"viewDeals")]//text()'
        XPATH_BOOKING_PROVIDER = './/div[contains(@data-sizegroup,"mini-meta-provider")]//text()'

        raw_booking_provider = hotel.xpath(XPATH_BOOKING_PROVIDER)
        raw_no_of_deals = hotel.xpath(XPATH_VIEW_DEALS)
        raw_hotel_link = hotel.xpath(XPATH_HOTEL_LINK)
        raw_no_of_reviews = hotel.xpath(XPATH_REVIEWS)
        raw_rank = hotel.xpath(XPATH_RANK)
        raw_rating = hotel.xpath(XPATH_RATING)
        raw_hotel_name = hotel.xpath(XPATH_HOTEL_NAME)
        raw_hotel_features = hotel.xpath(XPATH_HOTEL_FEATURES)
        raw_hotel_price_per_night = hotel.xpath(XPATH_HOTEL_PRICE)

        url = 'http://www.tripadvisor.com'+raw_hotel_link[0] if raw_hotel_link else None
        reviews = ''.join(raw_no_of_reviews).replace("reviews","").replace(",","") if raw_no_of_reviews else 0
        rank = ''.join(raw_rank) if raw_rank else None
        rating = ''.join(raw_rating).replace('of 5 bubbles','').strip() if raw_rating else None
        name = ''.join(raw_hotel_name).replace(',','-').strip() if raw_hotel_name else None
        hotel_features = ','.join(raw_hotel_features).replace(',',';')
        # price_per_night = ''.join(raw_hotel_price_per_night).encode('utf-8').replace('\n','') if raw_hotel_price_per_night else None
        price_per_night = ''.join(raw_hotel_price_per_night).replace('\n','') if raw_hotel_price_per_night else None
        no_of_deals = re.findall("all\s+?(\d+)\s+?",''.join(raw_no_of_deals))
        booking_provider = ''.join(raw_booking_provider).strip() if raw_booking_provider else None
        if no_of_deals:
            no_of_deals = no_of_deals[0]
        else:
            no_of_deals = 0

        data = {
            'hotel_name':name,
            'url':url,
            'locality':locality,
            'reviews':reviews,
            'tripadvisor_rating':rating,
            'checkOut':checkOut,
            'checkIn':checkIn,
            'hotel_features':hotel_features,
            'price_per_night':price_per_night,
            'no_of_deals':no_of_deals,
            'booking_provider':booking_provider
        }
        hotel_data.append(data)

    # data = parse(locality,checkin_date,checkout_date,sort)
    with open('tripadvisor_data.csv','wb') as csvfile:
        fieldnames = ['hotel_name','url','locality','reviews','tripadvisor_rating','checkIn','checkOut','price_per_night','booking_provider','no_of_deals','hotel_features']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for row in hotel_data:
            writer.writerow(row)


def hotel_visual():
    input_file=open("tripadvisor_data.csv")
    output_file=open("tripadvisor_data_sorted.csv","w")
    table=[]
    header=input_file.readline()
    for line in input_file:
        col=line.split(",")
        if col[7]!='':
            col[7]=float(col[7][1:])
            table.append(col)  # keep only priced rows so the numeric sort cannot mix str and float
    table_sorted=sorted(table,key=itemgetter(7),reverse=False)
    output_file.write(header)
    input_file.close()
    output_file.close()
    for row in table_sorted:
        row=[str(x) for x in row]
        row=[','.join(row)]
    f2=pd.DataFrame(table_sorted,columns=header.split(","))
    # print(f2.iloc[:,[0,3,4,7]])
    print('\033[1;31m Top Cheapest Hotels\033[0m')
    x = PrettyTable()
    x.field_names = ["hotel_name", "reviews", "tripadvisor_rating", "price_per_night"]
    hotelCount = len(table_sorted) if len(table_sorted)<10 else 10
    for i in range(hotelCount):
        list_hotel=[]
        list_hotel.append(table_sorted[i][0])
        list_hotel.append(table_sorted[i][3])
        list_hotel.append(table_sorted[i][4])
        list_hotel.append(table_sorted[i][7])
        x.add_row(list_hotel)
    print(x)

    # Ten Most Popular Hotels (most people reviewed)
    # input_file=open("tripadvisor_data.csv")
    # output_file=open("tripadvisor_data_sorted.csv","w")
    #
    # table=[]
    # header=input_file.readline()
    # for line in input_file:
    #     col=line.split(",")
    #     col[3]=float(col[3])
    #     table.append(col)
    # table_sorted=sorted(table,key=itemgetter(3),reverse=True)
    # output_file.write(header)
    # input_file.close()
    # output_file.close()
    # for row in table_sorted:
    #     row=[str(x) for x in row]
    #     row=[','.join(row)]
    # f2=pd.DataFrame(table_sorted,columns=header.split(","))
    # # print(f2.iloc[:,[0,3,4,7]])
    # print('\033[1;31m Ten Most Popular Hotels (most people reviewed)\033[0m')
    # x = PrettyTable()
    # x.field_names = ["hotel_name", "reviews", "tripadvisor_rating", "price_per_night"]
    # hotelCount = len(table_sorted) if len(table_sorted)<10 else 10
    # for i in range(hotelCount):
    #     list_hotel=[]
    #     list_hotel.append(table_sorted[i][0])
    #     list_hotel.append(table_sorted[i][3])
    #     list_hotel.append(table_sorted[i][4])
    #     list_hotel.append(table_sorted[i][7])
    #     x.add_row(list_hotel)
    #
    # print(x)

    # Ten Hotels with Highest Rating
    input_file=open("tripadvisor_data.csv")
    output_file=open("tripadvisor_data_sorted.csv","w")
    table=[]
    header=input_file.readline()
    for line in input_file:
        col=line.split(",")
        if col[4]!='':
            col[4]=float(col[4])
            table.append(col)  # skip unrated rows for the same reason as above
    table_sorted=sorted(table,key=itemgetter(4),reverse=True)
    output_file.write(header)
    input_file.close()
    output_file.close()
    for row in table_sorted:
        row=[str(x) for x in row]
        row=[','.join(row)]
    f2=pd.DataFrame(table_sorted,columns=header.split(","))
    # print(f2.iloc[:,[0,3,4,7]])
    print('\033[1;31m Top Hotels with Highest Rating\033[0m')
    x = PrettyTable()
    x.field_names = ["hotel_name", "reviews", "tripadvisor_rating", "price_per_night"]
    hotelCount = len(table_sorted) if len(table_sorted)<10 else 10
    for i in range(hotelCount):
        list_hotel=[]
        list_hotel.append(table_sorted[i][0])
        list_hotel.append(table_sorted[i][3])
        list_hotel.append(table_sorted[i][4])
        list_hotel.append(table_sorted[i][7])
        x.add_row(list_hotel)
    print(x)
authors: ["noreply@github.com"]
author_id: kartikb7.noreply@github.com

---

blob_id: f7a1f6c2de3fe073db61c020696bfe9329f1bb0a
directory_id: c983c4cc1ca2e88885d4c94ae615c6817711f2fe
path: /fsm.py
content_id: b3e98d687716e84fca51f534f4aaf706f93f9ff7
detected_licenses: []
license_type: no_license
repo_name: dinobby/facebook-messenger-chat-bot
snapshot_id: 8e2b884ee4fc49dd0f116f39392d7442da6149a3
revision_id: a65d148c7bf6f297984adac6767ef6939b711bd1
branch_name: refs/heads/master
visit_date: 2020-04-12T09:20:53.943305
revision_date: 2018-12-19T14:25:19
committer_date: 2018-12-19T14:25:19
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,679
extension: py
content:
import requests
from bs4 import BeautifulSoup as bs
from transitions.extensions import GraphMachine
from utils import send_text_message

path = ['綠幹線','綠1','綠2','綠3','綠4','綠5','綠6','綠7','綠10','綠11','綠12','綠13','綠14','綠15','綠16','綠17','綠20','綠20-1','綠21','綠22','綠23','綠24','綠25','綠26','綠27',
        '藍幹線','藍1','藍2','藍3','藍10','藍11','藍12','藍13','藍20','藍21','藍22','藍23','藍24','藍25',
        '棕幹線','棕1','棕2','棕3','棕3-1','棕10','棕11',
        '橘幹線','橘1','橘2','橘3','橘4','橘4-1','橘5','橘10','橘10-1','橘11','橘12','橘20',
        '黃幹線','黃1','黃2','黃3','黃4','黃5','黃6','黃6-1','黃7','黃9','黃10','黃10-1','黃11','黃11-1','黃11-2','黃12','黃12-2','黃13','黃14','黃14-1','黃15','黃16','黃20',
        '紅幹線','紅1','紅2','紅3','紅3-1','紅4','紅10','紅11','紅12','紅13','紅14']
ids = [1100,1101,1102,1103,1104,1105,1106,1107,1110,1111,1112,1113,1114,1115,1116,1117,1120,1804,1121,1122,1123,1124,1125,1126,1127,
       1200,1201,1202,1203,1210,1211,1212,1213,1220,1221,1222,1223,1224,1225,
       1300, 1301, 1302, 1303, 1810, 1310, 1311,
       1400,1401,1402,1403,1404,1808,1405,1410,1807,1411,1412,1420,
       1500,1501,1502,1503,1504,1505,1506,1802,1507,1509,1510,1812,1511,1809,1813,1512,1805,1513,1514,1806,1515,1516,1520,
       1600,1601,1602,1603,1801,1604,1610,1611,1612,1613,1614]


class TocMachine(GraphMachine):
    def __init__(self, **machine_configs):
        self.machine = GraphMachine(
            model=self,
            **machine_configs
        )

    def is_going_to_state1(self, event):
        if event.get("message"):
            text = event['message']['text']
            return text == '開始使用'
        return False

    def state1_going_to_state2(self, event):
        if event.get("message"):
            text = event['message']['text']
            return text == '1'
        return False

    def state1_going_to_state3(self, event):
        if event.get("message"):
            text = event['message']['text']
            return text == '2'
        return False

    def state1_going_to_state4(self, event):
        if event.get("message"):
            text = event['message']['text']
            return text == '3'
        return False

    def state2_going_to_state5(self, event):
        if event.get("message"):
            text = event['message']['text']
            return len(text) > 0 and self.state == 'state2'

    def state5_going_to_state1(self, event):
        if event.get("message"):
            text = event['message']['text']
            return text == '返回'

    def state3_going_to_state6(self, event):
        if event.get("message"):
            text = event['message']['text']
            return len(text) > 0 and self.state == 'state3'

    def state6_going_to_state1(self, event):
        if event.get("message"):
            text = event['message']['text']
            return text == '返回'

    def on_enter_state1(self, event):
        print("I'm entering state1")
        sender_id = event['sender']['id']
        response = send_text_message(sender_id, "歡迎使用「沒有機車沒關係我來幫你找公車」!\n輸入「1」查詢公車到站時間\n輸入「2」查詢公車票價\n輸入「3」查詢公車優惠說明")
        #self.go_back()

    # def on_exit_state1(self):
    #     print('Leaving state1')

    def on_enter_state2(self, event):
        print("I'm entering state2")
        sender_id = event['sender']['id']
        send_text_message(sender_id, "請輸入您想要查詢的路線以及去/返程,例如:綠1 去 / 橘4-1 返 / 紅13 去...等,路線名稱與去返程中間以空格隔開")
        #self.go_back()

    # def on_exit_state2(self):
    #     print('Leaving state2')

    def on_enter_state3(self, event):
        print("I'm entering state3")
        sender_id = event['sender']['id']
        response = send_text_message(sender_id, "輸入路線及起訖站來查詢票價,例如:綠1 新化站 國泰大樓,路線與起訖站中間以空格隔開")
        #self.go_back()

    # def on_exit_state3(self):
    #     print('Leaving state3')

    def on_enter_state4(self, event):
        print("I'm entering state4")
        s = '⭐市民卡1日9元搭到飽\n\
台南市民持市民卡搭乘市區公車(0~99路,33路除外),\
並且上、下車都刷市民卡,第1段半價(9元),當日第2段起免費!💖\
(下車未刷卡時,當日搭乘下一段次仍維持半價9元計費)\n\
\n\
⭐幹支線公車8公里免費\n\
持電子票證(市民卡、一卡通、悠遊卡、iCash 2.0)\
搭乘幹支線公車(綠、藍、棕、橘、黃、紅),並且上、下車都刷卡,享前8公里免費!💖\
\n\
⭐轉乘優惠加碼為4小時\n\
持電子票證(市民卡、一卡通、悠遊卡、iCash 2.0)\
搭乘台鐵及大台南公車,並且上、下車都刷卡,\
4小時內轉乘市區公車1段票免費、轉乘幹支線公車8公里免費再折扣9元!💖\n\
\n\
⚡特別提醒⚡\n\
學生證就是市民卡:本市高中職以下學生,刷學生證即享優惠。\
設籍臺南市民眾,可攜帶身分證至臺南市37區公所申辦市民卡一般卡,每張新臺幣100元。\
107年9月1日開放iCash 2.0,9月底前每趟加贈OPENPOINT 300點,歡迎多加利用👍\n\
\n\
※實施期間:自107年8月1日起至107年12月31日止。'
        sender_id = event['sender']['id']
        send_text_message(sender_id, s)
        self.go_back()

    def on_enter_state5(self, event):
        global path, ids
        query_path = 0
        goback = 0  # default so the URL below is always well-defined
        sender_id = event['sender']['id']
        if sender_id != '312249102747382':
            if event.get("message"):
                # compare the message text (not the event dict) against '返回'
                if event['message']['text'] == '返回':
                    self.go_back()
                else:
                    l = event['message']['text'].split(" ")
                    if len(l) == 2:
                        query_path = l[0]
                        if l[1] == '去':
                            goback = 0
                        elif l[1] == '返':
                            goback = 1
                        else:
                            goback = 0
                    try:
                        id = ids[path.index(query_path)]
                        url = 'http://2384.tainan.gov.tw/NewTNBusAPI_V2/API/GoAndBackWithTimeV1.ashx?id={0}&goback={1}&Lang=cht'.format(id,goback)
                        res = requests.get(url)
                        d = res.json()
                        s = ''
                        for i in d:
                            if i['Time'] == '末班已駛離':
                                s+=('離{0}站,末班已駛離').format(i['StopName']) + '\n'
                            else:
                                s+=('離{0}站,還有{1}分鐘'.format(i['StopName'],i['Time'])) + '\n'
                        send_text_message(sender_id, s)
                    except ValueError:
                        send_text_message(sender_id, "對不起,您輸入的路線不存在")

    def on_enter_state6(self, event):
        global path, ids
        query_path = 0
        query_start = ''
        query_end = ''
        sender_id = event['sender']['id']
        if sender_id != '312249102747382':
            if event.get("message"):
                # compare the message text (not the event dict) against '返回'
                if event['message']['text'] == '返回':
                    self.go_back()
                else:
                    l = event['message']['text'].split(" ")
                    if len(l) == 3:
                        query_path = l[0]
                        query_start = l[1]
                        query_end = l[2]
                    try:
                        id = ids[path.index(query_path)]
                        url = 'http://2384.tainan.gov.tw/NewTNBusAPI_V2/API/FareV1.ashx?pathid={0}&sname={1}&ename={2}'.format(id, query_start, query_end)
                        res = requests.get(url)
                        d = res.json()
                        s = '現金全票:' + d['fareCash'] + '元\n' + \
                            '現金半票:' + d['fareCashHalf'] + '元\n' + \
                            '電子票卡全票:' + d['fareIc'] + '元\n' + \
                            '電子票卡半票:' + d['fareIcHalf'] + '元'
                        send_text_message(sender_id, s)
                    except ValueError:
                        send_text_message(sender_id, "對不起,您輸入的路線或起訖站不存在")

    # def on_exit_state3(self):
    #     print('Leaving state4')
authors: ["noreply@github.com"]
author_id: dinobby.noreply@github.com

---

blob_id: 12d863d79095b97e96ea274f2a32bf1fc79c1279
directory_id: 1c0f09e25d3601f4b7c692c494514f19d3fc1bce
path: /Two-stage Recoverable FLP/test/test_scipy.io.py
content_id: b5349cca145cc595600ef9496a3aacae8c28b27d
detected_licenses: []
license_type: no_license
repo_name: dubo0111/Python
snapshot_id: 9be94d7670c9eb841d237e6ff4bc45ed5796c7b1
revision_id: bcf03b360ceb18da75e867d1d63522f1d350e907
branch_name: refs/heads/master
visit_date: 2020-03-23T21:36:27.155231
revision_date: 2019-09-02T03:32:57
committer_date: 2019-09-02T03:32:57
github_id: 142,118,526
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 193
extension: py
content:
# test .mat I/O
import scipy.io
mat = scipy.io.loadmat('city_250.mat')
a=mat['city_250']
#print(a)
b=a[0,:]
#print(b)
scipy.io.savemat('save_b.mat',{'b':b})
bb = scipy.io.loadmat('save_b.mat')
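
# Added note (illustrative, not in the original file): loadmat returns a dict
# keyed by variable name, and with savemat's default oned_as='row' the 1-D
# array b round-trips as a (1, N) row vector:
#     assert bb['b'].shape == (1, b.size)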
authors: ["dubo0111@hotmail.com"]
author_id: dubo0111@hotmail.com

---

blob_id: 24df43e3a494a253832fa18042119ad13e94661c
directory_id: 0946818d16631ebd2366c9c225ddd0c7301bb85e
path: /05 - 文本分析/0417-textCNN/20190417/news_classfication_textcnn/preprocess_data.py
content_id: 21ef1aa8c1e0d8eb748049c14e0d1d5c6a471f69
detected_licenses: []
license_type: no_license
repo_name: frozenYu/AI-course
snapshot_id: 49a24520040f7bf1df650039a88be378e848c9a7
revision_id: d64208acacdbe1c0ddc9b30d5d61ad8b80b3074a
branch_name: refs/heads/master
visit_date: 2020-06-24T11:25:30.446181
revision_date: 2019-05-09T04:29:34
committer_date: 2019-05-12T04:29:34
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,495
extension: py
content:
import numpy as np
import jieba
from keras.preprocessing import sequence
from keras import utils


def split_dataset(filename_in):
    data_in = open(filename_in,'r',encoding='utf8')
    label_sentences_dict = {}  # dict mapping each label to its list of sentences
    for line in data_in:
        label,sentence = line.strip().split('\t')
        if label not in label_sentences_dict:
            lst = []
            lst.append(sentence)
            label_sentences_dict[label] = lst
        else:
            lst = label_sentences_dict[label]
            lst.append(sentence)
            label_sentences_dict[label] = lst
    # print(len(label_sentences_dict))
    data_in.close()
    return label_sentences_dict


def save2file(label_sentences_dict,train_ratio,training_file,testing_file):
    data_out_training = open(training_file,'w',encoding='utf8')
    data_out_testing = open(testing_file,'w',encoding='utf8')
    for label in label_sentences_dict:
        sentences = label_sentences_dict[label]
        np.random.shuffle(sentences)
        for i in range(0,len(sentences)):
            # write tab-separated lines so the '\t' splits used below keep working
            if i <int(train_ratio*len(sentences)):
                data_out_training.write(label+'\t'+sentences[i]+'\n')
            else:
                data_out_testing.write(label+'\t'+sentences[i]+'\n')
    data_out_training.close()
    data_out_testing.close()


# build the word and label vocabularies, filtering low-frequency words
def create_word2index(train_file,min_num):
    data_in = open(train_file,'r',encoding='utf8')
    label_set = set()
    # word_set = set()
    word_dict={}
    for line in data_in:
        label,sentence = line.strip().split('\t')
        label_set.add(label)  # collect the label
        words = jieba.cut(sentence)  # collect the words
        for word in words:
            word = word.strip()
            if word == '' or word == '\t' or word == ',' or word == ':':  # filter special symbols (customizable)
                continue
            if word not in word_dict:
                word_dict[word] = 1
            else:
                num = word_dict[word]
                num+=1
                word_dict[word] = num
            # word_set.add(word)
    data_in.close()
    print('word_dict is: ',word_dict)
    # drop low-frequency words from word_dict
    word_dict_new,word_set = filter_word_dict(word_dict,min_num)
    print('word_dict_new is: ',word_dict_new)
    word2index = {}
    index = 0
    for word in word_set:
        word2index[word] = index
        index+=1
    label2index = {}
    index = 0
    for label in label_set:
        label2index[label] = index
        index+=1
    if '' in word_set:
        print('exist')
    else:
        print('no exist')
    return word2index,label2index


def filter_word_dict(word_dict,min_num):
    word_dict_new = {}
    word_set = set()
    for word,num in word_dict.items():
        if num>=min_num:
            word_dict_new[word] = num
            word_set.add(word)
    return word_dict_new,word_set


def create_trainingdata(training_file,word2index,label2index,padding_length = 300):
    data_in = open(training_file,'r',encoding='utf8')
    training_data = []
    for line in data_in:
        label,sentence = line.strip().split('\t')
        words = jieba.cut(sentence)
        feas = [word2index[word] for word in words if word in word2index]
        label = [label2index[label]]
        training_data.append((feas,label))  # keep features and label together so shuffling stays aligned
    data_in.close()
    np.random.shuffle(training_data)
    training_feas = []
    training_labels = []
    for feas, label in training_data:
        training_feas.append(feas)
        training_labels.append(label)
    # print('training_feas is: ',training_feas)
    # print('training_labels is: ',training_labels)
    training_feas = sequence.pad_sequences(training_feas,maxlen=padding_length,padding='post', truncating='post')
    training_labels = utils.to_categorical(training_labels,num_classes=10)
    # print('after, training_feas is: ',training_feas)
    # print('after, training_labels is: ',training_labels)
    training_feas = np.array(training_feas)
    training_labels = np.array(training_labels)
    return training_feas,training_labels


if __name__ == '__main__':
    # label =
    label_sentences_dict = split_dataset('cnews_test.txt')
    train_ratio = 0.1
    save2file(label_sentences_dict,train_ratio,'cnews_test1.txt','cnews_test2.txt')
    # for label in label_sentences_dict:
    #     print(label)
    #     print(len(label_sentences_dict[label]))
authors: ["lizhicq@gmail.com"]
author_id: lizhicq@gmail.com

---

blob_id: ca73adb0ece5f7a1cbeb842f3c0078a486c78355
directory_id: 38a03a8c4426eb31ec566cd62cab05b47ddc123a
path: /qcwy/qcwy/settings.py
content_id: 44a6cdd7d83d845b510d55f6a6a0fe86ec7c2c9e
detected_licenses: []
license_type: no_license
repo_name: leehwayou/Scrapy_51job
snapshot_id: 85a3efaa9790bf6f830665887523a669005e16dc
revision_id: 8bb3f05371261fd5f71d92dcdbd2db9f486615ac
branch_name: refs/heads/master
visit_date: 2021-03-30T01:58:22.748712
revision_date: 2020-03-17T15:24:05
committer_date: 2020-03-17T15:24:05
github_id: 248,004,208
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,285
extension: py
content:
# -*- coding: utf-8 -*-
# Scrapy settings for qcwy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'qcwy'
SPIDER_MODULES = ['qcwy.spiders']
NEWSPIDER_MODULE = 'qcwy.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'qcwy (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'qcwy.middlewares.QcwySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'qcwy.middlewares.QcwyDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'qcwy.pipelines.QcwyPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
DOWNLOAD_DELAY = 0.3
COOKIES_ENABLED = False
ITEM_PIPELINES = {
    'qcwy.pipelines.QcwyPipeline': 300,
}
authors: ["317551047@qq.com"]
author_id: 317551047@qq.com

---

blob_id: de7786b217ba7f38702ef86869144ea48d5a1da3
directory_id: 32288fb5bfcef7ee45d45dd8f97f7229a631512b
path: /python_code/inc.py
content_id: 0444375b0556b44b5114663693ac69d943af275a
detected_licenses: []
license_type: no_license
repo_name: GalotonReone/PPPPPPP
snapshot_id: f4dfd849251c9e1c182a8ae0472050d5b37ac14f
revision_id: bc6bca69741504b79da03372799bd086dc8e023e
branch_name: refs/heads/master
visit_date: 2023-08-10T17:29:11.306148
revision_date: 2021-10-02T13:59:17
committer_date: 2021-10-02T13:59:17
github_id: 412,811,110
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,685
extension: py
content:
import csv
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
import os.path
with open('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/extra.csv','r') as ro1:
read1=csv.reader(ro1)
new_rows=list(read1)
if os.path.isfile('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/new.csv'):
print ("File exist")
with open('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/new.csv', 'a') as ao:
append = csv.writer(ao)
append.writerows(new_rows)
print("done if")
else:
print ("File not exist")
with open('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/hist.csv','r') as ro:
read=csv.reader(ro)
rows=list(read)
with open('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/new.csv','w',newline='') as wo:
write=csv.writer(wo)
write.writerows(rows)
with open('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/new.csv', 'a') as ao:
append = csv.writer(ao)
append.writerows(new_rows)
print("done else")
data2 = pd.read_csv(open('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/hist.csv'),sep=',')
#print(data2)
data2.loc[data2["recruited"]=='Yes',"recruited"]=1
data2.loc[data2["recruited"]=='No',"recruited"]=0
data2.loc[data2['verbal']=='Excellent','verbal']=1
data2.loc[data2['verbal']=='Good','verbal']=2
data2.loc[data2['verbal']=='Average','verbal']=0
data2.loc[data2['verbal']=='Poor','verbal']=3
data2.loc[data2['comm']=='Excellent','comm']=1
data2.loc[data2['comm']=='Good','comm']=2
data2.loc[data2['comm']=='Average','comm']=0
data2.loc[data2['comm']=='Poor','comm']=3
data2.loc[data2['logical']=='Excellent','logical']=1
data2.loc[data2['logical']=='Good','logical']=2
data2.loc[data2['logical']=='Average','logical']=0
data2.loc[data2['logical']=='Poor','logical']=3
data2.loc[data2['quantitative']=='Excellent','quantitative']=1
data2.loc[data2['quantitative']=='Good','quantitative']=2
data2.loc[data2['quantitative']=='Average','quantitative']=0
data2.loc[data2['quantitative']=='Poor','quantitative']=3
data2.loc[data2['extracurr']=='others','extracurr']=0
data2.loc[data2['extracurr']=='cultural','extracurr']=1
data2.loc[data2['extracurr']=='sports','extracurr']=2
data2.loc[data2["drops"]=='Yes',"drops"]=1
data2.loc[data2["drops"]=='No',"drops"]=0
#print(data2.head(5))
data2_x=data2[['verbal','quantitative','logical','comm','extracurr','drops']]
data2_y=data2['recruited']
#print((data2_x).toarray())
#print((data2_y).toarray())
features_train, features_test, target_train, target_test = train_test_split(data2_x,data2_y, test_size=0.33, random_state=10)
data1 = pd.read_csv(open('C:/Users/Chinmay Chaughule/PycharmProjects/proj1/extra.csv'),sep=',')
cv = CountVectorizer(lowercase=False)
X = cv.fit_transform(data1.verbal+data1.quantitative+data1.logical+data1.comm+data1.extracurr+data1.drops).toarray()
y=cv.fit_transform(data1.recruited).toarray()
print(len(X))
print(len(y))
''''cv1=CountVectorizer(lowercase=False)
#f_t=[str (item) for item in features_train]
#f_t=[item for item in f_t if not isinstance(item,int)]
X=cv1.fit_transform(features_train).toarray()
print(type(features_train))
print(type(target_train.to_frame()))
t_t=[str (item1) for item1 in target_train]
t_t=[item1 for item1 in t_t if not isinstance(item1,int)]
y=cv1.fit_transform(target_train.to_frame()).toarray()
'''''
print(X)
print(y)
gb = GaussianNB()
# GaussianNB.partial_fit expects 1-D class labels (plus the full class list on
# the first call), so fit on the raw labels rather than the one-hot matrix y.
gb.partial_fit(X, data1.recruited.values, classes=data1.recruited.unique())
print("model created")
|
[
"noreply@github.com"
] |
GalotonReone.noreply@github.com
|
a6eb0098a1c416649e25f8560bcf7e9e3d2097e5
|
c61b43cd266ecc5a3ff5244c079e4324383340f8
|
/example/proxy/app.py
|
9eb7223102e359482214c5c84a83f148cd18095b
|
[
"MIT"
] |
permissive
|
fusion44/fastapi-versioning
|
9cc6dc45f13defa105a7cf58f5aac9ecbdcc9925
|
c1832936726d9ffce364ac63a9aac2ff3712b37d
|
refs/heads/master
| 2023-08-03T12:00:21.601970
| 2021-09-26T09:00:37
| 2021-09-26T09:00:37
| 410,499,977
| 0
| 0
|
MIT
| 2021-09-26T08:57:34
| 2021-09-26T08:53:13
| null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
from fastapi import FastAPI
from fastapi_versioning import VersionedFastAPI, version
app = FastAPI(title="My App")
@app.get("/greet")
@version(1, 0)
def greet_with_hello() -> str:
return "Hello"
@app.get("/greet")
@version(1, 1)
def greet_with_hi() -> str:
return "Hi"
app = VersionedFastAPI(app, root_path="/api")
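# Note: VersionedFastAPI mounts each @version as its own sub-application, so
# with the default prefix format the handlers above are served at paths like
# /v1_0/greet and /v1_1/greet (root_path="/api" matters when running behind a
# proxy that strips the /api prefix). A hypothetical local check with httpx:
# import httpx
# print(httpx.get("http://localhost:8000/v1_0/greet").json())  # "Hello"
# print(httpx.get("http://localhost:8000/v1_1/greet").json())  # "Hi"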
|
[
"deanway93@gmail.com"
] |
deanway93@gmail.com
|
830fbda9a688ec4516ba2b640b81b59383c26b62
|
ddb3db602622334ba1209f0784b91c7630836e97
|
/skribblr_project/skribblr_project/urls.py
|
53613d34741b0a9a72683a2fa32e41bafd1520de
|
[
"MIT"
] |
permissive
|
mhhoban/skribblr
|
701a98ea9b4615f1104cb3a98389e14ffbf810f2
|
834d351ab5adfacb187cbb7d4e1abda30c335e99
|
refs/heads/master
| 2021-01-19T08:45:58.478857
| 2017-04-09T07:19:19
| 2017-04-09T07:19:19
| 87,672,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
"""skribblr_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"mhhoban@gmail.com"
] |
mhhoban@gmail.com
|
9b34ab457f4794bb6e1f54543d0d87cd1e9e509b
|
38b9fa8384d8b852558fd79148e02ffa95447c12
|
/METValidator/python/metvalidator_cfi.py
|
e65c801aae2049967433e8a3689e2084a5acdf83
|
[] |
no_license
|
mageisler/MyAnalyzers
|
5aa24a29945f19220c0ac03937b51164bd5aeaaf
|
dca0cbd2e01e3a31df7572d7ff7e6422083f3545
|
refs/heads/master
| 2016-09-05T18:26:52.723547
| 2014-01-29T08:44:42
| 2014-01-29T10:06:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
import FWCore.ParameterSet.Config as cms
demo = cms.EDAnalyzer('METValidator'
)
|
[
"mgeisler@cern.ch"
] |
mgeisler@cern.ch
|
a3bea9cd17507051e84abafad2fbda57bfda2d5e
|
b34b6910a4d46fd38e25a64e885bd435ef02528d
|
/program3.py
|
a5000c285c9f5a7fbe7028352ef6eced67121c99
|
[] |
no_license
|
Naush22/codekata
|
161c29da539a4d641108cc65086a3be8d7f795f6
|
7767c454353e7a86cf7da75fca6b315e159da032
|
refs/heads/master
| 2020-04-21T16:04:37.463247
| 2019-02-19T08:13:30
| 2019-02-19T08:13:30
| 169,688,880
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
yz=raw_input()
if((yz>='a'and yz<='z')or(yz>='A'and yz<='Z')):
print("Alphabet")
else:
print("NO")
|
[
"noreply@github.com"
] |
Naush22.noreply@github.com
|
ec45bb5ab5f0dc0c1d56e349ed7ea43b25cd7037
|
4066190a88d59b66a1a8712358992df3a3a35a8b
|
/tetris_v3/pygame_enums.py
|
bf2ac9a97891f3c215d60c0626b2854091774c54
|
[] |
no_license
|
ReijoJaakkola/Pygame-Tetris
|
fbbff240e4f968b587e8811d93dc6206679266af
|
a9bf151fbddaff9270c94e96a7ecc3fcaf4e09a6
|
refs/heads/master
| 2021-07-12T15:20:29.445351
| 2021-07-10T10:11:07
| 2021-07-10T10:11:07
| 210,703,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from enum import Enum
# Enum for the possible directions for the current piece.
class Direction(Enum):
RIGHT = 0
LEFT = 1
DOWN = 2
# Enum for the possible colors.
class Color(Enum):
GREEN = 0
RED = 1
BLUE = 2
YELLOW = 3
VIOLET = 4
# Enum for the possible shapes for the current piece.
class Shape(Enum):
SQUARE = 0
PIPE = 1
FIVE = 2
L = 3
MIDDLE = 4
|
[
"reijo.jaakkola@tuni.fi"
] |
reijo.jaakkola@tuni.fi
|
4387be7e2c57b61f2b6431b23a407b8aa32b9082
|
94e1151d9a7f1a214ec3d4b282ce5ea7a43a93f3
|
/LSAs/RouterLSA.py
|
a74e0e5f63730a151f6e38031dcb3687cbd2d8ad
|
[] |
no_license
|
andreppires/OSPF-ABR_ArbTop
|
c45e0a0e2c3f854bfe73b8f79e69e83339b66a75
|
93be7d9e737118ef04f818df634d0ad4f138d2a7
|
refs/heads/master
| 2021-07-03T00:00:42.025183
| 2019-02-20T23:37:10
| 2019-02-20T23:37:10
| 95,771,424
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
import struct
import utils
from LSAs.LSAHeader import LSAHeader
OSPF_LSA_ROUTER = "> BBH "
OSPF_LSA_ROUTER_LEN = struct.calcsize(OSPF_LSA_ROUTER)
OSPF_LSA_ROUTER_LINK_DATA = "> I I BBH"
OSPF_LSA_ROUTER_LINK_DATA_LEN = struct.calcsize(OSPF_LSA_ROUTER_LINK_DATA)
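# The big-endian struct layouts above mirror RFC 2328's Router-LSA body:
# "BBH"     -> V/E/B flags byte, zero padding byte, number of links;
# "I I BBH" -> Link ID, Link Data, link type, number of TOS entries, metric.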
class RouterLSA(LSAHeader):
def __init__(self,sourceR, lsage, opt, lstype, lsid, advert, lsNumber, ck, lg, v, e, b, links, linksData):
LSAHeader.__init__(self,sourceR, lsage, opt, lstype, lsid, advert, lsNumber, ck, lg)
self.V = v
self.E = e
self.B = b
self.NumberLinks = links
self.LinksData = linksData
def printLSA(self):
print "Router LSA: ABR? ADV Router Age Seq# Link count"
print " ",self.getBbit()," ",self.getADVRouter()," ",self.getAge(),\
" ", self.getSeqNumber()," ", len(self.LinksData)
def calculateLength(self, ck):
hdlen = self.getLengthHeader(ck)
netlen = (OSPF_LSA_ROUTER_LEN + (OSPF_LSA_ROUTER_LINK_DATA_LEN * self.NumberLinks))
self.setLength(hdlen+netlen, ck)
return hdlen+netlen
def calculateChecksum(self):
lg = self.calculateLength(True)
pack = self.packRLSA()
structn = self.getHeaderPack() + pack
checkum = utils.fletcher(structn, 16, lg)
self.setChecksum(checkum)
return 0
def printaTudoo(self):
self.printaTudo()
print "EVB:", self.E, self.V, self.B
print "Number Links:", self.NumberLinks
print "Links Data:", self.LinksData
def packRLSA(self):
first = self.V*4 + self.E*2 + self.B
pack = struct.pack(OSPF_LSA_ROUTER, first, 0, self.NumberLinks)
for x in self.LinksData:
pack = pack + struct.pack(OSPF_LSA_ROUTER_LINK_DATA, utils.IPtoDec(x[0]),
utils.IPtoDec(x[1]), x[2], x[3], x[4])
return pack
def getLSAtoSend(self, ):
pack = self.packRLSA()
return [self.getHeaderPack() + pack, self.getLength()]
def setBbit(self, value):
self.B = value
def getBbit(self):
return self.B
def getEbit(self):
return self.E
def getPrefixandCost(self):
return [10, self.AdvertisingRouter] # TODO get the right cost
def getDicOfNeighbors(self):
out ={}
for x in self.LinksData:
if x[2] == 2: #se o link for do tipo transit
out[x[0]] = x[4]
return out
|
[
"andre.pinheiro.pires@gmail.com"
] |
andre.pinheiro.pires@gmail.com
|
de4f42e4dc390bf799a0d75d062b12372cbdba7c
|
27d7b9f1c88b85d524f94163648aa117d7b17a9b
|
/Face detector ,Motion detector and gaming programs/frame_diff.py
|
c628470459a7352ecdffe193aa20ab4ed76f2303
|
[] |
no_license
|
DipeshDhandha07/Machine-Learning-Projects
|
a8658b788e49c6841f88fa51145c468dc217c44b
|
0e5164680798f2e0139e5082d5b7709cd4e2ae35
|
refs/heads/main
| 2023-07-26T03:19:39.461631
| 2023-07-24T06:35:57
| 2023-07-24T06:35:57
| 393,878,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
import cv2
# Compute the frame differences
def frame_diff(prev_frame, cur_frame, next_frame):
# Difference between the current frame and the next frame
diff_frames_1 = cv2.absdiff(next_frame, cur_frame)
# Difference between the current frame and the previous frame
diff_frames_2 = cv2.absdiff(cur_frame, prev_frame)
return cv2.bitwise_and(diff_frames_1, diff_frames_2)
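# The bitwise AND keeps only pixels that changed in *both* transitions
# (prev->cur and cur->next), which suppresses single-frame noise and ghosting.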
# Define a function to get the current frame from the webcam
def get_frame(cap, scaling_factor):
# Read the current frame from the video capture object
_, frame = cap.read()
# Resize the image
frame = cv2.resize(frame, None, fx=scaling_factor,
fy=scaling_factor, interpolation=cv2.INTER_AREA)
# Convert to grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return gray
if __name__=='__main__':
# Define the video capture object
cap = cv2.VideoCapture(0)
# Define the scaling factor for the images
scaling_factor = 0.5
# Grab the current frame
prev_frame = get_frame(cap, scaling_factor)
# Grab the next frame
cur_frame = get_frame(cap, scaling_factor)
# Grab the frame after that
next_frame = get_frame(cap, scaling_factor)
# Keep reading the frames from the webcam
# until the user hits the 'Esc' key
while True:
# Display the frame difference
cv2.imshow('Object Movement', frame_diff(prev_frame,
cur_frame, next_frame))
# Update the variables
prev_frame = cur_frame
cur_frame = next_frame
# Grab the next frame
next_frame = get_frame(cap, scaling_factor)
# Check if the user hit the 'Esc' key
key = cv2.waitKey(10)
if key == 27:
break
# Close all the windows
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
DipeshDhandha07.noreply@github.com
|
51924aaac8fe49727a533d4c2e7fe1f306daab0e
|
40bbf089bdd60e95e124bdf273f3d16480c9d568
|
/base/read_excel.py
|
b26ee43e880c5f249e194e28f182e0463c9cd409
|
[] |
no_license
|
wulimin523/webAutoTest
|
0449f61a7b3ea7e457923ff181ab4f6922604f5d
|
a5d7a57edf70de79c1b6f008be8027fd670fd98a
|
refs/heads/master
| 2023-02-19T04:24:51.813719
| 2021-01-18T09:20:13
| 2021-01-18T09:20:13
| 327,577,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
# @Project :webAutoTest
# @File :read_excel
# @Date :2020/12/31 18:07
# @Author :吴利民
# @Email :wulimin523@163.com
# @Software :PyCharm
-------------------------------------------------
"""
import xlrd
import config
data = xlrd.open_workbook('E:\\job\\CRM系统\\data\\CRM系统_测试用例.xlsx')
table = data.sheets()[0]
rows = table.nrows  # nrows/ncols are attributes, not methods
cols = table.ncols
print(rows)
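# Note: xlrd 2.x only reads legacy .xls workbooks; for an .xlsx file like the
# one opened above, openpyxl (or pandas.read_excel with engine="openpyxl") is
# the usual route.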
|
[
"wulimin523@163.com"
] |
wulimin523@163.com
|
452d5370d5e9797725db180985a3c061d18c0bd8
|
16121595581229e6666bfc52cdd526d7451a4c15
|
/ideas/misc/strange_idb/dev/helper.py
|
65f02bd61edee200c3606c7abfefb3baff4e06da
|
[
"MIT"
] |
permissive
|
HackerDom/ctfcup-2019-tb
|
c78a69cf4b95339048f7d44ce3a40561d6e56ca0
|
b10f887a5cebfd311fd6577973a5381ac45594d6
|
refs/heads/master
| 2020-09-21T19:47:36.291637
| 2019-12-02T08:59:21
| 2019-12-02T08:59:21
| 224,905,498
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,849
|
py
|
import binascii
a = {0x0: 0x2055, 0x1: 0x4084, 0x2: 0x4088, 0x3: 0x408c, 0x4: 0x4090, 0x5: 0x4094, 0x6: 0x4098, 0x7: 0x409c, 0x8: 0x40a0, 0x9: 0x40a4, 0xa: 0x40a8, 0xb: 0x40ac, 0xc: 0x40b0, 0xd: 0x40b4, 0xe: 0x40b8, 0xf: 0x40bc, 0x10: 0x40c0, 0x11: 0x40c4, 0x12: 0x40c8, 0x13: 0x40cc, 0x14: 0x40d0, 0x15: 0x40d4, 0x16: 0x40d8, 0x17: 0x40dc, 0x18: 0x40e0, 0x19: 0x40e4, 0x1a: 0x40e8, 0x1b: 0x40ec, 0x1c: 0x40f0, 0x1d: 0x40f4, 0x1e: 0x40f8, 0x1f: 0x40fc, 0x20: 0x4100, 0x21: 0x4104, 0x22: 0x4108, 0x23: 0x410c, 0x24: 0x4110, 0x25: 0x4114, 0x26: 0x4118, 0x27: 0x411c, 0x28: 0x4120, 0x29: 0x4124, 0x2a: 0x4128, 0x2b: 0x412c, 0x2c: 0x4130, 0x2d: 0x4134, 0x2e: 0x4138, 0x2f: 0x413c, 0x3a: 0x4160, 0x3b: 0x4164, 0x3c: 0x4168, 0x3d: 0x416c, 0x3e: 0x4170, 0x3f: 0x4174, 0x40: 0x4178, 0x5b: 0x4180, 0x5c: 0x4181, 0x5d: 0x4182, 0x5e: 0x4183, 0x5f: 0x4184, 0x60: 0x4185, 0x7b: 0x4186, 0x7c: 0x4187, 0x7d: 0x4188, 0x7e: 0x4189, 0x7f: 0x41a0, 0x80: 0x41a4, 0x81: 0x41a8, 0x82: 0x41ac, 0x83: 0x41b0, 0x84: 0x41b4, 0x85: 0x41b8, 0x86: 0x41bc, 0x87: 0x41c0, 0x88: 0x41c4, 0x89: 0x41c8, 0x8a: 0x41cc, 0x8b: 0x41d0, 0x8c: 0x41d4, 0x8d: 0x41d8, 0x8e: 0x41dc, 0x8f: 0x41e0, 0x90: 0x41e4, 0x91: 0x41e8, 0x92: 0x41ec, 0x93: 0x41f0, 0x94: 0x41f4, 0x95: 0x41f8, 0x96: 0x41fc, 0x97: 0x4200, 0x98: 0x4204, 0x99: 0x4208, 0x9a: 0x420c, 0x9b: 0x4210, 0x9c: 0x4214, 0x9d: 0x4218, 0x9e: 0x421c, 0x9f: 0x4220, 0xa0: 0x4224, 0xa1: 0x4228, 0xa2: 0x422c, 0xa3: 0x4230, 0xa4: 0x4234, 0xa5: 0x4238, 0xa6: 0x423c, 0xa7: 0x4240, 0xa8: 0x4244, 0xa9: 0x4248, 0xaa: 0x424c, 0xab: 0x4250, 0xac: 0x4254, 0xad: 0x4258, 0xae: 0x425c, 0xaf: 0x4260, 0xb0: 0x4264, 0xb1: 0x4268, 0xb2: 0x426c, 0xb3: 0x4270, 0xb4: 0x4274, 0xb5: 0x4278, 0xb6: 0x427c, 0xb7: 0x4280, 0xb8: 0x4284, 0xb9: 0x4288, 0xba: 0x428c, 0xbb: 0x4290, 0xbc: 0x4294, 0xbd: 0x4298, 0xbe: 0x429c, 0xbf: 0x42a0, 0xc0: 0x42a4, 0xc1: 0x42a8, 0xc2: 0x42ac, 0xc3: 0x42b0, 0xc4: 0x42b4, 0xc5: 0x42b8, 0xc6: 0x42bc, 0xc7: 0x42c0, 0xc8: 0x42c4, 0xc9: 0x42c8, 0xca: 0x42cc, 0xcb: 0x42d0, 0xcc: 0x42d4, 0xcd: 0x42d8, 0xce: 0x42dc, 0xcf: 0x42e0, 0xd0: 0x42e4, 0xd1: 0x42e8, 0xd2: 0x42ec, 0xd3: 0x42f0, 0xd4: 0x42f4, 0xd5: 0x42f8, 0xd6: 0x42fc, 0xd7: 0x4300, 0xd8: 0x4304, 0xd9: 0x4308, 0xda: 0x430c, 0xdb: 0x4310, 0xdc: 0x4314, 0xdd: 0x4318, 0xde: 0x431c, 0xdf: 0x4320, 0xe0: 0x4324, 0xe1: 0x4328, 0xe2: 0x432c, 0xe3: 0x4330, 0xe4: 0x4334, 0xe5: 0x4338, 0xe6: 0x433c, 0xe7: 0x4340, 0xe8: 0x4344, 0xe9: 0x4348, 0xea: 0x434c, 0xeb: 0x4350, 0xec: 0x4354, 0xed: 0x4358, 0xee: 0x435c, 0xef: 0x4360, 0xf0: 0x4364, 0xf1: 0x4368, 0xf2: 0x436c, 0xf3: 0x4370, 0xf4: 0x4374, 0xf5: 0x4378, 0xf6: 0x437c, 0xf7: 0x4380, 0xf8: 0x4384, 0xf9: 0x4388, 0xfa: 0x438c, 0xfb: 0x4390, 0xfc: 0x4394, 0xfd: 0x4398, 0xfe: 0x439c, 0xff: 0x43a0, 0x51: 0x2018, 0x57: 0x2019, 0x45: 0x201a, 0x52: 0x201b, 0x54: 0x201c, 0x59: 0x201d, 0x55: 0x201e, 0x49: 0x201f, 0x4f: 0x2020, 0x50: 0x2021, 0x41: 0x2022, 0x53: 0x2023, 0x44: 0x2024, 0x46: 0x2025, 0x47: 0x2026, 0x48: 0x2027, 0x4a: 0x2028, 0x4b: 0x2029, 0x4c: 0x202a, 0x5a: 0x202b, 0x58: 0x202c, 0x43: 0x202d, 0x56: 0x202e, 0x42: 0x202f, 0x4e: 0x2030, 0x4d: 0x2031, 0x71: 0x2032, 0x77: 0x2033, 0x65: 0x2034, 0x72: 0x2035, 0x74: 0x2036, 0x79: 0x2037, 0x75: 0x2038, 0x69: 0x2039, 0x6f: 0x203a, 0x70: 0x203b, 0x61: 0x203c, 0x73: 0x203d, 0x64: 0x203e, 0x66: 0x203f, 0x67: 0x2040, 0x68: 0x2041, 0x6a: 0x2042, 0x6b: 0x2043, 0x6c: 0x2044, 0x7a: 0x2045, 0x78: 0x2046, 0x63: 0x2047, 0x76: 0x2048, 0x62: 0x2049, 0x6e: 0x204a, 0x30: 0x204b, 0x31: 0x204c, 0x32: 0x204d, 0x33: 0x204e, 0x34: 0x204f, 0x35: 0x2050, 0x36: 0x2051, 0x37: 0x2052, 0x38: 0x2053, 0x39: 
0x2054}
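# `a` appears to map each possible input byte to the address where that byte's
# encoding lives in the target binary; the loop below then emits one line of
# generator code per payload byte.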
buf = "4a07ea371a587a623482f86f23b7f3bc48cba1344e9f7fdac85998c5568b9a1caeccd44cb12137959601011bcc3e5aa7591142a4524f865de08335963bf772e95b3ffd9a13fef6239100"
buf = binascii.unhexlify( buf )
for i in buf:
print( "buf += p_8( get_db_byte(", hex( a[ i ] ), ") )" )
|
[
"revervand@MacBook-Air-Anon.local"
] |
revervand@MacBook-Air-Anon.local
|
6f6e10881068b659801fd52a1403787fb8c14723
|
9be00f2d22ad6594b278a2c47845c9f6315463ad
|
/category/migrations/0004_auto_20191118_0617.py
|
65bffd05c400479974d49dc79e0c2d4ce98473f3
|
[] |
no_license
|
RanaFrazKhan/KingBestMallBackend
|
207e66b3acc1f7f6fd0dcbf9e7cfe33f49a93f64
|
570032c625ffbaf51ac62585b4f8a6b81f3abb21
|
refs/heads/master
| 2022-03-16T02:10:13.716275
| 2019-11-27T13:48:23
| 2019-11-27T13:48:23
| 224,174,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
# Generated by Django 2.2.6 on 2019-11-18 06:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('category', '0003_auto_20191118_0520'),
]
operations = [
migrations.AlterField(
model_name='sub_categories',
name='Cat_ID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subname', to='category.Main_Categories'),
),
]
|
[
"rana.faraz@brainplow.com"
] |
rana.faraz@brainplow.com
|
be45ca90af900d93250db15965ddd47db514bb0b
|
4388363ba45b95910c25bae3d9c02ad78f4a75d6
|
/python/anaconda/pkgs/notebook-5.0.0-py27_0/bin/jupyter-serverextension
|
667d4d4d9ba8d68af1c25ae696d1f3e047806c81
|
[] |
no_license
|
locolucco209/MongoScraper
|
d494e02531f4f165b1e821633dc9661c579337b5
|
74476c9f00ee43338af696da7e9cd02b273f9005
|
refs/heads/master
| 2022-11-25T19:09:27.248747
| 2018-07-10T03:54:06
| 2018-07-10T03:54:06
| 137,553,786
| 3
| 1
| null | 2022-11-16T04:32:26
| 2018-06-16T04:49:22
| null |
UTF-8
|
Python
| false
| false
| 173
|
#!/opt/anaconda1anaconda2anaconda3/bin/python
if __name__ == '__main__':
import sys
import notebook.serverextensions
sys.exit(notebook.serverextensions.main())
|
[
"lukemassetti@WestSide-Luke.local"
] |
lukemassetti@WestSide-Luke.local
|
|
2741d5264563affb353f6ae75f914192654b32cd
|
2a43932e2248f209ca995f143d49cb66dbcc86dd
|
/SSG_client/matfyz/nswi177/my_ssg.py.save
|
2596e398997bd1dace801ec29da4a1a3f789a430
|
[] |
no_license
|
dominikrathan/Python
|
4a7fc4257be37e0ee4ce7fbc709612d293f25809
|
660a3138dd810208a20c922659422a1830e502b7
|
refs/heads/master
| 2022-11-12T04:21:24.295423
| 2020-07-02T11:47:00
| 2020-07-02T11:47:00
| 276,628,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,358
|
save
|
#!/usr/bin/env python3
"""
my_ssg - My simple static site generator for
the NSWI177 course at MFF CUNI.
"""
import argparse
import os
import pathlib
import sys
import matfyz.nswi177.mdyml as mdyml
import matfyz.nswi177.files as files
def generate_one_file(template, all_pages, base_dir, input_filename, destination_filename):
"""
Generate one HTML file from Markdown source.
Parameters
----------
template
Templating engine object.
all_pages: list
List of all pages (already pre-parsed).
base_dir: str
Base directory with content
input_filename: str
Filename to process.
destination_filename: str
Where to store the converted file.
"""
print("{} => {}".format(input_filename, destination_filename))
(meta, content) = mdyml.load_markdown_with_yaml_header_from_file(input_filename)
rendered = content
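# Sketch for the pending templating step: once a Jinja template is supplied
# (see the TODO in action_generate), rendering would look roughly like
# rendered = template.render(meta=meta, content=content)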
with open(destination_filename, 'w') as out:
out.write(rendered)
def get_html_filename(relative_markdown_filename):
"""
Get HTML filename from given Markdown filename.
"""
return os.path.splitext(relative_markdown_filename)[0] + '.html'
def action_generate(content_dir, templates_dir, static_dir, destination_dir):
"""
Callback for the `generate` action.
Parameters
----------
content_dir: str
Path to directory with input Markdown files.
templates_dir: str
Path to directory with Jinja templates.
static_dir: str
Path to directory with static files (CSS, images etc.).
destination_dir: str
Path to directory where to put the generated files.
"""
from jinja2 import Environment, FileSystemLoader, select_autoescape
env = Environment(
loader=FileSystemLoader(templates_dir),
autoescape=select_autoescape()
)
# TODO: get Jinja template here!
template = None
for (file_in, file_in_rel, relative_dirname) in files.relative_paths_walk(content_dir, '*.md'):
dir_out = os.path.join(destination_dir, relative_dirname)
file_out = os.path.join(destination_dir, get_html_filename(file_in_rel))
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
generate_one_file(template, [], content_dir, file_in, file_out)
def main():
"""
Entry point of the whole program.
Only parses command-line arguments and executes the right callback.
"""
args = argparse.ArgumentParser(description='My SSG')
args_sub = args.add_subparsers(help='Select what to do')
args.set_defaults(action='help')
args_help = args_sub.add_parser('help', help='Show this help.')
args_help.set_defaults(action='help')
args_version = args_sub.add_parser(
'version',
help='Show version of this tool.'
)
args_version.set_defaults(action='version')
args_generate = args_sub.add_parser(
'generate',
help='Generate the web.'
)
args_generate.set_defaults(action='generate')
args_generate.add_argument(
'--content',
dest='content_dir',
default='content/',
metavar='PATH',
help='Directory with source (content) files.'
)
args_generate.add_argument(
'--templates',
dest='templates_dir',
default='templates/',
metavar='PATH',
help='Directory with Jinja templates.'
)
args_generate.add_argument(
'--static',
dest='static_dir',
default='static/',
metavar='PATH',
help='Directory with static files (images, styles, ...).'
)
args_generate.add_argument(
'--destination',
dest='destination_dir',
default='out/',
metavar='PATH',
help='Directory where to store the result.'
)
if len(sys.argv) < 2:
# pylint: disable=too-few-public-methods,missing-class-docstring
class HelpConfig:
def __init__(self):
self.action = 'help'
config = HelpConfig()
else:
config = args.parse_args()
if config.action == 'help':
args.print_help()
elif config.action == 'generate':
action_generate(
config.content_dir,
config.templates_dir,
config.static_dir,
config.destination_dir
)
else:
raise Exception("Internal error, unknown action")
if __name__ == '__main__':
main()
|
[
"rathan.dominik@gmail.com"
] |
rathan.dominik@gmail.com
|
8e7e5dc0a360e26ee19ed473db213ee94c8030d7
|
dc47a524573e143aeebe604edf632503b115516f
|
/src/learn/experimenter.py
|
6b51238404cdba0b053fd4bb78c061c237adac45
|
[] |
no_license
|
bradcarter1994/trump-or-not-trump
|
186944ab6654964a0fc7b04abbdb96a63d46e4ef
|
2a5cb63e0691d40f879c1cd86eef01615c829472
|
refs/heads/master
| 2020-04-05T05:42:12.749392
| 2018-12-05T21:17:21
| 2018-12-05T21:17:21
| 156,607,037
| 0
| 0
| null | 2018-12-05T21:17:23
| 2018-11-07T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
import argparse
class Experimenter:
def __init__(self):
pass
def main(self):
raise NotImplementedError
def parser(self):
parser = argparse.ArgumentParser(description='Machine Learning System Manager')
parser.add_argument('-V', '--verbose', action='store_true', help='Print debug information')
return parser
def run_experiments(self):
raise NotImplementedError
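# A concrete subclass is expected to override main()/run_experiments();
# a minimal, hypothetical sketch:
# class MyExperimenter(Experimenter):
#     def main(self):
#         args = self.parser().parse_args()
#         if args.verbose:
#             print("running experiments")
#         self.run_experiments()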
if __name__ == '__main__':
Experimenter().main()
|
[
"bradcarter1994@gmail.com"
] |
bradcarter1994@gmail.com
|
6a76881dceed0be75445a3cac3cb9816b26109b0
|
5228d0cfda662eb16b739a01690a6378436dc03b
|
/flow-builder.py
|
0e9fb1f0331ab40bde7ec5b13b8ec21587de7419
|
[] |
no_license
|
vgthoppae/appflow-compliance
|
23fe7752adeaace34f72da367e3cce4a56df7a77
|
e6ff7e984fd5cbcccedde79b9e7d509da473c4b0
|
refs/heads/main
| 2023-05-10T04:33:29.230976
| 2021-06-01T12:44:01
| 2021-06-01T12:44:01
| 360,546,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
import json, boto3
client = boto3.client('appflow', region_name='us-east-1')
flow_name = 'prod-flow'
def build():
with open('config/source_config.json') as source_config_contents:
source_config = json.load(source_config_contents)
with open('config/dest_config.json') as dest_config_contents:
dest_config = json.load(dest_config_contents)
with open('config/tasks_config.json') as tasks_config_contents:
tasks_config = json.load(tasks_config_contents)
response = client.create_flow(
flowName = flow_name,
triggerConfig = {
'triggerType': 'OnDemand'
},
sourceFlowConfig = source_config,
destinationFlowConfigList = dest_config,
tasks = tasks_config
)
print(response)
if __name__ == '__main__':
build()
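# Once created, this on-demand flow can be started with the same client:
# client.start_flow(flowName=flow_name)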
|
[
"venkat.thoppae@gmail.com"
] |
venkat.thoppae@gmail.com
|
5b9e5d6d6c233fcacaf5dc6fbc9c4b12589fb903
|
daa2c79f7c774b287b71612b374bc5383f294193
|
/explore_dataset.py
|
7bb2838037ae938f8565d68d7aa51850caf5f2a7
|
[] |
no_license
|
phuongdoviet/VCCorp20173_MiniProject
|
41b112bb2aea94aff57b0978f0c04c72bfefa16e
|
284950e37124b601a71874853b0421ea1dff2974
|
refs/heads/master
| 2020-03-26T15:42:03.560444
| 2018-08-16T12:12:09
| 2018-08-16T12:12:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
import os, time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils
from preprocessing import FeatureTransformer
if __name__ == "__main__":
# Load data to explore
training_file_path = "./Dataset/encoded_training_data_345.json"
# test_file_path = "./Dataset/data_sent.json"
# training_data = utils.load_data(training_file_path)
training_data, labels = FeatureTransformer.load_encoded_data(training_file_path)
# training_size = len(training_data)
# test_data = utils.load_data(test_file_path)
# test_size = len(test_data)
# print("Training data size : ", training_size)
# print("Test data size : ", test_size)
print("========================================")
# training_df = utils.convert_original_data_to_df(training_data)
# print(training_df.info())
print("\nStatistic")
# stats_by_label = training_df.label.value_counts().sort_index().reset_index()
stats_by_label = pd.DataFrame(labels, columns=["label"]).label.value_counts().sort_index().reset_index()
cols = ["label", "total(%)"]
stats_by_label.columns = cols
# stats_by_label["total"] = stats_by_label["total"] / stats_by_label["total"].sum() * 100
print(stats_by_label.head())
print("Number distinct label : ", stats_by_label.shape[0])
utils.plot_stats_count(stats_by_label, is_save=True)
|
[
"quancq.it@gmail.com"
] |
quancq.it@gmail.com
|
0005181720c920227c79102a8f0b946a124a3609
|
8552de797d96609b6ab306f0a30e0924d4da17da
|
/code/pather.py
|
d2b1bccf38e5246be4cce78b3893da57d6a44ec1
|
[] |
no_license
|
YanB25/Snake
|
49235f150a66c57940028a2c4359d37954e13034
|
d8409888c416e77499ce509864274d05670409c9
|
refs/heads/master
| 2020-04-10T18:45:12.167093
| 2018-12-12T06:02:51
| 2018-12-12T06:02:51
| 161,211,494
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,613
|
py
|
import json
from snake import Snake
from fruit import Fruit
import queue
import copy
class PathSolve():
def __init__(self, snake, fruit, config):
self.snake = copy.deepcopy(snake)
self.fruit = copy.deepcopy(fruit)
self.config = config
width = int(config['window-width'])
height = int(config['window-height'])
blk = int(config['block-size'])
self.width = int(width / blk)
self.height = int(height / blk)
self.delX = [0, 0, -1, 1]
self.delY = [-1, 1, 0, 0]
self.dir = ['U', 'D', 'L', 'R']
self.rev_dir = ['D', 'U', 'R', 'L']
self.rev_map = {
'U': 'D',
'D': 'U',
'L': 'R',
'R': 'L'
}
def shortest_path(self, target):
'''
find the shortest path. snake body is blocked.
@param target tuple of size 2. the x-y pair of destination
@ret Tuple(Boolean, List[Str]). first element tells you whether
it has a shortest path. Second element tells you the path from
SNAKE HEAD to target. Empty list if no path.
'''
# print('target', target)
# print('snake', self.snake.head())
game_map = [
['O' for i in range(self.width)]
for j in range(self.height)
]
dir_map = copy.deepcopy(game_map)
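# dir_map records, for each visited cell, the move leading back toward the
# head; __findPath later walks it backwards from the target and reverses it.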
for x, y in self.snake.snakebody[1:]:
game_map[y][x] = 'X' # block
# PathSolve.printList(game_map)
x, y = target
game_map[y][x] = 'F'
q = queue.Queue()
q.put(self.snake.head())
while not q.empty():
x, y = q.get()
# print(x, y, game_map[y][x])
if game_map[y][x] != 'O':
continue
# print(x, y)
game_map[y][x] = 'X'
for i in range(4):
nx = x + self.delX[i]
ny = y + self.delY[i]
if not self.isValid(nx, ny):
continue
if game_map[ny][nx] == 'F':
dir_map[ny][nx] = self.rev_dir[i]
return True, self.__findPath(dir_map, target)
if game_map[ny][nx] != 'O':
continue
q.put((nx, ny))
# print('put', nx, ny)
dir_map[ny][nx] = self.rev_dir[i]
return False, []
def longest_path(self, target):
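# Widen the shortest path into a longer one: for each move, try to replace it
# with a three-move detour (side-step, original move, step back) whenever both
# detour cells are free; repeat until no step can be extended.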
y, sp = self.shortest_path(target)
if not y:
return False, []
game_map = [
['O' for i in range(self.width)]
for j in range(self.height)
]
for x, y in self.snake.snakebody:
game_map[y][x] = 'X'
idx = 0
cur = self.snake.head()
while True:
extended = False
direction = sp[idx]
if direction in ['U', 'D']:
test_extend = ['L', 'R']
else:
assert(direction in ['L', 'R'])
test_extend = ['U', 'D']
next_cur = self.pos_move(cur[0], cur[1], direction)
for d in test_extend:
t1 = self.pos_move(cur[0], cur[1], d)
t2 = self.pos_move(next_cur[0], next_cur[1], d)
if self.__exstendable(*t1, game_map) and self.__exstendable(*t2, game_map):
extended = True
sp.insert(idx, d)
sp.insert(idx + 2, self.rev_map[d])
x, y = t1
game_map[y][x] = 'X'
x, y = t2
game_map[y][x] = 'X'
break
if not extended:
cur = next_cur
idx += 1
if idx >= len(sp):
break
return True, sp
def __findPath(self, dir_map, target):
'''
inner function called by shortest path
'''
# PathSolve.printList(dir_map)
x, y = target
ret = []
while dir_map[y][x] != 'O':
ret.append(self.rev_map[dir_map[y][x]])
i = self.dir.index(dir_map[y][x])
# print('step', dir_map[y][x], i)
nx = self.delX[i]
ny = self.delY[i]
x, y = x + nx, y + ny
return ret[::-1]
@staticmethod
def printList(ls):
for i in ls:
print(i)
print()
def isValid(self, x, y):
return x >= 0 and x < self.width and y >= 0 and y < self.height
def __exstendable(self, x, y, game_map):
return self.isValid(x, y) and game_map[y][x] == 'O'
def pos_move(self, x, y, d):
i = self.dir.index(d)
delX = self.delX[i]
delY = self.delY[i]
return (x + delX, y + delY)
def shortest_path_fruit(self):
return self.shortest_path(self.fruit.where())
def longest_path_tail(self):
return self.longest_path(self.snake.snakebody[-1])
if __name__ == '__main__':
dt = None
with open('config.json', 'r') as f:
dt = json.load(f)
with open ('output_9223372036568707794.log', 'r') as flog:
data = flog.read()
data = data.strip().split('\n')
snake = Snake()
snake.snakebody = []
for d in data:
x, y = d.split(' ')
x = int(x)
y = int(y)
snake.snakebody.append((x, y))
fruit = Fruit(dt)
fruit.last_generate = (3, 8)
solver = PathSolve(snake, fruit, dt)
# print(solver.longest_path(fruit.where()))
print(solver.longest_path(snake.snakebody[-1]))
|
[
"yanb25@mail2.sysu.edu.cn"
] |
yanb25@mail2.sysu.edu.cn
|
9067ba5d706c78c3468b537a19d3d1530b92d744
|
1e420c670a8b457d4a046e4559a9e72bacb77572
|
/tools/initial_abundance/pinput_radii_reader.py
|
ea66fd6637c22b30fa0836f46f13a53fd98baa29
|
[
"BSD-3-Clause"
] |
permissive
|
vishaltiwari/XNet
|
b213bb9cfcb60537eac98f08ceb990de1babc591
|
a7ad8d9a610edb1d522fef56b8ed1067fb39f30f
|
refs/heads/master
| 2020-06-07T09:13:44.373139
| 2019-06-28T00:13:27
| 2019-06-28T00:13:27
| 192,984,409
| 0
| 0
|
BSD-3-Clause
| 2019-06-20T20:36:56
| 2019-06-20T20:36:55
| null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
def pinput_radii_reader(pinput_file):
import numpy as np
import imp
#Read file of radii for each row of particles
radius_pathname = pinput_file
fradius = open(radius_pathname,'r')
rad_list = np.loadtxt(fradius,skiprows=1,usecols=(0,))
with open(radius_pathname, 'r') as f_pnum:
line_1 = f_pnum.readline()
print line_1
line_list = line_1.split()
n_part_x = int(line_list[1])
n_part_y = int(line_list[3])
n_part_z = int(line_list[5])
return rad_list,n_part_x,n_part_y,n_part_z
|
[
"jaharris87@users.noreply.github.com"
] |
jaharris87@users.noreply.github.com
|
52a78ecf355ee0e7f8b171659bee062669b20d41
|
20777c40d35fa4b9b535a76545c9300a57624657
|
/prediction_baselines.py
|
95d10e7bf50f75a78a65cd04667381f49fc9bbbb
|
[] |
no_license
|
robsdedude/graph-pattern-learner
|
6e658a3a4c56bbdbcf26f4f128dd98b09629cca7
|
eb8ddd14732cd719e67a712c11b324152a41baa6
|
refs/heads/master
| 2021-06-20T04:34:42.452313
| 2017-06-07T22:20:51
| 2017-06-07T22:20:51
| 91,547,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
#!/usr/bin/env python2.7
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from SPARQLWrapper import SPARQLWrapper
from rdflib import URIRef
from rdflib import Literal
from rdflib import Variable
from splendid import get_path
import config
from gp_learner import find_in_prediction
from graph_pattern import TARGET_VAR
from ground_truth_tools import get_semantic_associations
from ground_truth_tools import split_training_test_set
import gp_query
from utils import sparql_json_result_bindings_to_rdflib
TIMEOUT = 30
LIMIT = 300
def predict_target_with_query(
sparql, query, source, timeout=TIMEOUT, limit=LIMIT):
"""Predicts target with given query.
For example for pagerank_bidi:
SELECT distinct(?target) ?score {
{ dbr:Circle ?p ?target .}
UNION
{ ?target ?q dbr:Circle . }
?target dbo:wikiPageRank ?score
}
ORDER BY DESC(?score)
LIMIT 100
"""
q = query % {'source': source.n3()}
q += '\nLIMIT %d' % limit
t, q_res = gp_query._query(sparql, timeout, q)
res_rows_path = ['results', 'bindings']
bindings = sparql_json_result_bindings_to_rdflib(
get_path(q_res, res_rows_path, default=[])
)
target_scores = [
(get_path(row, [TARGET_VAR]), get_path(row, [Variable('score')]))
for row in bindings]
# print(target_scores)
return target_scores
def query_template(name, triple):
template_out = '''
SELECT distinct(?target) ?score {
%(source)s ?p ?target .
%(triple)s
}
ORDER BY DESC(?score)
'''
template_in = '''
SELECT distinct(?target) ?score {
?target ?p %(source)s .
%(triple)s
}
ORDER BY DESC(?score)
'''
template_bidi = '''
SELECT distinct(?target) ?score {
{ %(source)s ?p ?target .}
UNION
{ ?target ?q %(source)s . }
%(triple)s
}
ORDER BY DESC(?score)
'''
names = map(lambda s: name + '_%s' % s, ['out', 'in', 'bidi'])
replace = {
'triple': triple,
'source': '%(source)s', # we want to keep '%(source)s' for later
}
queries = map(lambda t: t % replace,
[template_out, template_in, template_bidi])
return dict(zip(names, queries))
prediction_queries = {}
prediction_queries.update(
query_template('pagerank', '?target dbo:wikiPageRank ?score .')
)
prediction_queries.update(
query_template('hits', '?target dbo:wikiHITS ?score .')
)
prediction_queries.update(
query_template('outdeg', '?target dbo:wikiPageOutLinkCountCleaned ?score .')
)
prediction_queries.update(
query_template('indeg', '?target dbo:wikiPageInLinkCountCleaned ?score .')
)
def main():
semantic_associations = get_semantic_associations(
config.GT_ASSOCIATIONS_FILENAME)
assocs_train, assocs_test = split_training_test_set(
semantic_associations, variant='random'
)
# setup node expander
sparql = SPARQLWrapper(config.SPARQL_ENDPOINT)
predict_set = assocs_test
for method, query in sorted(prediction_queries.items()):
target_idxs = []
for source, target in predict_set:
prediction = predict_target_with_query(sparql, query, source)
target_idxs.append(find_in_prediction(prediction, target))
print("'%s': %s," % (method, target_idxs))
if __name__ == '__main__':
main()
|
[
"dev@joernhees.de"
] |
dev@joernhees.de
|
ce2c633262339c77e0699a3d819f196a5f81baa6
|
c9b0251fffad837bb39acb11760fd210b20a0454
|
/lib/blocks.py
|
ad1e802c7a9aadcde1eade9f238b3b982cb43ddf
|
[
"MIT"
] |
permissive
|
jpinsonault/blocks
|
7ea9456318dab7453c96f9b87547c3297bbf3c0a
|
9e62012309aa3ccb291e31f01824c329c74bd165
|
refs/heads/master
| 2021-01-20T05:41:23.526762
| 2017-08-29T02:51:06
| 2017-08-29T02:51:06
| 101,465,050
| 0
| 1
| null | 2017-08-29T02:51:07
| 2017-08-26T05:34:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,469
|
py
|
import pygame
from pygame.locals import *
from random import randint
import time
### Grid
def create_grid(width, height, colors):
column = lambda: [colors['grey']() for _ in range(width)]
rows = [column() for _ in range(height)]
return rows
def move_to_pos(grid, row, column, creature, color_map):
grid[row][column] = color_map['blue']()
return grid
### Creature
def create_creature():
creature = {'position': {'row': 0, 'column': 0}}
return creature
def ask_creature_where_to_move_to(creature):
return ['up', 'down', 'left', 'right'][randint(0, 3)]
def move_creature(creature, grid, color_map):
#import pdb; pdb.set_trace()
row = creature['position']['row']
column = creature['position']['column']
direction = ask_creature_where_to_move_to(creature)
grid[row][column] = color_map['grey']()
new_pos = [row, column]
if row > 0 and direction == 'up':
new_pos = [row-1, column]
elif row < (len(grid) - 1) and direction == 'down':
new_pos = [row+1, column]
elif column > 0 and direction == 'left':
new_pos = [row, column-1]
elif column < (len(grid[0]) - 1) and direction == 'right':
new_pos = [row, column+1]
creature['position']['row'] = new_pos[0]
creature['position']['column'] = new_pos[1]
print(new_pos)
grid = move_to_pos(grid, new_pos[0], new_pos[1], creature, color_map)
return grid
### App
class App:
def __init__(self):
self._running = True
self._display_surf = None
self.width = 4
self.height = 4
self.scale = 100
self.win_width = self.width * self.scale
self.win_height = self.height * self.scale
self.size = self.win_width, self.win_height
rand_color = lambda: randint(0, 255)
blue = lambda: (0, 128, 255)
grey = lambda: (128, 128, 128)
#color_map = {'blue': lambda: (rand_color(), rand_color(), 255)}
self.color_map = {'blue': blue, 'grey': grey}
self.grid = create_grid(self.width, self.height, self.color_map)
self.creature = create_creature()
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)
self._running = True
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
def on_loop(self):
pass
def on_render(self):
scale_for_box = lambda x: int(self.scale / len(x))
width = scale_for_box(self.grid[0])
height = scale_for_box(self.grid)
for i, row in enumerate(self.grid):
for j, color in enumerate(row):
x = j * self.scale
y = i * self.scale
if not type(color) == type(tuple()):
#import pdb; pdb.set_trace()
pass
pygame.draw.rect(self._display_surf, color, pygame.Rect(x, y, self.scale-5, self.scale-5))
pygame.display.flip()
self.grid = move_creature(self.creature, self.grid, self.color_map)
time.sleep(.1)
def on_cleanup(self):
pygame.quit()
def on_execute(self):
if self.on_init() == False:
self._running = False
while (self._running):
for event in pygame.event.get():
self.on_event(event)
self.on_loop()
self.on_render()
self.on_cleanup()
|
[
"ben@weaveup.com"
] |
ben@weaveup.com
|
7179647af042fc71cb587a3d24047fe7ca109158
|
aad3b4ec3f36bb3ebb00288e77dc2dfbb9e72d78
|
/Baseball_Stats/Django_Stats/models.py
|
d4eca5458e9c432b4c996af88252b3f875471b92
|
[] |
no_license
|
zseaborn63/Baseball
|
0430629be3c01f69d2901fb4f2fd4f4b5b28d9ec
|
3f2db5356befe6e7be8eb8d3c92d93a302c4134b
|
refs/heads/master
| 2021-01-10T01:49:17.791825
| 2016-02-19T17:13:27
| 2016-02-19T17:13:27
| 51,536,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
from django.db import models
# Create your models here.
class Player(models.Model):
lahmanID = models.IntegerField()
player_key = models.CharField(max_length=10)
managerID = models.CharField(max_length=10, blank=True)
hofID = models.CharField(max_length=10, blank=True)
birthYear = models.IntegerField()
birthMonth = models.IntegerField()
birthDay = models.IntegerField()
birthCountry = models.CharField(max_length = 50)
birthState = models.CharField(max_length = 2, blank=True)
birthCity = models.CharField(max_length = 50)
deathYear = models.IntegerField(blank=True)
deathMonth = models.IntegerField(blank=True)
deathDay = models.IntegerField(blank=True)
deathCountry = models.CharField(max_length = 50, blank=True)
deathState = models.CharField(max_length = 2, blank=True)
deathCity = models.CharField(max_length = 50, blank=True)
nameFirst = models.CharField(max_length=50)
nameLast = models.CharField(max_length=50)
nameNote = models.CharField(max_length=255)
nameGiven = models.CharField(max_length=255)
nameNick = models.CharField(max_length=255)
height = models.IntegerField()
weight = models.IntegerField()
bats = models.CharField(max_length=1, choices=(('L', 'Left'), ('R', 'Right'), ('B', 'Both')))
throws = models.CharField(max_length=1, choices=(('L', 'Left'), ('R', 'Right'), ('B', 'Both')))
debut = models.CharField(max_length=12)
finalGame = models.CharField(max_length=12, blank=True)
college = models.CharField(max_length=50, blank=True)
lahman40ID = models.CharField(max_length=9)
lahman45ID = models.CharField(max_length=9)
holtzID = models.CharField(max_length=9)
bbrefID = models.CharField(max_length=9)
def __str__(self):
return self.player_key
|
[
"zseaborn63@gmail.com"
] |
zseaborn63@gmail.com
|
0052e5593e820026f416055ebfbc862c2af184f2
|
3c0508785ac17f1b48e385c6273a1e068130ceac
|
/user_items
|
0819be02cba3a0a57b326beaf5d28d2b71639c2a
|
[] |
no_license
|
zhanchangbao/recommand-system
|
735430505897dc448ac76f015cd6160d86236e9d
|
5dc4d74b60cee0300961129b8b69f1c2df8c340d
|
refs/heads/master
| 2020-04-18T00:57:25.536076
| 2018-04-09T13:59:46
| 2018-04-09T13:59:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,176
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-#!
#time:2018/2/9 ____author____= pengxiaoxin
import pandas as pd
import numpy as np
import math
from operator import *
rating = pd.read_csv('C:\\Users\\Administrator\\Desktop\\movie-data\\ml-latest-small\\ratings.csv')
#ratings.csv's columns are user,movieId,rating,timestamp
user = rating['userId']
movieId = rating['movieId']
rating1 = rating['rating'] # the user's rating of the movie
timestamp = rating['timestamp'] # time the rating was given
user_item = dict() # build a dict of the form {userid: [movieid, movieid], ...} since one user watches several movies
for i in range(len(user)):
if user[i] not in user_item:
user_item[user[i]] = set()
user_item[user[i]].add(movieId[i])
item_user = dict() # build the item->user inverted index, to reduce the table's sparsity
for i in range(len(user)):
if movieId[i] not in item_user:
item_user[movieId[i]] = set() # a set can be seen as an array without duplicates
item_user[movieId[i]].add(user[i])
N = dict() # counter: how many times each movie was watched
C = dict() # number of times two items were watched by the same user
for i,items in item_user.items():
for item in items:
if item not in N:
N[item] = 0
N[item]+=1
for item2 in items:
if item ==item2:
continue
if (item, item2) not in C:
C[item,item2] = 0
C[item,item2]+=1
w = dict() # compute the coefficient table via C(1,2)/sqrt(N[1]*N[2]), i.e. how strongly two items are related; to factor in time, fold it in with a log
for i ,item in C.items():
w[i[0],i[1]] = C[i[0], i[1]]/math.sqrt(N[i[0]] * N[i[1]] *1.0)
rank = dict() # recommendation list
target_user = 1 # input("write the user you want to recommend:")
train = user_item[target_user]
ralated_item = dict()
for i in w:
if i[0] in train and i[1] not in train: # find co-occurring movie pairs where exactly one of the two was watched by target_user
if i[1] not in ralated_item:
ralated_item[i[1]] = 0
ralated_item[i[1]] += w[i[0],i[1]] # accumulate similarity across all watched neighbors
print(sorted(ralated_item.items(), key = itemgetter(1), reverse=True)[:10]) # print the top 10
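# A tiny worked example of the similarity above (hypothetical histories, not
# taken from the MovieLens file): with user->items {1: {10, 20}, 2: {10, 30},
# 3: {10, 20, 30}} we get N[10]=3, N[20]=2 and C[10,20]=2, hence
# w[10,20] = 2 / sqrt(3 * 2), roughly 0.82.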
|
[
"noreply@github.com"
] |
zhanchangbao.noreply@github.com
|
|
3fbf9686fdab322a9bb64352bcb0e47b8d8ec3f1
|
61e82c9fd238483bceeae6670df2de6732daba21
|
/La_Course/src/Test_ing.py
|
049cd338a35b15340a3a7361593ecd6602c2d292
|
[] |
no_license
|
h4r4ld-git/P1-A6
|
7b729462b09f0f6be96d0842d74d471994a6193b
|
90a3ebb736bfe65be55b0153758e60281dbc1aaa
|
refs/heads/main
| 2023-03-18T05:35:33.495972
| 2021-03-14T14:14:01
| 2021-03-14T14:14:01
| 345,929,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
import unittest
import correct
import student
class TestLa_Course(unittest.TestCase):
def test_La_course(self):
a = "Luka: 14\nIsam :1\n6 : 001\nAwidYamak:7\Bouchlaghem : 9\n Ismail;7\njdef:zefezf\n4: Mohamed\nSaid?5\n\n\njkfjr\n54564\n'(-è: 2"
i = "Ismail: 14\nIzadin :1\n6 : 001\nBoukabouz:9\nRafik : 7\n Ismail;7\njdef:zefezf\n4: Mohamed\nSaid?5\n\n\njkfjr\n58:Samad"
b = "Ismail: 20\nRomayssa :1\n6 : 001\nKartasou:7\nSaid : 9\n Ismail;7\njdef:?!\n4: Mohamed\nSaid?5\n\n\njkfjr\n\n\n;;;..?"
c = "Mohamed: 15\nRodayna :13\n6 : 001\nRiiF:715\nAbjij : 14\n Ismail;7\njdef:zefezf\n4: Mohamed\nSaid?5\n\n\njkfjr\;,?:325"
d = "Zafzafi: 19\nNassir :16\n6 : 001\nZafzafi:25\nAhdidouch : 24\n Ismail;7\njdef:zefezf\n4: Mohamed\nSaid?5\n\n\njkfjr\n$*^:&é3"
e = "Zafzafi: 1\nHamza :16\n6 : 001\nAwidAbak:15\nFikri : 24\n Ismail;7\njdef:zefezf\n4: Mohamed\nSaid?5\n\n\njkfjr\nSamir: 6"
f = "Karim: 6\nBilal :8\n6 : 001\nYouta:65\nGholam : 7\n Ismail;7\njdef:zefezf\n4: Mohamed\nSaid?5\n\n\njkfjr\n54564\nSaid:10\n\n\neff:64\nytu:Ay7"
g = "Ilyass: 1\nSaffah :5\n6 : 001\nHalamala:715\nRayan : 4\n Ismail;7\njdef:zefezf\n4: Mohamed\nSaid?5\n\n\njkfjr\n54564\nImane:2"
h = "Kamal: 1\nImad :5\n6 : 001\nAyoube:715\Lina : 4\nMohamed:10\nYassir:3\nRayan:14\nImran:2\nSouraya: 7\nImane:4\nAnissa:15"
j = "Mohamed:3\nSalwa:2\nIsmail:4\nAymane:6\nAyoube:9\nRodayna:12\nImran:13\nYassir:14\nRayan:15\nFikri:1"
k = "Sara:11\nNisrine:12\nSophia:13\nMarouan:14\nFatiha:15"
m = "Wassima:1\nWassim:2\nWadie:3\nMohamed:4\nfjf:23\n2:Halim\??:2"
l = [a,b,c,d,e,f,g,h,i,j,k,m]
for i in range(len(l)):
rep = ("Votre fonction a retourné {} alors que la réponse attendue est {}")
f1 = open("Timssah_i+1.txt","w")
f1.write(l[i])
f1.close()
try:
student_course = student.La_course("Timssah_i+1.txt")
except Exception as error:
self.fail("la fonction 'La_course' a provoqué l'exception {}: {}".format(type(error), error))
correct_course = correct.La_course("Timssah_i+1.txt")
self.assertEqual(student_course, correct_course, rep.format(student_course, correct_course))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"luka.chkoidze@student.uclouvain.be"
] |
luka.chkoidze@student.uclouvain.be
|
ea1c10e06fee9d8a040e69ae1b16700b007c9f87
|
939266abe034b73c513b65b1d9d73167494655a8
|
/script_job_file_stage_plus.py
|
7c4589eca45b42f6c90c5b1278c8b9620ff59231
|
[] |
no_license
|
pmbasene/xml_log
|
6bfbce24977c9e7eb51e9d2879b2ffcb6aec74a1
|
c73e018e9aba14a95e5b186d2ab80a9786ab754a
|
refs/heads/master
| 2020-07-23T09:48:11.791877
| 2019-10-10T09:46:17
| 2019-10-10T09:46:17
| 207,346,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,343
|
py
|
#!/anaconda3/bin/python
# coding: utf-8
import os
from lxml import etree
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
from pprint import pprint
# import random
# from XML_HANDLE import *
from XML_HANDLE import Xml_logfile
from clean_file import CleanFolder
"""
MODULE DESCRIPTION
--------------------------
Modules:
- lxml: used to parse the xml files.
For more details, consult the lxml package documentation: https://docs.python.org/3.4/library/xml.etree.elementtree.html#module-xml.etree.ElementTree ;)
- Pandas and Numpy allow working with dataframes and manipulating the data more easily.
- clean_file.py cleans up the folder containing the executables beforehand.
Tasks:
- extract the values of the keys (jobname, filename, directory), stored as strings or variables, from the XML files.
- recover their exact values by using the jobnames from the xml files to find their matching executable files.
"""
class ParseElementXML():
"""cette classe a ete creee dans l'optique de faciliter l'extraction de données à partir d'un fichier de format XML.
Elle comporte un ensemble de fonctions qui permettent de recuperer, manipuler, et extraire à partir du xml-projet
Note: Pour l'instant afin d'eviter toutes erreurs d'excecution, les fichier XML doivent etre dans le meme dossier que le prog main_scrpit.py """
def document(self, fileProjetXML="SUPprd.xml"):
#DOC-FTSACprd.xml, SUPprd.xml , MGTPRD.xml
"""cette fonction prend en input le nom du fichier .xml et renvoie le path absolu. Ce dernier sera utilise aussi en input par la fonction
getroot() pour instancier le module lmxl.etree """
basePath = os.path.dirname(
__file__)  # absolute path of this file's folder
fullPath = os.path.join(basePath, fileProjetXML)
# fullPath = os.path.abspath(fileProjetXML)
# os.chdir(fullPath)
# # ---------------------------------------------------------------------,
# try:
# basePath = os.path.dirname(__file__)
# fullPath = os.path.join(basePath, fileProjetXML)
# print(fullPath)
# return fullPath
# except OSError:
# return "verifier bien le path du fichier. Il doit etre dans le dossier"
# __________________________________#
# print(fullPath)
return fullPath
def getRoot(self, fullPath):
"""getroot() prend en input le path absolu un objet appele root """
tree = etree.parse(fullPath)
root = tree.getroot()
# print(root.tag, root.attrib)
# print(f"Infos - nombre de child pour {root.tag}:", len(root))
# print("_________-------_____----Header------___----___----___----___ ")
return root
def removeDuplicates(self, listDoublons):  # not used
'''This method removes duplicates from a list.
It takes a list of elements and returns the same list with all duplicated elements removed.'''
liste = []
for i in listDoublons:
if i not in liste:
liste.append(i)
# return liste
# liste = [liste.append(x) for x in listDoublons if x not in liste] # list method comprehension
return liste
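# Note: on Python 3.7+, list(dict.fromkeys(listDoublons)) is an equivalent
# order-preserving one-liner.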
def recupererCleOuValeurInString(self, string, sep=" "):
'''Takes a character string <str> and a separator (=, ",", ";", ":", etc.)
and returns the key and the value of the split string.'''
key, val = string.split(sep)
Key = key.strip()
Val = val.strip()
return Key, Val
def recuperer_PAR_dir(self, string):
""" cette fonction prend en entree une la valeur du file du xML (format string) et retourne une liste d'elements filtres (format list).
Comment ca marche : Elle splite d'abord une chaine de caractere en se basant sur le separator # qui est donne en entree, ensuite filtre tous les items non desirables
tels que les items vides, underscore, etc. et retourne finalement que les variables-repertoires du logfile """
PAR_list = [] # RESULTAT A RECUPERER COMME RETURN DE LA FONCTION
# LIST DES CARACTERES EXCLUS COMME PREMIER ITEM DE LA LISTE
caract_exclu = ['', '_']
result = string.split("\\")
for res in result:
if 'PAR' in res:
res = res.split("#")
# print(res)
del res[0]
for r in res:
if r not in caract_exclu and 'DS_' not in r:
PAR_list.append(r)
if 'DS_' in r:
r, r_ext = os.path.splitext(r)
PAR_list.append(r)
PAR_list.append(r_ext)
# print(res)
# print(PAR_list)
return PAR_list
def buildFullPath(self, PAR_list, realFilePath):
# global realFilePathFull
extensions = ['.ds', '.ext', 'csv']
extension = PAR_list[-1]
DSfile = PAR_list[-2]
if extension in extensions:
if 'PAR' not in DSfile:
# realFilePathFull = realFilePath+DSfile+extension
return realFilePath+DSfile+extension
else:
# realFilePathFull = realFilePath+extension
return realFilePath+extension
elif extension not in extensions:
return realFilePath+extension
def makePathForFileOrFileDataset(self, filePath, PAR_list):
""" cette fonction prend en input le filePath cad la liste des key-repertoire des chemins PATH des files,... """
realFilePath = " "
if len(filePath) == 1:
realFilePath = os.path.join(filePath[0])
realFilePath = self.buildFullPath(PAR_list, realFilePath)
elif len(filePath) == 2:
realFilePath = os.path.join(filePath[0], filePath[1])
realFilePath = self.buildFullPath(PAR_list, realFilePath)
elif len(filePath) == 3:
realFilePath = os.path.join(filePath[0], filePath[1], filePath[2])
realFilePath = self.buildFullPath(PAR_list, realFilePath)
elif len(filePath) == 4:
realFilePath = os.path.join(
filePath[0], filePath[1], filePath[2], filePath[3])
realFilePath = self.buildFullPath(PAR_list, realFilePath)
elif len(filePath) == 5:
realFilePath = os.path.join(
filePath[0], filePath[1], filePath[2], filePath[3], filePath[4])
realFilePath = self.buildFullPath(PAR_list, realFilePath)
elif len(filePath) == 6:
realFilePath = os.path.join(
filePath[0], filePath[1], filePath[2], filePath[3], filePath[4], filePath[5])
realFilePath = self.buildFullPath(PAR_list, realFilePath)
elif len(filePath) == 7:
realFilePath = os.path.join(
filePath[0], filePath[1], filePath[2], filePath[3], filePath[4], filePath[5], filePath[5])
realFilePath = self.buildFullPath(PAR_list, realFilePath)
# else:
# fileValueTrueRecord.append(None)
return realFilePath
def retrieveFilePath(self, fileValue, blockTextPar):
# the file paths work correctly when the block is at the start of the logfile; a solution is still needed for the case where the block sits at some line x of the logfile. For that, refer to the PAR variable.
filePath = []
# retrieve the directory parameters
# print("PAR",len(PAR_list),PAR_list)
blockTextPar_list = blockTextPar
PAR_list = self.recuperer_PAR_dir(fileValue)
# a PAR_list of length 0 means the block is not at the start of the logfile
for par in PAR_list:
for line in blockTextPar_list:
# line = line.strip() # removes all leading and trailing spaces
if r'=' in line: # guards against an unpacking exception (too many values)
kline, vline = self.recupererCleOuValeurInString(
line, sep="=")
if par == kline:
filePath.append(vline)
return PAR_list, filePath
def funcname(self, *param):
#
# PropertyOrcollection, fileValueRecord, fileValueTrueRecord, datasetValueRecord, datasetValueTrueRecord
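# NOTE: work-in-progress helper; it reads names such as PropertyOrcollection,
# jobN, jobFromXML, logfile and blockTextPar from the enclosing module scope
# rather than receiving them as parameters.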
if PropertyOrcollection.tag == 'Collection' and PropertyOrcollection.attrib.get("Name") == 'Properties':
# attribute_Name = PropertyOrcollection.attrib.get('Name')
for subrecord in PropertyOrcollection: # ACCESS LEVEL 5
for prop in subrecord:
if prop.attrib.get('Name') == 'Value':
Textprop = str(prop.text)
fileValue = Textprop
if (r')file' in fileValue) and (jobN == jobFromXML):
# print(fileValue)
PAR_list, filePath = self.retrieveFilePath(
fileValue, blockTextPar)
realFilePath = self.makePathForFileOrFileDataset(
filePath, PAR_list)
# print(realFilePath)
fileValueRecord.append(fileValue)
fileValueTrueRecord.append(realFilePath)
tup = (jobN, logfile, fileValue,
realFilePath, attribute_type)
tuple_Job_logfile_file_trueFile_attr.append(tup)
# print(
# f"{jobN},{logfile},{fileValue},{realFilePath},NaN,NaN,NaN ,NaN,{attribute_type}")
# print(f"jobN, logfile, fileValue, realFilePath, datasetValue, realFilePathDataset, stageName, stageType,recordTypeRecord")
# return fileValueRecord, fileValueTrueRecord,
# print(tup)
datasetValue = Textprop
if ('.ds' in datasetValue) and (jobN == jobFromXML):
# print(realFilePathDataset)
PAR_dataset_list, filePathDataset = self.retrieveFilePath(
datasetValue, blockTextPar)
realFilePathDataset = self.makePathForFileOrFileDataset(
filePathDataset, PAR_dataset_list)
datasetValueRecord.append(datasetValue)
datasetValueTrueRecord.append(realFilePathDataset)
# print(
# f"{jobN},{logfile},NaN,NaN,{datasetValue},{realFilePathDataset},NaN,NaN,{attribute_type}")
# print(f"jobN, logfile, fileValue, realFilePath, datasetValue, realFilePathDataset, stageName, stageType,recordTypeRecord")
# return realFilePathDataset, datasetValueTrueRecord
return fileValueRecord, fileValueTrueRecord, datasetValueRecord, datasetValueTrueRecord, tuple_Job_logfile_file_trueFile_attr
# def funcname2(record):
# for PropertyOrcollection in record: # ACCESS LEVEL 4
# attribute_Name = PropertyOrcollection.attrib.get('Name')
# if attribute_Name == 'Name':
# TextPropertyOrcollection = str(PropertyOrcollection.text)
# stageName = TextPropertyOrcollection
# # print(f"{jobN}, {logfile}, NaN, NaN, NaN, NaN, {stageName},NaN, {attribute_type}")
# # print(f"jobN, logfile, fileValue, realFilePath, datasetValue, realFilePathDataset, stageName, stageType,recordTypeRecord")
# elif attribute_Name == 'StageType':
# TextPropertyOrcollection = str(PropertyOrcollection.text)
# stageType = TextPropertyOrcollection
# # print(f"{jobN}, {logfile}, NaN, NaN, NaN, NaN, NaN,{stageType},{attribute_type}")
# # print(f"jobN, logfile, fileValue, realFilePath, datasetValue, realFilePathDataset, stageName, stageType,recordTypeRecord")
class ParseLog():
def changeDir(self):
path_to_logfullDS = '/Users/ganasene/Downloads/folder/logsfullDS'
r = os.chdir(path_to_logfullDS)
return r
def blockEventID(self, string):
''' Takes a string as input and returns a list.
Here we take -Event iD- as the separator word, so sep_word=Event !!!
-Some of the generated lists will be empty, which must be taken into account when removing
the useless occurrences (e.g. msgid -00126) from the bloc_jb list.- '''
blockPar2 = []
blockPar3 = []
bloc_jb = string.split(
'Event ') # Note: split on "Event " with the trailing space for this to work
# del bloc_jb[2] # removes the second occurrence, which only contains the datastage config parameters
# drop the first occurrence, which is empty
del bloc_jb[0]
# list of elements/blocks to remove
msgId_list = ['IIS-DSTAGE-RUN-I-0126',
'IIS-DSTAGE-RUN-I-0034', 'IIS-DSTAGE-RUN-I-0070']
for i, l in enumerate(bloc_jb): # i is the index, l the block
if msgId_list[0] in l:
# remove the useless environment variables
del bloc_jb[i]
for l2 in bloc_jb:
if (msgId_list[1] in l2) or (msgId_list[2] in l2):
blockPar2.append(l2)
for l3 in blockPar2:
# conversion de str en list car blockTextPar etait en format list
blockPar2_list = l3.split('\n')
for l4 in blockPar2_list:
if r'=' in l4:
blockPar3.append(l4)
blockPar3 = list(set(blockPar3))
return blockPar3
### =================== MAIN0: folder cleanup ===================================================
p = CleanFolder()
content = p.cleaning_files()
### display
# print("------ step 1: result of cleaning folder {} !!!!------".format(p))
for i in content:
# print(i, sep='\n')
pass
# print('---<>_<>__ Folder cleaned!!!!---<>_<>__', sep='\n')
# # ===================MAIN1===================================================
### Initialize the following lists in order to build an output dataframe
datasetName = []
datasetValueTrue = []
logFile = []
b = Xml_logfile()
######## ======================== ======================
q = ParseLog()
path_to_logfullDS = q.changeDir()  # change directory
tuple_job_logfile = []
compt = 0
jobFromXMLpd = []
logfilepd = []
tuple_job_logfilepd = []
print('Project,job,file,truefile,stage,idIO,typeIO')
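# Sketch of one emitted row under the header above (all values illustrative):
#   MGTPRD,JobLoadClients,#PAR_ROOT#/in/clients.txt,/data/in/clients.txt,ReadClients,V12S0,CustomInput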
# fileProjetXML = "MGTPRD.xml"
for fileProjetXML in os.listdir('/Users/ganasene/Downloads/projet_xml_insyco/xmlFile/'):
if (fileProjetXML != ".DS_Store") and (fileProjetXML != ".vscode"):
# fileProjetXML, ext= os.path.splitext(fileProjetXML)
# print(fileProjetXML)
filename, ext = os.path.splitext(fileProjetXML)
###DOC-FTSACprd.xml, SUPprd.xml , MGTPRD.xml, SOC-CLIPIprd.xml, DOC-OCTAVprd.xml
###UTIprd.xml Docprd.xml,SOC-OSCARprd.xml
#no files in FTSACprd.xml ,DOC-OCTAVprd.xml, UTIprd.xml
#1 file in SOC-CLIPIprd.xml
fullPath = b.document(fileProjetXML)
        # Instantiate the etree module
        ## parse method
tree = etree.parse(fullPath)
root = tree.getroot()
        # Initialize the lists collecting the jobs extracted from the XML files
jobList = []
stageNumberList= []
stageList = []
fileInput = []
fileOutput= []
idInputList = []
idOuputList = []
p = ParseElementXML()
fullPath = p.document(fileProjetXML)
root = p.getRoot(fullPath)
        # snippet to search for and collect the PARs in the logs
for logfile in os.listdir(path_to_logfullDS):
with open(logfile, encoding='utf8') as f:
f = f.read()
# for idx, jobFromXML in enumerate(collectionJobFromXML):
# if jobFromXML in f:
for job in root:
jobName = job.attrib.get('Identifier')
jobName = str(jobName)
# print(jobName)
if jobName in f:
# print(jobName+'-->'+logfile)
jobList.append(jobName)
blockTextPar = q.blockEventID(f)
for record in job:
attribute_type = record.attrib.get('Type')
attribute_identifier = record.attrib.get('Identifier')
if attribute_type == 'CustomStage':
stageNumberList.append(attribute_type) # col 4
                                for PropertyOrcollection in record: # ACCESS LEVEL 4
attribute_Name = PropertyOrcollection.attrib.get('Name')
if attribute_Name == 'Name':
stageName = str(PropertyOrcollection.text)
stageList.append(stageName)
elif attribute_Name == 'InputPins':
                                        idxs = str(PropertyOrcollection.text) # idxs ---> identifier
                                        if r"|" in idxs:
                                            idInput = idxs.split('|') # the input ids
idInputList.append(idInput)
# print(jobName+';'+stageName+';'+str(idInputList)+';'+attribute_Name)
else:
idInput = idxs
idInputList.append(idInput)
# print(jobName+';'+stageName+';'+str(idInputList)+';'+attribute_Name)
# print(jobN,'--',stageType,'--', stageName,'--',idInput,'(I)')
elif attribute_Name == 'OutputPins':
                                        idxs = str(PropertyOrcollection.text) # idxs ---> identifier
if r"|" in idxs:
idOutput = idxs.split('|')
idOuputList.append(idOutput)
# print(jobName+';'+stageName+';'+str(idOuputList)+';'+attribute_Name)
else:
idOutput = idxs
idOuputList.append(idOutput)
# print(jobName+';'+stageName+';' +str(idOuputList)+';'+attribute_Name)
elif attribute_type == 'CustomInput':
for PropertyOrcollection in record:
if PropertyOrcollection.tag == 'Collection' and PropertyOrcollection.attrib.get("Name") == 'Properties':
# attribute_Name = PropertyOrcollection.attrib.get('Name')
                                        for subrecord in PropertyOrcollection: # ACCESS LEVEL 5
# print(idInput, 'Input')
# print(stageName)
for prop in subrecord:
if prop.attrib.get('Name') == 'Value':
fileValue = str(prop.text)
datasetValue = str(prop.text)
if (r')file' in fileValue) and (attribute_identifier == idInput):
# if attribute_identifier == idInput:
# print(idInput, 'Input')
# print(stageName)
PAR_list, filePath = p.retrieveFilePath(fileValue, blockTextPar)
realFilePath = p.makePathForFileOrFileDataset( filePath, PAR_list)
print(filename+','+jobName+','+fileValue+','+realFilePath+','+stageName+','+attribute_identifier+','+attribute_type)
fileInput.append(fileValue)
if ('.ds' in datasetValue) and (attribute_identifier == idInput):
# print(idInput, 'Input')
# print(stageName)
PAR_list, filePath = p.retrieveFilePath(fileValue, blockTextPar)
realFilePath = p.makePathForFileOrFileDataset( filePath, PAR_list)
print(filename+',' + jobName+','+datasetValue + ',' + realFilePath + ',' + stageName+','+attribute_identifier+','+attribute_type)
fileInput.append(datasetValue)
elif attribute_type == 'CustomOutput':
for PropertyOrcollection in record:
if PropertyOrcollection.tag == 'Collection' and PropertyOrcollection.attrib.get("Name") == 'Properties':
# attribute_Name = PropertyOrcollection.attrib.get('Name')
                                        for subrecord in PropertyOrcollection: # ACCESS LEVEL 5
for prop in subrecord:
if prop.attrib.get('Name') == 'Value':
fileValue = str(prop.text)
datasetValue = str(prop.text)
if r')file' in fileValue and (attribute_identifier == idOutput):
PAR_list, filePath = p.retrieveFilePath(fileValue, blockTextPar)
realFilePath = p.makePathForFileOrFileDataset( filePath, PAR_list)
print( filename+','+jobName+','+fileValue+','+realFilePath+','+stageName+','+attribute_identifier+','+attribute_type)
fileOutput.append(fileValue)
if ('.ds' in datasetValue) and (attribute_identifier == idOutput):
PAR_list, filePath = p.retrieveFilePath(fileValue, blockTextPar)
realFilePath = p.makePathForFileOrFileDataset( filePath, PAR_list)
print(filename+','+jobName+','+datasetValue+','+realFilePath +','+stageName+','+attribute_identifier+','+attribute_type)
fileOutput.append(datasetValue)
        # # # print("------ step 2: jobs collected from xml {} ------".format(fullPath))
print(len(jobList))
jobList = list(set(jobList))
jobList = p.removeDuplicates(jobList)
        # # # stageList = p.removeDuplicates(stageList) # does not remove duplicate stages, maybe that is important
# # # stageList = p.removeDuplicates(stageList)
# jobList.remove(None)
print(len(jobList))
# # # print(len(stageNumberList))
# # print(len(stageList))
# # print(len(fileInput))
# # print(len(fileOutput))
# # # print(stageList)
# # print(idInputList)
|
[
"pmbasene@gmail.com"
] |
pmbasene@gmail.com
|
16a9e29d931fdaa72f0e92f17d73fadeb046554d
|
33cc8b2684dddf64fb730f42bf90c0d3e0a4f1c6
|
/client/src/assets/papers/rename.py
|
bed4d7ee0dfbf0869efc2bc17fc43df3c64bba3c
|
[] |
no_license
|
apujol09/NUIWebReact
|
1f74a9ed175babd304ab96c52ae0c9d12dac10a1
|
64598cd0ae1fa776615716aa607b0a30ac7d7c9d
|
refs/heads/master
| 2020-04-07T18:26:20.631019
| 2019-02-27T18:05:51
| 2019-02-27T18:05:51
| 158,610,159
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
import os
directory = 'D:/Repos/web2/assets/papers'
[os.rename(os.path.join(directory, f), os.path.join(directory, f).replace(' ', '_').lower()) for f in os.listdir(directory)]
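# Equivalent explicit loop (a sketch; the comprehension above is used purely for
# its os.rename side effect, which a plain for-loop states more directly):
#   for f in os.listdir(directory):
#       src = os.path.join(directory, f)
#       os.rename(src, src.replace(' ', '_').lower())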
|
[
"apujo010@AD.CS.FIU.EDU"
] |
apujo010@AD.CS.FIU.EDU
|
872d3a57cfcf35f46c151c463b307c38cb2f8821
|
4b1c693a6a18ac3856f2f855c15bb2c9bed90410
|
/quat2/actions.py
|
9837018b7316cef26cb34e41bd71d4753c7590f1
|
[] |
no_license
|
mouboo/quat2
|
73cb06315400dd725bfd34dc13634f61366f8597
|
f5d0519d0279aff8d0e5069db6211200a1e8263f
|
refs/heads/master
| 2020-09-23T11:07:27.077622
| 2020-01-05T13:37:53
| 2020-01-05T13:37:53
| 225,485,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
# actions.py
def cmd_help(args):
    # `h` was undefined in the original; the help topic is assumed to arrive in `args`.
    if args == 'go':
        print("'Go' usage: go [to] <direction>")
def go(args):
    print("You go to {}".format(args))
def inventory(args):
    # Assumption: `p` is a player object with an `inventory` list, defined elsewhere.
    s = ''
    for item in p.inventory:
        s += item + ', '
    return s
def look(args):
    pass
|
[
"peterkvillegard@gmail.com"
] |
peterkvillegard@gmail.com
|
a3c0fb4b5518284323f259aefadfe664f7532bd1
|
6a44d6071d9bc632b234da40f04393ed05e5f9d5
|
/plugins/lektor-github/lektor_github.py
|
2feace77361dfc201bb51047a1139eb4e9799992
|
[] |
no_license
|
zupo/seaintome
|
431d6533e4d8bcdd4a0eea3a5c210f26a7ed3477
|
37b9c49133173b2f6e531246a8f48432b5b0b718
|
refs/heads/master
| 2023-03-28T12:04:14.345668
| 2018-07-03T17:02:47
| 2018-07-03T17:02:47
| 98,026,930
| 0
| 0
| null | 2021-03-25T21:32:43
| 2017-07-22T11:26:07
|
CSS
|
UTF-8
|
Python
| false
| false
| 822
|
py
|
from __future__ import unicode_literals
from lektor.publisher import Command, Publisher
from lektor.pluginsystem import Plugin
class GitHubPlugin(Plugin):
name = 'GitHub'
description = 'Support for pushing changes to GitHub.'
def on_setup_env(self, **extra):
self.env.publishers['github'] = GitHubPublisher
class GitHubPublisher(Publisher):
def publish(self, target, credentials=None, **extra):
for line in Command(['git', 'status']).safe_iter():
yield line
for line in Command(['git', 'add', '-A']).safe_iter():
yield line
for line in Command(['git', 'commit', '-m', 'Automatic commit by Lektor']).safe_iter():
yield line
for line in Command(['git', 'push', 'origin', 'HEAD:master']).safe_iter():
yield line
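# Hedged usage sketch (assumed .lektorproject configuration; the url scheme is
# taken from the 'github' publisher key registered in on_setup_env above):
#
#   [servers.production]
#   name = Production
#   target = github://
#
# `lektor deploy production` would then stream the output of the git commands
# yielded by GitHubPublisher.publish.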
|
[
"nejc.zupan@gmail.com"
] |
nejc.zupan@gmail.com
|
dbad2d478c3ae3f0a07b7d0bbc72a1c1d30c8325
|
9bd270e6713ef7f4f887ff8413d6b94d314849a1
|
/10. Raspberry Pi/9.LED Button time increment.py
|
d6e561482651a40a6ddad13cea7c18d0ba0e2ef4
|
[] |
no_license
|
dongryoung/Class_Examples
|
ab0242dde514ab72f1d8088cf0698693fb36d665
|
90040f40742d9bf8d8030388cec24bc548ae9c92
|
refs/heads/master
| 2023-04-10T20:37:01.794232
| 2020-07-29T03:56:24
| 2020-07-29T03:56:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
# Each time the button is pressed,
# extend the LED on-time by a fixed increment (the code below adds 3 seconds per press)
import RPi.GPIO as GPIO
import time
led_pin = 7
pushButton_pin = 11
GPIO.setmode(GPIO.BOARD)
GPIO.setup(led_pin, GPIO.OUT)
GPIO.setup(pushButton_pin, GPIO.IN)
led_on = False #initial LED state
pressed = 0 #button state (pressed: 1, released: 0)
start_time = 0 #time the button was first pressed
remain_time = 0 #remaining LED on-time
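# Illustrative timeline (assuming the 3-second increment used below): a press at
# t=0 turns the LED on until t=3; a second press at t=1 bumps remain_time to 6,
# so the LED stays on until t=6, measured from the first press (start_time).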
try:
while True:
a = GPIO.input(pushButton_pin)
        if a == 1: #is the button pressed?
            if pressed == 0: #is this a new press (not still held down)?
                if led_on == False: #is the LED currently off?
remain_time = 0
start_time = time.time()
print(start_time)
led_on = True
remain_time = remain_time + 3
print(remain_time)
pressed = 1
else:
pressed = 0
if led_on:
GPIO.output(led_pin, True)
else:
GPIO.output(led_pin, False)
        #subtract (start time + remaining time) from the current time
if ((start_time + remain_time) - time.time()) <= 0:
led_on = False
else:
led_on = True
except KeyboardInterrupt:
GPIO.cleanup()
|
[
"noreply@github.com"
] |
dongryoung.noreply@github.com
|
f9fe4c293d98a8c59e1a94cc7dfdb508f86e3d8b
|
6b2d5488cc8a06cab81c1524bb14c347f3bf015a
|
/multiflood_python/bin/pyreverse
|
2100194fbe0033c18d4e3ba098107366aff70d65
|
[
"MIT"
] |
permissive
|
devanshk/MultiFlood
|
b169c47afc402afc5cbe9f139f9887d9c6bc2df1
|
73f8e40beeec9f818b8419b7ef4db6121a60b709
|
refs/heads/master
| 2023-02-23T04:12:55.395589
| 2021-01-25T01:01:16
| 2021-01-25T01:01:16
| 299,680,934
| 2
| 1
|
MIT
| 2020-12-28T20:26:00
| 2020-09-29T16:52:33
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 267
|
#!/Users/jay/Documents/MultiFlood/multiflood_python/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
|
[
"koolguru2@gmail.com"
] |
koolguru2@gmail.com
|
|
ef62ce3eb7ac8f03084c6369bbcb145a4ad9648c
|
8ea7912401d3f609765738f38b44561e4b2dbd5f
|
/sshmng/cli.py
|
9d70863d14a9860279cc7888b1e7e3018843920e
|
[
"MIT"
] |
permissive
|
zeroam/sshmng
|
1b46099ef6d9b174e7debf64e35663c6998b01c9
|
c3b705aeb6b086ec02664e74a31650ec2ffc41f6
|
refs/heads/master
| 2023-03-15T13:52:02.056589
| 2021-03-22T12:16:32
| 2021-03-22T12:16:32
| 343,767,017
| 1
| 1
|
MIT
| 2021-03-22T12:16:33
| 2021-03-02T12:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,630
|
py
|
"""Console script for sshmng."""
import os
import sys
import click
import subprocess
import tempfile
from pathlib import Path
from pprint import pprint
from sshmng.repo import Repo
@click.group()
@click.option("--debug/--nodebug", default=False, envvar="SSHMNG_DEBUG")
@click.pass_context
def main(ctx, debug):
ctx.obj = Repo(debug)
@main.command()
@click.pass_obj
def list(repo: Repo):
hosts = repo.load_hosts()
# TODO: print host info more pretty (+ colors)
for name, host in hosts.items():
print(f"- {name}: {host['username']}@{host['host']}:{host['port']}")
@main.command()
@click.argument("name")
@click.pass_obj
def add(repo: Repo, name):
hosts = repo.load_hosts()
if hosts.get(name):
replace = input(
f"The connection for {name} already exists.\nDo you want to replace? [y/N] "
)
if replace.lower() != "y":
print("Canceled add host")
return
print(f"Saving {name}...")
# TODO: check input is empty
username = input("Enter user name: ")
address = input("Enter server address: ")
port = int(input("Enter ssh port[22]: ") or 22)
private_key_path = input(f"Enter private key path[{repo.private_key}]: ")
if not private_key_path:
private_key = repo.private_key.read_text()
# add ssh-copy-id logic
subprocess.call(
f'cat {repo.public_key} | ssh -p {port} {username}@{address} "mkdir -p ~/.ssh && chmod 700 ~/.ssh && cat >> ~/.ssh/authorized_keys"',
shell=True,
)
else:
# check add pem file instead
private_key = Path(private_key_path).read_text()
hosts[name] = {
"username": username,
"host": address,
"port": port,
"private_key": private_key,
}
repo.save_hosts(hosts)
print(f"Saved {name}")
@main.command()
@click.argument("name")
@click.pass_obj
def connect(repo: Repo, name):
host = repo.load_hosts().get(name)
if host is None:
        print(f"connection for '{name}' does not exist")
return sys.exit(1)
# TODO: check if pem file exists
with tempfile.NamedTemporaryFile("w") as fp:
fp.write(host["private_key"])
fp.flush()
temppath = os.path.join(tempfile.gettempdir(), fp.name)
print(temppath)
subprocess.call(
f"ssh -i {temppath} -p {host['port']} {host['username']}@{host['host']}",
shell=True,
)
@main.command()
@click.argument("name")
@click.pass_obj
def delete(repo: Repo, name):
hosts = repo.load_hosts()
if hosts.get(name) is None:
        print(f"connection for '{name}' does not exist")
return sys.exit(1)
    replace = input(
        f"Connection '{name}' will be permanently deleted. Are you sure? [y/N] "
    )
if replace.lower() != "y":
return
del hosts[name]
repo.save_hosts(hosts)
print(f"Delete '{name}' Complete")
@main.command()
@click.argument("name")
@click.argument("command", nargs=-1)
@click.pass_obj
def exec(repo: Repo, name, command):
host = repo.load_hosts().get(name)
if host is None:
        print(f"connection for '{name}' does not exist")
return sys.exit(1)
with tempfile.NamedTemporaryFile("w") as fp:
fp.write(host["private_key"])
fp.flush()
temppath = os.path.join(tempfile.gettempdir(), fp.name)
print(temppath)
subprocess.call(
f"ssh -i {temppath} -p {host['port']} {host['username']}@{host['host']} '{' '.join(command)}'",
shell=True,
)
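# Example session (a sketch; it assumes the console entry point is named `sshmng`,
# and the host name, user and address are illustrative):
#   $ sshmng add web1        # prompts for user name, address, port and key path
#   $ sshmng list            # - web1: deploy@203.0.113.10:22
#   $ sshmng connect web1    # opens an interactive ssh session
#   $ sshmng exec web1 uptime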
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
[
"imdff0803@gmail.com"
] |
imdff0803@gmail.com
|
422920aa326471336694645680d370c24a513dd2
|
78d86701e3406750c712e22b4883d70063025659
|
/bert_lightning_adapter.py
|
ce15942c06c266924ed254ec7f3b7768521028df
|
[] |
no_license
|
jq8205/qa-for-ta
|
44d8cf97afab5426930a556aabf4b323fd7afe2b
|
f83de4152672962f5a5c449bdb5327bbba7f2839
|
refs/heads/master
| 2022-11-16T22:53:09.632515
| 2020-07-07T01:30:43
| 2020-07-07T01:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,909
|
py
|
from typing import Dict
from collections import OrderedDict
from functools import partial
import lineflow as lf
import lineflow.datasets as lfds
import lineflow.cross_validation as lfcv
from lineflow.core import lineflow_load
import torch
from torch.utils.data import DataLoader, ConcatDataset
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.logging import TestTubeLogger
from transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup
from adapter_bert import AdapterBert, setup_adapters
import os
# model_name = "bert-large-uncased-whole-word-masking"
model_name = "bert-base-uncased"
cache_directory = "/mnt/nfs/work1/andrewlan/bzylich/cached_models/"
nb_gpus = 1
nb_nodes = 1
# world = nb_gpus * nb_nodes
long_candidates_per_example = 4
nq_save_path = "/mnt/nfs/work1/andrewlan/bzylich/datasets/natural_questions/simplified/v1.0-simplified-nq-train-" + str(long_candidates_per_example) + "-cands-per-example.pkl"
MAX_LEN = 300
NUM_LABELS = 2
def preprocess(tokenizer: BertTokenizer, x: Dict) -> Dict:
    # Given a question x["question"] and a context x["context"], this function returns BERT-ready inputs.
inputs = tokenizer.encode_plus(
x["question"],
x["context"],
add_special_tokens=True,
max_length=MAX_LEN,
)
    # First, `input_ids` is a sequence of id-type representations of the input string.
    # Second, `token_type_ids` is a sequence identifier that shows the model the spans of the question and context individually.
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1] * len(input_ids)
# BERT requires sequences in the same batch to have same length, so let's pad!
padding_length = MAX_LEN - len(input_ids)
pad_id = tokenizer.pad_token_id
input_ids = input_ids + ([pad_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([pad_id] * padding_length)
# Super simple validation.
assert len(input_ids) == MAX_LEN, "Error with input length {} vs {}".format(len(input_ids), MAX_LEN)
assert len(attention_mask) == MAX_LEN, "Error with input length {} vs {}".format(len(attention_mask), MAX_LEN)
assert len(token_type_ids) == MAX_LEN, "Error with input length {} vs {}".format(len(token_type_ids), MAX_LEN)
# Convert them into PyTorch format.
label = torch.tensor(int(not x["is_impossible"])).long()
input_ids = torch.tensor(input_ids)
attention_mask = torch.tensor(attention_mask)
token_type_ids = torch.tensor(token_type_ids)
# DONE!
return {
"label": label,
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids
}
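# Sketch of a single preprocessed example (tokenizer name and strings are
# illustrative; running it requires downloading the pretrained vocab):
#   tok = BertTokenizer.from_pretrained("bert-base-uncased")
#   out = preprocess(tok, {"question": "Who wrote it?",
#                          "context": "Ada wrote it.",
#                          "is_impossible": False})
#   out["input_ids"].shape  # torch.Size([300]) == MAX_LEN
#   out["label"].item()     # 1 (the pair is answerable)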
def get_dataloader():
tokenizer = BertTokenizer.from_pretrained(model_name, do_lower_case="uncased" in model_name, cache_dir=cache_directory)
preprocessor = partial(preprocess, tokenizer)
train = lfds.Squad("train", 2)
test = lfds.Squad("dev", 2)
train, val = lfcv.split_dataset_random(train, int(len(train) * 0.9), seed=42)
train = train.map(preprocessor)
print("SQuAD Train dataset length:", len(train), flush=True)
# nq = lineflow_load(nq_save_path)
# train, val = lfcv.split_dataset_random(nq, int(len(nq) * 0.9), seed=42)
# train = train.map(preprocessor)
# nq = nq.map(preprocessor)
# print("NQ Train dataset length:", len(nq), flush=True)
# train = ConcatDataset([train, nq])
print("Train dataset length:", len(train), flush=True)
print("Val dataset length:", len(val), flush=True)
print("Test dataset length:", len(test), flush=True)
val = val.map(preprocessor)
test = test.map(preprocessor)
return train, val, test
class Model(pl.LightningModule):
def __init__(self, num_epochs, adapter_size, learning_rate, batch_size):
super(Model, self).__init__()
self.num_epochs = num_epochs
self.adapter_size = adapter_size
self.learning_rate = learning_rate
self.batch_size = batch_size
setup_adapters(self.adapter_size)
model = AdapterBert.from_pretrained(model_name, num_labels=NUM_LABELS, cache_dir=cache_directory)
self.model = model
train, val, test = get_dataloader()
self._train = train
self._val = val
self._test = test
self._train_dataloader = None
self._val_dataloader = None
self._test_dataloader = None
def configure_optimizers(self):
param_optimizer = list(self.model.named_parameters())
no_decay = ["bias", "LayerNorm.weight"] # ["bias", "gamma", "beta"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
"weight_decay_rate": 0.0 # 0.01
},
{
"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
"weight_decay_rate": 0.0
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.learning_rate,
eps=1e-8
)
t_total = len(self.train_dataloader()) * self.num_epochs
t_warmup = int(0.1 * float(t_total))
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=t_warmup, num_training_steps=t_total)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
labels = batch["label"]
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
token_type_ids = batch["token_type_ids"]
loss, _ = self.model(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
labels=labels
)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp or self.trainer.use_ddp2:
loss = loss.unsqueeze(0)
tqdm_dict = {"train_loss": loss}
output = OrderedDict({
"loss": loss,
"progress_bar": tqdm_dict,
"log": tqdm_dict
})
return output
def validation_step(self, batch, batch_idx):
labels = batch["label"]
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
token_type_ids = batch["token_type_ids"]
loss, logits = self.model(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
labels=labels
)
labels_hat = torch.argmax(logits, dim=1)
correct_count = torch.sum(labels == labels_hat)
if self.on_gpu:
correct_count = correct_count.cuda(loss.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp or self.trainer.use_ddp2:
loss = loss.unsqueeze(0)
correct_count = correct_count.unsqueeze(0)
output = OrderedDict({
"val_loss": loss,
"correct_count": correct_count,
"batch_size": len(labels)
})
return output
def validation_end(self, outputs):
val_acc = sum([torch.mean(out["correct_count"]) if (self.trainer.use_dp or self.trainer.use_ddp2) else out["correct_count"] for out in outputs]).float() / sum(out["batch_size"] for out in outputs)
val_loss = sum([torch.mean(out["val_loss"]) if (self.trainer.use_dp or self.trainer.use_ddp2) else out["val_loss"] for out in outputs]) / len(outputs)
tqdm_dict = {
"val_loss": val_loss,
"val_acc": val_acc,
}
result = {"progress_bar": tqdm_dict, "log": tqdm_dict, "val_loss": val_loss}
return result
def test_step(self, batch, batch_idx):
labels = batch["label"]
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
token_type_ids = batch["token_type_ids"]
loss, logits = self.model(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
labels=labels
)
labels_hat = torch.argmax(logits, dim=1)
correct_count = torch.sum(labels == labels_hat)
if self.on_gpu:
correct_count = correct_count.cuda(loss.device.index)
output = OrderedDict({
"test_loss": loss,
"correct_count": correct_count,
"batch_size": len(labels)
})
return output
def test_end(self, outputs):
test_acc = sum([out["correct_count"] for out in outputs]).float() / sum(out["batch_size"] for out in outputs)
test_loss = sum([out["test_loss"] for out in outputs]) / len(outputs)
tqdm_dict = {
"test_loss": test_loss,
"test_acc": test_acc,
}
result = {"progress_bar": tqdm_dict, "log": tqdm_dict}
return result
@pl.data_loader
def train_dataloader(self):
if self._train_dataloader is None:
train_dist_sampler = torch.utils.data.distributed.DistributedSampler(self._train)
self._train_dataloader = DataLoader(
self._train,
sampler=train_dist_sampler, # RandomSampler(train),
# sampler=RandomSampler(train),
batch_size=self.batch_size,
num_workers=0
)
return self._train_dataloader
@pl.data_loader
def val_dataloader(self):
if self._val_dataloader is None:
val_dist_sampler = torch.utils.data.distributed.DistributedSampler(self._val)
self._val_dataloader = DataLoader(
self._val,
sampler=val_dist_sampler, # SequentialSampler(val),
# sampler=SequentialSampler(val),
batch_size=self.batch_size,
num_workers=0
)
return self._val_dataloader
@pl.data_loader
def test_dataloader(self):
if self._test_dataloader is None:
test_dist_sampler = torch.utils.data.distributed.DistributedSampler(self._test)
self._test_dataloader = DataLoader(
self._test,
sampler=test_dist_sampler, # SequentialSampler(test),
# sampler=SequentialSampler(test),
batch_size=self.batch_size,
num_workers=0
)
return self._test_dataloader
if __name__ == "__main__":
# BEGIN Hyperparameters
BATCH_SIZE = 32
# LEARNING_RATE = 5e-5
# ADAPTER_SIZE = 8
# NUM_EPOCHS = 3
# END
learning_rate_options = [1e-3, 1e-2, 1e-1]
adapter_size_options = [64, 256]
num_epochs_options = [20, 30]
for NUM_EPOCHS in num_epochs_options:
for LEARNING_RATE in learning_rate_options:
for ADAPTER_SIZE in adapter_size_options:
name = model_name + "_" + str(LEARNING_RATE) + "_" + str(BATCH_SIZE) + "_adapter_" + str(ADAPTER_SIZE) + "_epochs_" + str(NUM_EPOCHS) + "_SQuAD_Grid" # _NQ_2-cands-per-example_plus-
ckpt_directory = "./" + name + "/"
SAVE_PREFIX = name + "_"
# early_stop_callback = EarlyStopping(
# monitor="val_loss",
# min_delta=0.0,
# patience=3,
# verbose=True,
# mode="min"
# )
# default logger used by trainer
logger = TestTubeLogger(
save_dir="./lightning_logs/",
name=name,
debug=False,
create_git_tag=False
)
checkpoint_callback = ModelCheckpoint(
filepath=ckpt_directory,
save_top_k=1,
verbose=True,
monitor='val_loss',
mode='min',
prefix=SAVE_PREFIX
)
trainer = pl.Trainer(
logger=logger,
nb_gpu_nodes=nb_nodes,
gpus=nb_gpus,
early_stop_callback=False, # early_stop_callback,
checkpoint_callback=checkpoint_callback,
distributed_backend='ddp',
amp_level='O2',
use_amp=True,
max_epochs=NUM_EPOCHS
)
print("-----------------------------------------------------------------------")
print(name)
model = Model(NUM_EPOCHS, ADAPTER_SIZE, LEARNING_RATE, BATCH_SIZE)
print("all params:", model.model.count_all_params(), "trainable params:", model.model.count_trainable_params())
print("Model initiated.", flush=True)
print("Commencing training...", flush=True)
trainer.fit(model)
print("Training completed.", flush=True)
# print("Testing model...", flush=True)
# trainer.test()
# print("Finished!", flush=True)
del logger
del checkpoint_callback
del trainer
del model
torch.cuda.empty_cache()
|
[
"noreply@github.com"
] |
jq8205.noreply@github.com
|
ce1097d24dcb81d3104358d7723a962b1be9dccb
|
69b98035ededca206f7139ec68d61aaca74f7f1f
|
/src/lidar_odometry_and_mapping/src/gnss_ins_sim/src/recorder_node_allan_variance_analysis.py
|
f94fa2138a08164344cfd0fe56bfa08ecc701e7e
|
[] |
no_license
|
xiekunpeng0379/SensorFusion
|
69378ad1077b8769566ea05b5c4b3c359ec3252d
|
fbd1ddeb55c610714b456be76bc501efac86344e
|
refs/heads/main
| 2023-04-08T12:34:03.059967
| 2021-04-18T06:23:58
| 2021-04-18T06:23:58
| 483,066,678
| 1
| 0
| null | 2022-04-19T02:26:05
| 2022-04-19T02:26:05
| null |
UTF-8
|
Python
| false
| false
| 7,205
|
py
|
#!/usr/bin/python
import os
import rospkg
import rospy
import rosbag
import math
import numpy as np
from gnss_ins_sim.sim import imu_model
from gnss_ins_sim.sim import ins_sim
from std_msgs.msg import String
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
def get_gnss_ins_sim(motion_def_file, fs_imu, fs_gps):
'''
Generate simulated GNSS/IMU data using specified trajectory.
'''
# set IMU model:
D2R = math.pi/180.0
# imu_err = 'low-accuracy'
imu_err = {
# 1. gyro:
# a. random noise:
# gyro angle random walk, deg/rt-hr
'gyro_arw': np.array([0.75, 0.75, 0.75]),
# gyro bias instability, deg/hr
'gyro_b_stability': np.array([10.0, 10.0, 10.0]),
        # gyro bias instability correlation time, sec
'gyro_b_corr': np.array([100.0, 100.0, 100.0]),
# b. deterministic error:
'gyro_b': np.array([0.0, 0.0, 0.0]),
'gyro_k': np.array([1.0, 1.0, 1.0]),
'gyro_s': np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
# 2. accel:
# a. random noise:
# accel velocity random walk, m/s/rt-hr
'accel_vrw': np.array([0.05, 0.05, 0.05]),
# accel bias instability, m/s2
'accel_b_stability': np.array([2.0e-4, 2.0e-4, 2.0e-4]),
        # accel bias instability correlation time, sec
'accel_b_corr': np.array([100.0, 100.0, 100.0]),
# b. deterministic error:
'accel_b': np.array([0.0e-3, 0.0e-3, 0.0e-3]),
'accel_k': np.array([1.0, 1.0, 1.0]),
'accel_s': np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
# 3. mag:
'mag_si': np.eye(3) + np.random.randn(3, 3)*0.0,
'mag_hi': np.array([10.0, 10.0, 10.0])*0.0,
'mag_std': np.array([0.1, 0.1, 0.1])
}
    # build the IMU model (9-axis, i.e. with magnetometer, plus GPS):
imu = imu_model.IMU(accuracy=imu_err, axis=9, gps=True)
# init simulation:
sim = ins_sim.Sim(
[fs_imu, fs_gps, fs_imu],
motion_def_file,
ref_frame=1,
imu=imu,
mode=None,
env=None,
algorithm=None
)
# run:
sim.run(1)
# get simulated data:
rospy.logwarn(
"Simulated data size {}".format(
len(sim.dmgr.get_data_all('gyro').data[0])
)
)
# imu measurements:
step_size = 1.0 / fs_imu
for i, (gyro, accel, ref_pos, ref_att_quat, ref_vel) in enumerate(
zip(
# a. gyro
sim.dmgr.get_data_all('gyro').data[0],
# b. accel
sim.dmgr.get_data_all('accel').data[0],
sim.dmgr.get_data_all('ref_pos').data[0],
sim.dmgr.get_data_all('ref_att_quat').data[0],
sim.dmgr.get_data_all('ref_vel').data[0]
)
):
yield {
'stamp': i * step_size,
'data': {
# a. gyro:
'gyro_x': gyro[0],
'gyro_y': gyro[1],
'gyro_z': gyro[2],
# b. accel:
'accel_x': accel[0],
'accel_y': accel[1],
'accel_z': accel[2],
# c. ref_pos:
'ref_pos_x': ref_pos[0],
'ref_pos_y': ref_pos[1],
'ref_pos_z': ref_pos[2],
# d. ref_att_quat:
'ref_att_quat_x': ref_att_quat[0],
'ref_att_quat_y': ref_att_quat[1],
'ref_att_quat_z': ref_att_quat[2],
'ref_att_quat_w': ref_att_quat[3],
# e. ref_vel:
'ref_vel_x': ref_vel[0],
'ref_vel_y': ref_vel[1],
'ref_vel_z': ref_vel[2],
}
}
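# Consumption sketch (the motion definition path is illustrative): each item the
# generator yields is a {'stamp': t, 'data': {...}} dict, one per IMU sample.
#   sim = get_gnss_ins_sim('motion_def/motion_def-90deg_turn.csv', 100.0, 10.0)
#   first = next(sim)
#   first['stamp']             # 0.0
#   sorted(first['data'])[:3]  # ['accel_x', 'accel_y', 'accel_z']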
def gnss_ins_sim_recorder():
"""
Record simulated GNSS/IMU data as ROS bag
"""
# ensure gnss_ins_sim_node is unique:
rospy.init_node('gnss_ins_sim_recorder_node')
# parse params:
motion_def_name = rospy.get_param('/gnss_ins_sim_recorder_node/motion_file')
sample_freq_imu = rospy.get_param('/gnss_ins_sim_recorder_node/sample_frequency/imu')
sample_freq_gps = rospy.get_param('/gnss_ins_sim_recorder_node/sample_frequency/gps')
topic_name_imu = rospy.get_param('/gnss_ins_sim_recorder_node/topic_name')
rosbag_output_path = rospy.get_param('/gnss_ins_sim_recorder_node/output_path')
rosbag_output_name = rospy.get_param('/gnss_ins_sim_recorder_node/output_name')
rosbag_output_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), rosbag_output_path)
# generate simulated data:
motion_def_path = os.path.join(
rospkg.RosPack().get_path('gnss_ins_sim'), 'config', 'motion_def', motion_def_name
)
imu_simulator = get_gnss_ins_sim(
# motion def file:
motion_def_path,
# gyro-accel/gyro-accel-mag sample rate:
sample_freq_imu,
# GPS sample rate:
sample_freq_gps
)
with rosbag.Bag(
os.path.join(rosbag_output_path, rosbag_output_name), 'w'
) as bag:
# get timestamp base:
timestamp_start = rospy.Time.now()
for measurement in imu_simulator:
# init:
msg = Imu()
# a. set header:
time_stamp = timestamp_start + rospy.Duration.from_sec(measurement['stamp'])
msg.header.frame_id = 'NED'
msg.header.stamp = time_stamp
# b. set orientation estimation:
msg.orientation.x = 0.0
msg.orientation.y = 0.0
msg.orientation.z = 0.0
msg.orientation.w = 1.0
# c. gyro:
msg.angular_velocity.x = measurement['data']['gyro_x']
msg.angular_velocity.y = measurement['data']['gyro_y']
msg.angular_velocity.z = measurement['data']['gyro_z']
msg.linear_acceleration.x = measurement['data']['accel_x']
msg.linear_acceleration.y = measurement['data']['accel_y']
msg.linear_acceleration.z = measurement['data']['accel_z']
odom_msg = Odometry()
odom_msg.header.frame_id = 'ENU'
odom_msg.child_frame_id = 'ENU'
odom_msg.header.stamp = time_stamp
            # a. set position:
            odom_msg.pose.pose.position.x = measurement['data']['ref_pos_x']
            odom_msg.pose.pose.position.y = measurement['data']['ref_pos_y']
            odom_msg.pose.pose.position.z = measurement['data']['ref_pos_z']
            # b. set orientation:
            odom_msg.pose.pose.orientation.x = measurement['data']['ref_att_quat_x']
            odom_msg.pose.pose.orientation.y = measurement['data']['ref_att_quat_y']
            odom_msg.pose.pose.orientation.z = measurement['data']['ref_att_quat_z']
            odom_msg.pose.pose.orientation.w = measurement['data']['ref_att_quat_w']
            # c. set linear velocity:
            odom_msg.twist.twist.linear.x = measurement['data']['ref_vel_x']
odom_msg.twist.twist.linear.y = measurement['data']['ref_vel_y']
odom_msg.twist.twist.linear.z = measurement['data']['ref_vel_z']
# write:
bag.write(topic_name_imu, msg, msg.header.stamp)
bag.write("/pose/ground_truth", odom_msg, odom_msg.header.stamp)
if __name__ == '__main__':
try:
gnss_ins_sim_recorder()
except rospy.ROSInterruptException:
pass
|
[
"uupks0325@gmail.com"
] |
uupks0325@gmail.com
|
1fac3d7e1d754ceccd22e2a44d7aa79d21809451
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/tree-big-4739.py
|
d5abb8e7fb73baa88a694920865ec11d866b424b
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,292
|
py
|
# Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
cedf0f9f7e4a3f3e462c9ece960b06285598723e
|
e2ab8200b81d7c3d5b0f6bd090aa8c587fe8ae35
|
/insertsort.py
|
41eec2659a0c786f4bb9dc5033a3b37c723407a1
|
[] |
no_license
|
Ananth-Adhikarla/Python-Programming
|
d13c320e5d23407acc62842848975051fde2057b
|
b3b03dac771c3c6337c1cd5016ce4f8826e1058d
|
refs/heads/master
| 2020-03-12T04:54:55.975415
| 2018-04-21T08:13:06
| 2018-04-21T08:13:06
| 130,453,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
"""
Insertion sort
"""
def insertion_sort(nlist):
    for i in range(1, len(nlist)):
        temp = nlist[i]
        j = i
        while j > 0 and temp < nlist[j - 1]:
            nlist[j] = nlist[j - 1]
            j -= 1
        nlist[j] = temp
    return nlist
l = [1,3,5,7,4,2]
print("Original List : ",l)
print("After insert sort ", insertion_sort(l) )
assert insertion_sort([5,19,4,1,36,99,2]) == sorted([5,19,4,1,36,99,2])
assert insertion_sort(["Greg", "Armen", "Ken"]) == sorted(["Greg", "Armen", "Ken"])
assert insertion_sort([9,8,7,6,5,4,3,2,1]) == [1,2,3,4,5,6,7,8,9]
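# Worked trace (illustrative): insertion_sort([3, 1, 2])
#   i=1: temp=1, 3 shifts right -> [1, 3, 2]
#   i=2: temp=2, 3 shifts right -> [1, 2, 3]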
|
[
"ananth.adhikarla@gmail.com"
] |
ananth.adhikarla@gmail.com
|
9a0c824e0a63c04fd42985900a538f7a3e5a1ed2
|
5af277b5819d74e61374d1d78c303ac93c831cf5
|
/bisimulation_aaai2020/dopamine/rainbow_agent.py
|
7c9a614beabc4a088e107685b25a5c86d430673c
|
[
"Apache-2.0"
] |
permissive
|
Ayoob7/google-research
|
a2d215afb31513bd59bc989e09f54667fe45704e
|
727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7
|
refs/heads/master
| 2022-11-11T03:10:53.216693
| 2020-06-26T17:13:45
| 2020-06-26T17:13:45
| 275,205,856
| 2
| 0
|
Apache-2.0
| 2020-06-26T16:58:19
| 2020-06-26T16:58:18
| null |
UTF-8
|
Python
| false
| false
| 18,472
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rainbow agent used for learning the bisimulation metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dopamine.agents.rainbow import rainbow_agent
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import slim as contrib_slim
@gin.configurable
def atari_network(num_actions, num_atoms, support, network_type, state,
representation_layer=10):
"""The convolutional network used to compute agent's Q-value distributions.
Args:
num_actions: int, number of actions.
num_atoms: int, the number of buckets of the value function distribution.
support: tf.linspace, the support of the Q-value distribution.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
representation_layer: int, the layer which will be used as the
representation for computing the bisimulation distances. Defaults to
      a high value, which selects the penultimate layer.
Returns:
net: _network_type object containing the tensors output by the network.
"""
weights_initializer = contrib_slim.variance_scaling_initializer(
factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)
curr_layer = 1
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
representation = None
if representation_layer <= curr_layer:
representation = contrib_slim.flatten(net)
net = contrib_slim.conv2d(
net,
32, [8, 8],
stride=4,
weights_initializer=weights_initializer,
trainable=False)
curr_layer += 1
if representation is None and representation_layer <= curr_layer:
representation = contrib_slim.flatten(net)
net = contrib_slim.conv2d(
net,
64, [4, 4],
stride=2,
weights_initializer=weights_initializer,
trainable=False)
curr_layer += 1
if representation is None and representation_layer <= curr_layer:
representation = contrib_slim.flatten(net)
net = contrib_slim.conv2d(
net,
64, [3, 3],
stride=1,
weights_initializer=weights_initializer,
trainable=False)
net = contrib_slim.flatten(net)
curr_layer += 1
if representation is None and representation_layer <= curr_layer:
representation = net
net = contrib_slim.fully_connected(
net, 512, weights_initializer=weights_initializer, trainable=False)
curr_layer += 1
if representation is None:
representation = net
net = contrib_slim.fully_connected(
net,
num_actions * num_atoms,
activation_fn=None,
weights_initializer=weights_initializer,
trainable=False)
logits = tf.reshape(net, [-1, num_actions, num_atoms])
probabilities = contrib_layers.softmax(logits)
q_values = tf.reduce_sum(support * probabilities, axis=2)
return network_type(q_values, logits, probabilities, representation)
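# Reading aid (a sketch of the selection logic above): with representation_layer=1
# the flattened raw input becomes the representation; each conv layer increments
# curr_layer, and under the default of 10 the 512-unit fully-connected output
# (the penultimate layer) is used.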
@gin.configurable
def bisimulation_network(states, hidden_dimension=512, num_layers=1,
trainable=True):
"""Creates the network for approximating the bisimulation distances.
Args:
    states: Tensor, concatenation of two state representations.
hidden_dimension: int, dimensionality of hidden_layers.
num_layers: int, the number of layers to use for the approximant.
trainable: bool, whether this network will be trainable.
Returns:
Network to approximate bisimulation metric.
"""
net = tf.cast(states, tf.float32)
net = contrib_slim.flatten(net)
for _ in range(num_layers):
net = contrib_slim.fully_connected(
net, hidden_dimension, trainable=trainable)
return contrib_slim.fully_connected(net, 1, trainable=trainable)
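# Shape sketch: `states` is a [batch, 2 * rep_dim] concatenation of representation
# pairs (built by BisimulationRainbowAgent._concat_states below), and the network
# maps each pair to a scalar, so the output has shape [batch, 1].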
SequentialDistances = (
collections.namedtuple('sequential_distances', ['bisimulation', 'value']))
@gin.configurable
class BisimulationRainbowAgent(rainbow_agent.RainbowAgent):
"""A subclass of Rainbow which learns the on-policy bisimulation metric."""
def __init__(self,
sess,
num_actions,
optimizer=tf.train.AdamOptimizer(
learning_rate=0.000075, epsilon=0.00015),
bisim_horizon_discount=0.99,
evaluate_metric_only=False,
summary_writer=None):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
optimizer: `tf.train.Optimizer`, for training the bisimulation estimator.
bisim_horizon_discount: float, amount by which to increase the horizon for
estimating the distance.
evaluate_metric_only: bool, if set, will evaluate the loaded metric
approximant.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
"""
self.bisim_optimizer = optimizer
self.bisim_horizon_discount = bisim_horizon_discount
self.bisim_horizon_discount_value = 1.0
self.bisim_horizon = 0.0
self.evaluate_metric_only = evaluate_metric_only
self.start_recording = False
super(BisimulationRainbowAgent, self).__init__(
sess, num_actions, network=atari_network, summary_writer=summary_writer)
self._source_state = np.copy(self.state)
self._evaluation_steps = 0
self.eval_distances = SequentialDistances([], [])
def reload_checkpoint(self, checkpoint_path):
"""Reload variables from a fully specified checkpoint.
Args:
checkpoint_path: string, full path to a checkpoint to reload.
"""
assert checkpoint_path
global_vars = set([x.name for x in tf.global_variables()])
ckpt_vars = [
'{}:0'.format(name)
for name, _ in tf.train.list_variables(checkpoint_path)
]
    # Only restore variables that are also present in the checkpoint.
include_vars = list(global_vars.intersection(set(ckpt_vars)))
variables_to_restore = contrib_slim.get_variables_to_restore(
include=include_vars)
if variables_to_restore:
reloader = tf.train.Saver(var_list=variables_to_restore)
reloader.restore(self._sess, checkpoint_path)
tf.logging.info('Done restoring from %s!', checkpoint_path)
def _get_network_type(self):
"""Returns the type of the outputs of a Q value network.
Returns:
net_type: _network_type object defining the outputs of the network.
"""
return collections.namedtuple('c51_network',
['q_values', 'logits', 'probabilities',
'representation'])
def _concat_states(self, states, transpose=False):
"""Concatenate all pairs of states in a batch.
Args:
states: Tensor, batch of states from which we will concatenate
batch_size^2 pairs of states.
transpose: bool, whether to concatenate states in transpose order.
Returns:
A batch_size^2 Tensor containing the concatenation of all elements in
`states`.
"""
# tiled_states will have shape
# [batch_size, batch_size, representation_dimension] and will be of the
# following form (where \phi_1 is the representation of the state of the
# first batch_element):
# [ \phi_1 \phi_2 ... \phi_batch_size ]
# [ \phi_1 \phi_2 ... \phi_batch_size ]
# ...
# [ \phi_1 \phi_2 ... \phi_batch_size ]
tiled_states = tf.tile([states], [self.batch_size, 1, 1])
# transpose_tiled_states will have shape
# [batch_size, batch_size, representation_dimension] and will be of the
# following form (where \phi_1 is the representation of the state of the
# first batch_element):
# [ \phi_1 \phi_1 ... \phi_1 ]
# [ \phi_2 \phi_2 ... \phi_2 ]
# ...
# [ \phi_batch_size \phi_batch_size ... \phi_batch_size ]
transpose_tiled_states = tf.keras.backend.repeat(states, self.batch_size)
# concat_states will be a
# [batch_size, batch_size, representation_dimension*2] matrix containing the
# concatenation of all pairs of states in the batch.
if transpose:
concat_states = tf.concat([transpose_tiled_states, tiled_states], 2)
else:
concat_states = tf.concat([tiled_states, transpose_tiled_states], 2)
# We return a reshaped matrix which results in a new batch of size
# batch_size ** 2. Resulting matrix will have shape
    # [batch_size**2, representation_dimension * 2].
representation_dimension = tf.shape(states)[1]
return tf.reshape(concat_states,
(self.batch_size**2, representation_dimension * 2))
def _build_bisimulation_target(self):
"""Build the bisimulation target."""
r1 = tf.tile([self._replay.rewards], [self.batch_size, 1])
r2 = tf.transpose(r1)
reward_differences = tf.abs(r1 - r2)
reward_differences = tf.reshape(reward_differences, (self.batch_size**2, 1))
if self.summary_writer is not None:
mean_reward_diff = tf.reduce_mean(reward_differences)
tf.summary.scalar('Training/AverageRewardDiff', mean_reward_diff)
self.next_state_distances = self.bisim_horizon_ph * self.s2_target_distances
return reward_differences + self.gamma * self.next_state_distances
def _build_train_op(self):
return tf.no_op()
def _sync_qt_ops(self):
return tf.no_op()
def _build_networks(self):
super(BisimulationRainbowAgent, self)._build_networks()
self._build_all_bisimulation_parts()
def _build_all_bisimulation_parts(self):
"""Builds the bisimulation networks and ops."""
self.batch_size = tf.shape(self._replay.rewards)[0]
self._replay_target_outputs = self.target_convnet(self._replay.states)
self.bisim_horizon_ph = tf.placeholder(tf.float32, ())
self.online_bisimulation = tf.make_template('OnlineBisim',
bisimulation_network)
self.target_bisimulation = tf.make_template('TargetBisim',
bisimulation_network,
trainable=False)
# For evaluating the metric from an episode's first state.
self.source_state_ph = tf.placeholder(self.observation_dtype,
self.state_ph.shape,
name='source_state_ph')
self._initial_state_net = self.online_convnet(self.source_state_ph)
concat_states = tf.concat(
[self._initial_state_net.representation,
self._net_outputs.representation], 1)
self.state_distances = tf.squeeze(self.online_bisimulation(concat_states))
self.state_value = tf.reduce_max(self._net_outputs.q_values, axis=1)[0]
if self.summary_writer is not None:
tf.summary.scalar('Eval/StateDistances', self.state_distances)
if self.evaluate_metric_only:
return
self.s1_online_distances = self.online_bisimulation(
self._concat_states(self._replay_net_outputs.representation))
self.s2_target_distances = self.target_bisimulation(
self._concat_states(
self._replay_next_target_net_outputs.representation))
# bisimulation_target = rew_diff + gamma * next_distance.
bisimulation_target = tf.stop_gradient(self._build_bisimulation_target())
# We zero-out diagonal entries, since those are estimating the distance
# between a state and itself, which we know to be 0.
diagonal_mask = 1.0 - tf.diag(tf.ones(self.batch_size, dtype=tf.float32))
diagonal_mask = tf.reshape(diagonal_mask, (self.batch_size**2, 1))
bisimulation_target *= diagonal_mask
bisimulation_estimate = self.s1_online_distances
bisimulation_loss = tf.losses.mean_squared_error(
bisimulation_target,
bisimulation_estimate)
if self.summary_writer is not None:
average_distance = tf.reduce_mean(bisimulation_estimate)
average_target = tf.reduce_mean(bisimulation_target)
average_next_state_dists = tf.reduce_mean(self.next_state_distances)
tf.summary.scalar('Training/loss', bisimulation_loss)
tf.summary.scalar('Training/AverageDistance', average_distance)
tf.summary.scalar('Training/AverageTargetDistance', average_target)
tf.summary.scalar('Training/AverageNextStateDistance',
average_next_state_dists)
tf.summary.scalar('Training/BisimHorizon', self.bisim_horizon_ph)
tf.summary.histogram('Training/OnlineDistance', bisimulation_estimate)
tf.summary.histogram('Training/TargetDistance', bisimulation_target)
self._train_bisim_op = self.bisim_optimizer.minimize(bisimulation_loss)
self._bisim_sync_op = self._build_sync_op(online_scope='OnlineBisim',
target_scope='TargetBisim')
def _build_sync_op(self, online_scope='Online', target_scope='Target'):
# Get trainable variables from online and target Rainbow
sync_qt_ops = []
trainables_online = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=online_scope)
trainables_target = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=target_scope)
for (w_online, w_target) in zip(trainables_online, trainables_target):
# Assign weights from online to target network.
sync_qt_ops.append(w_target.assign(w_online, use_locking=True))
return sync_qt_ops
def _train_bisimulation(self):
if self._replay.memory.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sess.run(self._train_bisim_op,
{self.bisim_horizon_ph: self.bisim_horizon})
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
summary = self._sess.run(self._merged_summaries,
{self.source_state_ph: self._source_state,
self.state_ph: self.state,
self.bisim_horizon_ph: self.bisim_horizon})
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sess.run(self._bisim_sync_op)
self.bisim_horizon = 1.0 - self.bisim_horizon_discount_value
self.bisim_horizon_discount_value *= self.bisim_horizon_discount
self.training_steps += 1
def _evaluate_bisimulation(self):
if self.start_recording:
current_distance = self._sess.run(
self.state_distances,
{self.source_state_ph: self._source_state,
self.state_ph: self.state,
self.bisim_horizon_ph: self.bisim_horizon})
self.eval_distances.bisimulation.append(current_distance)
source_v = self._sess.run(self.state_value, {self.state_ph: self.state})
target_v = self._sess.run(self.state_value,
{self.state_ph: self._source_state})
self.eval_distances.value.append(abs(source_v - target_v))
if self.evaluate_metric_only and self.summary_writer is not None:
summary = self._sess.run(self._merged_summaries,
{self.source_state_ph: self._source_state,
self.state_ph: self.state,
self.bisim_horizon_ph: self.bisim_horizon})
self.summary_writer.add_summary(summary, self._evaluation_steps)
self._evaluation_steps += 1
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
action = super(BisimulationRainbowAgent, self).begin_episode(observation)
if not self.evaluate_metric_only:
self._train_bisimulation()
self._evaluate_bisimulation()
return action
def step(self, reward, observation, set_source_state=False):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
set_source_state: bool, whether to set the current state as the source
state.
Returns:
int, the selected action.
"""
_ = super(BisimulationRainbowAgent, self).step(reward, observation)
self._store_transition(self._last_observation, self.action, reward, False)
if set_source_state and not self.start_recording:
# We only want to set the source state once.
self.start_recording = True
self._source_state = np.copy(self.state)
if not self.evaluate_metric_only:
self._train_bisimulation()
self._evaluate_bisimulation()
return self.action
def end_episode(self, reward):
"""Signals the end of the episode to the agent.
We store the observation of the current time step, which is the last
observation of the episode.
Args:
reward: float, the last reward from the environment.
"""
self._store_transition(self._observation, self.action, reward, True)
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.memory.sum_tree.max_recorded_priority
self._replay.add(last_observation, action, reward, is_terminal, priority)
def get_distances(self):
return self.eval_distances
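# --- Editor's sketch (not part of the original agent) ---
# The all-pairs tiling in `_concat_states` is easy to sanity-check with plain
# numpy; batch_size=3 and representation_dim=2 below are arbitrary choices.
def _concat_states_numpy_demo():
  states = np.arange(6).reshape(3, 2)
  tiled = np.tile(states[None], (3, 1, 1))            # row i is [phi_1, phi_2, phi_3]
  transposed = np.repeat(states[:, None], 3, axis=1)  # row i is [phi_i, phi_i, phi_i]
  pairs = np.concatenate([tiled, transposed], axis=2).reshape(9, 4)
  return pairs  # all batch_size**2 ordered pairs (phi_j, phi_i), as with transpose=False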
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
f31df43de849fa6395cf5ea42e6c2ee9f3772cbf
|
553daabc841cc19d0729880e2624bdaaa2b8219a
|
/problem_109/problem_109.py
|
7a9f6e85d38de529b83b5d9dcb78fe4cff653807
|
[] |
no_license
|
yuhlearn/project_euler
|
ed915b426a6c7d3de8d165165749cae97e4dd156
|
83bd0713994bea1745540855bb4141e4b91308a6
|
refs/heads/master
| 2021-10-10T00:52:11.537263
| 2021-10-07T07:40:22
| 2021-10-07T07:40:22
| 122,016,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
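# Project Euler 109 (darts checkouts) -- editor's comments, logic unchanged:
# `values` holds every single, double and treble score in sorted order, and
# `doubles` holds the indices of the doubles within it. count() fixes the
# finishing double first, then count_aux() adds up to two more darts in
# non-decreasing index order so each combination is counted once; result[s]
# holds the number of distinct checkouts totalling s, for s < 100.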
values = [
1, 2, 2, 3, 3, 4, 4, 5, 6, 6,
6, 7, 8, 8, 9, 9, 10, 10, 11, 12,
12, 12, 13, 14, 14, 15, 15, 16, 16, 17,
18, 18, 18, 19, 20, 20, 21, 22, 24, 24,
25, 26, 27, 28, 30, 30, 32, 33, 34, 36,
36, 38, 39, 40, 42, 45, 48, 50, 51, 54,
57, 60
]
doubles = [
2, 6, 9, 13, 17, 20, 24, 28, 31, 35, 37,
38, 41, 43, 44, 46, 48, 49, 51, 53, 57
]
result = [0]*100
def count_aux(s, v, d):
for v in range(v, len(values)):
ss = s + values[v]
if ss < 100 and d < 3:
result[ss] += 1
count_aux(ss, v, d + 1)
return
def count():
for d in doubles:
s = values[d]
result[s] += 1
count_aux(s, 0, 1)
return
count()
print(sum(result), result)
|
[
"reeves.max@gmail.com"
] |
reeves.max@gmail.com
|
f080048d5c41225d37ffd5e330c98243d87982c8
|
4e0f79fbc1d3a090a5c7f633f360e41514e1fad9
|
/election_by_state.py
|
bca072ca5f2123d1a2949f92a8675c3f2fab8258
|
[
"MIT"
] |
permissive
|
powergascan/mod3-hypothesis-testing
|
3b9d6db874755b928aacccba053125b224fc044a
|
f529e02463b33f9f20657c7e7c3eb249477622ff
|
refs/heads/master
| 2020-07-08T07:50:34.839721
| 2019-08-25T01:52:01
| 2019-08-25T01:52:01
| 203,608,883
| 0
| 0
|
MIT
| 2019-08-25T01:52:02
| 2019-08-21T15:06:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 986
|
py
|
import pandas as pd
def DT_Effect(election_data, labels=['US Blue','US Red']):
election_data=election_data[election_data['year'].isin([2012, 2016])]
election_data=election_data[election_data['party'].isin(['republican'])]
election_data.sort_values(['state_po','year'],inplace=True)
election_data['Vote_Percent']=election_data['candidatevotes']/election_data['totalvotes']
election_data['Vote_Percent_shift']=election_data.groupby(['state'])['Vote_Percent'].shift(1)
election_data=election_data[~election_data['Vote_Percent_shift'].isnull()]
election_data['DT_Effect']=election_data['Vote_Percent']-election_data['Vote_Percent_shift']
election_data=election_data[['state_po','Vote_Percent','DT_Effect']]
election_data=election_data[~election_data['DT_Effect'].isnull()]
election_data.rename(columns={"state_po":"State"},inplace=True)
election_data['Ideology_Bin']=pd.qcut(election_data['DT_Effect'], 2, labels=labels)
return election_data
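# Editor's usage sketch (hedged): the frame is expected to look like the MIT
# Election Lab presidential returns, with `year`, `party`, `state`, `state_po`,
# `candidatevotes` and `totalvotes` columns.
# df = pd.read_csv('1976-2016-president.csv')   # hypothetical file name
# DT_Effect(df).head()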
|
[
"powergascan@gmail.com"
] |
powergascan@gmail.com
|
b4713a8a30c7c5bf5f3f25909e5870277470c208
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_196/ch79_2020_04_07_01_22_28_765152.py
|
c9ba46f971fba6baced161e18915f79d3384d840
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
def monta_dicionario(a, b):
    # Builds a dictionary pairing each element of a with the element of b at the same index.
    dicionario = {a[i]: b[i] for i in range(len(a))}
    print(dicionario)
    return dicionario
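# Editor's example: monta_dicionario(['a', 'b'], [1, 2]) returns {'a': 1, 'b': 2}.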
|
[
"you@example.com"
] |
you@example.com
|
1e0e6b3ba1be9373e3fdbe2a95120a7d459522d2
|
95b119cf752125f1c783267d385dae89ac8e29a1
|
/trees/tree-level-order-traversal.py
|
08b86a3e1871cb2fe97cd8834d7f1fb89e4bd0a3
|
[] |
no_license
|
hanksudo/hackerrank
|
037496b78acc5d41fb2ef9198d45f567409bce15
|
f529fddb5c01e8b10203b043a033e20aeeee8e2d
|
refs/heads/master
| 2022-05-01T19:24:02.966337
| 2022-03-15T13:39:05
| 2022-03-15T13:39:05
| 234,511,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.info (the value of the node)
"""
def levelOrder(root):
queue = []
queue.append(root)
while queue:
# dequeue
node = queue.pop(0)
print(node.info, end=" ")
# enqueue
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
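# --- Editor's sketch (not in the original): the same traversal with an O(1)
# queue, since list.pop(0) is O(n) per dequeue.
from collections import deque

def level_order_deque(root):
    q = deque([root])
    while q:
        node = q.popleft()
        print(node.info, end=" ")
        if node.left:
            q.append(node.left)
        if node.right:
            q.append(node.right)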
|
[
"drapho@gmail.com"
] |
drapho@gmail.com
|
31af1ed2e3cab55ea4a1708340ded06d077d9a47
|
dd72bd5a699cfabb400dca6528816c74795cc208
|
/accounts/models.py
|
2eb405e18006395295656571364db9c4efb79ecb
|
[] |
no_license
|
Francis-Ebere-Emeafu/donations
|
1d6889a58ce989ab6447cb3b90ee4c933415a097
|
702eb09bae5f3f38a3cab981004a4f5f0d88be63
|
refs/heads/master
| 2023-02-18T18:37:19.631418
| 2021-01-21T01:31:05
| 2021-01-21T01:31:05
| 331,082,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,860
|
py
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
# Create your models here.
class Membership(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
dues = models.DecimalField(max_digits=10, decimal_places=2)
def __unicode__(self):
return '{} ({} per annum)'.format(self.name, self.dues)
class Account(models.Model):
BANK_TRANSFER = 0
DEBIT_CARD = 1
PAYMENT_TYPES = enumerate(('Bank Transfer', 'Debit Card'))
first_name = models.CharField(max_length=250)
last_name = models.CharField(max_length=250)
email = models.EmailField(max_length=200)
phone = models.CharField(max_length=20)
address = models.TextField(blank=True)
bio = models.TextField(blank=True)
country = models.CharField(max_length=100, blank=True, null=True)
city = models.CharField(max_length=100, blank=True, null=True)
when = models.DateTimeField(default=timezone.now)
membership = models.ForeignKey(Membership, null=True, on_delete=models.SET_NULL)
payment_type = models.PositiveIntegerField(
choices=PAYMENT_TYPES, default=DEBIT_CARD)
paid = models.BooleanField(default=False)
membership_email_sent = models.BooleanField(default=False)
membership_number = models.PositiveIntegerField(null=True, blank=True)
certificate = models.ImageField(
upload_to='certificates', null=True, blank=True)
next_payment_due = models.DateField(null=True, blank=True)
def __unicode__(self):
return self.email
@property
def member_id(self):
if self.membership_number:
mem_no = '{:0>4}'.format(self.membership_number)
yr = self.when.strftime('%y')
return 'ASN{}{}'.format(yr, mem_no)
#return 'ASN17{}'.format("{0:0>4}".format(self.membership_number))
else:
return 'N/A'
@property
def full_name(self):
return '{} {}'.format(self.first_name, self.last_name)
class GiftOptions(models.Model):
amount = models.DecimalField(max_digits=10, decimal_places=2)
dollar_value = models.DecimalField(
max_digits=20, decimal_places=2, default=0)
def __unicode__(self):
return '{} (${})'.format(self.amount, self.dollar_value)
class Meta:
verbose_name_plural = 'Gift Options'
class Gift(models.Model):
BANK_TRANSFER = 0
DEBIT_CARD = 1
PAYMENT_TYPES = enumerate(('Bank Transfer', 'Debit card'))
NGN = 0
USD = 1
GBP = 2
EURO = 3
CURRENCIES = enumerate(('NGN', 'USD', 'GBP', 'EURO'))
first_name = models.CharField(max_length=100, null=True)
last_name = models.CharField(max_length=100, null=True)
email = models.EmailField(max_length=200)
phone = models.CharField(max_length=20)
country = models.CharField(max_length=100, blank=True, null=True)
city = models.CharField(max_length=100, blank=True, null=True)
address = models.CharField(max_length=100, blank=True, null=True)
amount = models.DecimalField(max_digits=10, decimal_places=2, null=True)
#currency = models.PositiveIntegerField(choices=CURRENCIES, default=NGN)
#gift_option = models.ForeignKey(GiftOptions, null=True)
when = models.DateTimeField(default=timezone.now)
bio = models.TextField(blank=True)
email_sent = models.BooleanField(default=False)
payment_type = models.PositiveIntegerField(choices=PAYMENT_TYPES, default=BANK_TRANSFER)
paid = models.BooleanField(default=False)
def __unicode__(self):
return self.first_name
class Renewal(models.Model):
account = models.ForeignKey(Account, null=True, on_delete=models.SET_NULL)
payment_date = models.DateField()
def __unicode__(self):
return unicode(self.account)
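# Editor's note: Account.member_id zero-pads membership_number to four digits
# behind a two-digit join year, e.g. membership_number=7 joined in 2017 gives
# 'ASN170007'.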
|
[
"freemandigits@gmail.com"
] |
freemandigits@gmail.com
|
13edb85f07a2b7803336ad1485f6102d93b4bc74
|
eebf2e3578ccb79147c3f5c21438e4296f773a89
|
/pytorch-cifar/models/.ipynb_checkpoints/resnet-checkpoint.py
|
ca9d4bdb7e89e3e31155d17be75606b494630122
|
[
"MIT"
] |
permissive
|
minoriwww/waterquality
|
336dbe4ecd2b382669be0e777aec3a83e3d3138c
|
97db2406c37295dfdea5d2e006611243bd7ca01d
|
refs/heads/main
| 2023-03-22T05:24:41.153869
| 2021-03-09T06:44:12
| 2021-03-09T06:44:12
| 345,905,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,263
|
py
|
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
# self.linear = nn.Linear(512*block.expansion, num_classes)
self.linear = nn.Linear(512*256, num_classes)
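        # Editor's note: 512*256 in_features (instead of 512*block.expansion)
        # assumes a larger input resolution than CIFAR's 32x32 after the 4x4
        # average pool; test() below would need a matching input size.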
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
# print("After3: x.shape: " + str(out.shape))
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
# print("last shape " + str(out.shape))
out = self.linear(out)
return out
def ResNet18(n_classes):
return ResNet(BasicBlock, [2,2,2,2], n_classes)
def ResNet34(n_classes):
return ResNet(BasicBlock, [3,4,6,3], n_classes)
def ResNet50(n_classes):
return ResNet(Bottleneck, [3,4,6,3], n_classes)
def ResNet101(n_classes):
return ResNet(Bottleneck, [3,4,23,3], n_classes)
def ResNet152(n_classes):
return ResNet(Bottleneck, [3,8,36,3], n_classes)
def test():
    net = ResNet18(n_classes=10)  # ResNet18 requires the class count
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
|
[
"dltdc@gpu12.ttic.edu"
] |
dltdc@gpu12.ttic.edu
|
b6d9f7d19b82b9b6caf04f24e684f12677eb8384
|
bb8acf1eef4c746a220dd94be0f9d8af42ac6f27
|
/cad_python_Ext/MyLayers/structured_gate_action.py
|
d494109c4a66da8b949a2c6acd9ca352ea666716
|
[
"Apache-2.0"
] |
permissive
|
Lucas2012/simplified-SIM-structure-inference-machine
|
4342efa9f779c1e4fc15610ce8fbe76eaa7783dd
|
45998fd52ed19753ed0c8c3658cde7e3d964218c
|
refs/heads/master
| 2021-10-24T07:20:04.420140
| 2019-03-23T06:06:22
| 2019-03-23T06:06:22
| 43,234,820
| 3
| 1
|
Apache-2.0
| 2019-03-23T06:06:23
| 2015-09-27T04:51:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,905
|
py
|
import unittest
import tempfile
import os
import numpy
import caffe
class structured_gate(caffe.Layer):
"""A layer that initialize messages for recurrent belief propagation"""
def setup(self, bottom, top):
self.nScene = 6
self.nAction = 6
self.nPeople = 14
self.K_ = 0;
self.bottom_batchsize = 0
self.slen = 0
self.alen = 0
self.tlen_leaf = 0
self.tlen_mid = 0
self.sunit = 0
self.aunit = 0
self.tunit = 0
self.regularizer = 1
self.message_num_action = self.nPeople+1+2*(self.K_>0)
self.label_stop = []
self.top_batchsize = 0
self.on_edge = True
self.block_diff = True
self.zero2one = True
self.lamda = 0.01
self.C = 10
self.id = 0
def reshape(self, bottom, top):
# have 3 inputs: gate, a2a message, labels
# have one output: gated a2a message
bottom_batchsize = bottom[2].data.shape[0]
edge_num = self.nPeople
self.frame_num = bottom_batchsize/self.nPeople
self.bottom_batchsize = bottom[0].data.shape[0]
top[0].reshape(*bottom[1].data.shape)
def forward(self, bottom, top):
self.id += 1
gate_input = bottom[0].data.copy()
messages = bottom[1].data.copy()
label_stop = self.nPeople*numpy.ones([self.frame_num])
labels = bottom[2].data
count = 0
for i in range(0,self.frame_num):
for j in range(0,self.nPeople):
if labels[i*self.nPeople+j] == 0:
label_stop[i] = j
break
self.label_stop = label_stop
# the paired up inputs should be:
# [(1,2),(2,1)] [(1,3),(3,1)] [(1,4),(4,1)] [(1,5),(5,1)] [(1,6),(6,1)]
# [(2,3),(3,2)] [(2,4),(4,2)] [(2,5),(5,2)] [(2,6),(6,2)]
# [(3,4),(4,3)] [(3,5),(5,3)] [(3,6),(6,3)]
# [(4,5),(5,4)] [(4,6),(6,4)]
# [(5,6),(6,5)]
# gate design:
if self.on_edge:
s_gate = numpy.zeros(bottom[0].data.shape[0])
else:
s_gate = numpy.zeros([bottom[0].data.shape[0],self.nAction])
zero2one = self.zero2one
for i in range(0,self.bottom_batchsize):
s_gate[i] = (1+numpy.tanh(self.C*gate_input[i]))/2.0
'''idx = 0
for f in range(0,self.frame_num):
for i in range(0,int(self.label_stop[f])):
for j in range(0,int(self.label_stop[f]-1)):
if numpy.argmax(bottom[3].data[idx])-1 == labels[i+f*self.nPeople]:
s_gate[idx] = 1
else:
s_gate[idx] = 0'''
for i in range(0,self.bottom_batchsize):
top[0].data[i] = numpy.multiply(s_gate[i],messages[i])
def backward(self, top, propagate_down, bottom):
# to be written
# diffs for : bottom[0] -> paired gates input; bottom[1] -> messages
# diffs from top: gated_messages
gate_input = bottom[0].data.copy()
gates_diff = bottom[0].diff.copy()
messages = bottom[1].data
message_diff = bottom[1].diff.copy()
gated_message_diff = top[0].diff.copy()
label_stop = self.nPeople*numpy.ones([self.frame_num])
labels = bottom[2].data
count = 0
for i in range(0,self.frame_num):
for j in range(0,self.nPeople):
if labels[i*self.nPeople+j] == 0:
label_stop[i] = j
break
self.label_stop = label_stop
# gates diff:
# the paired up inputs should be:
# [(1,2),(2,1)] [(1,3),(3,1)] [(1,4),(4,1)] [(1,5),(5,1)] [(1,6),(6,1)]
# [(2,3),(3,2)] [(2,4),(4,2)] [(2,5),(5,2)] [(2,6),(6,2)]
# [(3,4),(4,3)] [(3,5),(5,3)] [(3,6),(6,3)]
# [(4,5),(5,4)] [(4,6),(6,4)]
# [(5,6),(6,5)]
# gate design:
if self.on_edge:
s_gate = numpy.zeros(bottom[0].data.shape[0])
else:
s_gate = numpy.zeros([bottom[0].data.shape[0],self.nAction])
# non-linearity design:
zero2one = self.zero2one
for i in range(0,self.bottom_batchsize):
s_gate[i] = (1+numpy.tanh(self.C*gate_input[i]))/2.0
#print s_gate[i]
count = 0
idx = 0
for i in range(0,self.bottom_batchsize):
diff = numpy.multiply(gated_message_diff[i],messages[i])
tanh_sq = numpy.multiply(s_gate[i],s_gate[i])
#print numpy.sum(diff)
if self.regularizer == 1:
gates_diff[i] = numpy.sum(diff)*(1-tanh_sq)*self.C + self.lamda*(1-tanh_sq)*self.C/2.0
#print numpy.sum(diff)*(1-tanh_sq)*self.C/2.0 + self.lamda*(1-tanh_sq)*self.C/2.0
#print 'gate',gates_diff[i]
#print 'diff',numpy.sum(diff)*(1-tanh_sq)*self.C
elif self.regularizer == 2:
gates_diff[i] = numpy.sum(diff)*(1-tanh_sq)*self.C + self.lamda*s_gate[i]*(1-tanh_sq)*self.C
message_diff[i] = numpy.multiply(gated_message_diff[i],s_gate[i])
bottom[0].diff[...] = gates_diff
if self.block_diff:
bottom[1].diff[...] = 0.0*message_diff
else:
bottom[1].diff[...] = message_diff
def python_net_file():
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'one' bottom: 'data' top: 'one'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'two' bottom: 'one' top: 'two'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'three' bottom: 'two' top: 'three'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""")
return f.name
class TestPythonLayer(unittest.TestCase):
def setUp(self):
net_file = python_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['three'].data.flat:
self.assertEqual(y, 10**3 * x)
def test_backward(self):
x = 7
self.net.blobs['three'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 10**3 * x)
def test_reshape(self):
s = 4
self.net.blobs['data'].reshape(s, s, s, s)
self.net.forward()
for blob in self.net.blobs.itervalues():
for d in blob.data.shape:
self.assertEqual(s, d)
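# Editor's note on the gate in forward()/backward() above: g(x) = (1 + tanh(C*x)) / 2
# squashes the scalar gate input into (0, 1). With C = 10, x = 0.1 already gives
# g ~= (1 + tanh(1)) / 2 ~= 0.88, so the gate saturates quickly and behaves
# almost like a hard on/off switch on each message.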
|
[
"zhiweid@sfu.ca"
] |
zhiweid@sfu.ca
|
6915850a16fdfc44342600508b47769fafc8541e
|
be5bcff6544928bdbb7501f778ddbee11d589235
|
/dataScripts/xls_files_to_csv.py
|
68e0b868f170cf4936f8f5238eb10c1da75f783f
|
[] |
no_license
|
Armadindon/PythonData
|
7441c2235861d9bb0a0acf72d38f5dfb91c225af
|
23127c80eb2b5f91d4760dac7eb7dbddb4bc67c5
|
refs/heads/master
| 2023-02-17T21:44:14.492906
| 2021-01-17T16:02:39
| 2021-01-17T16:02:39
| 309,948,480
| 0
| 0
| null | 2021-01-08T13:55:28
| 2020-11-04T08:59:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,790
|
py
|
"""
Script permettant la conversion de fichier xls (Excel) vers csv
pour la lecture avec la librairie pandas
"""
import os
import csv
import xlrd
def conversion_num_dept(code_dept):
"""
Convertit un département au bon format
Args:
code_dept (str): Code du département à convertir.
Returns:
code_dept (str): Code département au bon format.
"""
if code_dept[-1] == '0':
code_dept = code_dept[:-1]
if code_dept[0] == '0':
code_dept = code_dept[1]
return code_dept
def main():
"""
Fonction Principale
"""
results = []
result_depts = []
for file in sorted(filter(lambda x: x.endswith(".xls"),
os.listdir(os.path.join("data", "RevenusFiscaux")))):
workbook = xlrd.open_workbook(os.path.join("data", "RevenusFiscaux", file))
started = False
sheet = workbook.sheets()[0]
dept = conversion_num_dept(file.replace(".xls", ""))
for line in range(sheet.nrows):
line_data = sheet.row(line)
if line_data[2].value == "Commune":
started = True
continue
if line_data[4].value == "Total":
if not started:
result_depts.append({
"code_departement": dept,
"nom_departement": line_data[3].value,
"nbFoyerFiscaux": line_data[5].value
if line_data[5].value != "n.c." else "",
"revFiscalRefFoyers": line_data[6].value
if line_data[6].value != "n.c." else "",
"impotNet": line_data[7].value
if line_data[7].value != "n.c." else "",
"nbFoyersImposes": line_data[8].value
if line_data[8].value != "n.c." else "",
"revFiscalRefFoyersImpos": line_data[9].value
if line_data[9].value != "n.c." else ""
})
else:
results.append({
"code_ville": dept + line_data[2].value,
"nom_commune": line_data[3].value,
"nbFoyerFiscaux": line_data[5].value
if line_data[5].value != "n.c." else "",
"revFiscalRefFoyers": line_data[6].value
if line_data[6].value != "n.c." else "",
"impotNet": line_data[7].value
if line_data[7].value != "n.c." else "",
"nbFoyersImposes": line_data[8].value
if line_data[8].value != "n.c." else "",
"revFiscalRefFoyersImpos": line_data[9].value
if line_data[9].value != "n.c." else ""
})
with open(os.path.join("data", "revenuFiscauxDepts.csv"), "w+", encoding='utf-8') as file:
fields = ["code_departement",
"nom_departement", "nbFoyerFiscaux",
"revFiscalRefFoyers", "impotNet",
"nbFoyersImposes", "revFiscalRefFoyersImpos"]
writer = csv.DictWriter(file, fields)
writer.writeheader()
writer.writerows(result_depts)
with open(os.path.join("data", "revenuFiscauxCommunes.csv"),
"w+", encoding='utf-8') as file:
fields = ["code_ville", "nom_commune",
"nbFoyerFiscaux", "revFiscalRefFoyers",
"impotNet", "nbFoyersImposes",
"revFiscalRefFoyersImpos"]
writer = csv.DictWriter(file, fields)
writer.writeheader()
writer.writerows(results)
if __name__ == "__main__":
main()
|
[
"baptisteperrin77@gmail.com"
] |
baptisteperrin77@gmail.com
|
2aef48d1aa8dec4012fd07f8560afbd4b24fafeb
|
540df8592749de8e9dc429ad7f5365238bc850c4
|
/4.Practice.py
|
92048ad37ad116409215419c025814de2a84d9bd
|
[] |
no_license
|
2anirban/heroku_demo
|
f50bc10c348030d52e2f5ec1c0bd47e3162474ce
|
da52bdf5cfea21e98f5b4d9b4868ab1985c7eaa2
|
refs/heads/main
| 2023-04-08T13:50:20.071207
| 2021-04-29T15:15:34
| 2021-04-29T15:15:34
| 362,854,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
dataset = pd.read_csv('hiring_1.csv')
print(dataset)
dataset['Experience'].fillna(0, inplace=True)
dataset['test_score'].fillna(dataset['test_score'].mean(), inplace=True)
X = dataset.iloc[:, :3]
###Converting words to integer values
##def convert_to_int(word):
## word_dict = {'one':1, 'two':2, 'three':3, 'four':4, 'five':5, 'six':6, 'seven':7, 'eight':8,
## 'nine':9, 'ten':10, 'eleven':11, 'twelve':12, 'zero':0, 0: 0}
## return word_dict[word]
##
##X['experience'] = X['Experience'].apply(lambda x : convert_to_int(x))
y = dataset.iloc[:, -1]
#Splitting Training and Test Set
#Since we have a very small dataset, we will train our model with all availabe data.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
#Fitting model with trainig data
regressor.fit(X, y)
# Saving model to disk
pickle.dump(regressor, open('model.pkl','wb'))
### Loading model to compare the results
model = pickle.load(open('model.pkl','rb'))
print(model.predict([[2, 9, 6]]))
|
[
"noreply@github.com"
] |
2anirban.noreply@github.com
|
c796416794b9fa7de79303f934efe14617e7f2da
|
d75b1b88ef64786498ad257a9a6d12c01a3f8cd2
|
/certif_page/models/Token.py
|
0c7e80d08532c62cb8a903f691e718a0a23632d7
|
[
"Apache-2.0"
] |
permissive
|
xSandie/schoolCertif
|
9d8928efbbb134aa4139dad00dc5b4cfd93affab
|
df5e235305886a0f110cc5d507d863c89df56e57
|
refs/heads/master
| 2022-12-15T11:41:55.119178
| 2019-07-02T14:59:39
| 2019-07-02T14:59:39
| 157,819,681
| 0
| 0
|
Apache-2.0
| 2022-12-08T04:54:33
| 2018-11-16T05:56:37
|
Python
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
from certif_page.models.base import Base, db
class Token(Base):
id = db.Column(db.BigInteger,primary_key=True, autoincrement=True)
value = db.Column(db.String(140), index=True, nullable=False)
    limitPerDay = db.Column(db.Integer, default=80000)  # daily request quota, 80000 per day by default
    status = db.Column(db.SmallInteger, default=1)  # 1 = normal
    permission = db.Column(db.SmallInteger, default=0)  # 0 = no avatar access, 1 = may view avatars, 10 = full permissions
|
[
"sandiexiang@foxmail.com"
] |
sandiexiang@foxmail.com
|
58f5ebbdecaff2dbcb6be1432c7638d04fefd5ef
|
e733d07a1492f6e9b762d9ca496ec59668aedb95
|
/qcloudsdkbmeip/EipAclBmApplyRequest.py
|
bf62678e2db69b2618ca2c58f848c0db67259c13
|
[
"Apache-2.0"
] |
permissive
|
QcloudApi/qcloudcli
|
1f67d8467b81ac8964362491cd4f3104f8e59161
|
ba16161f65df5f621d9f1c5587b9900dca600cb5
|
refs/heads/master
| 2023-08-15T01:51:05.236254
| 2018-07-11T08:07:29
| 2018-07-11T08:07:29
| 100,922,202
| 8
| 6
| null | 2018-03-29T11:57:26
| 2017-08-21T06:55:45
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class EipAclBmApplyRequest(Request):
def __init__(self):
super(EipAclBmApplyRequest, self).__init__(
'bmeip', 'qcloudcliV1', 'EipAclBmApply', 'bmeip.api.qcloud.com')
def get_aclName(self):
return self.get_params().get('aclName')
def set_aclName(self, aclName):
self.add_param('aclName', aclName)
def get_status(self):
return self.get_params().get('status')
def set_status(self, status):
self.add_param('status', status)
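# Editor's usage sketch (parameter values are assumptions, not documented here):
# req = EipAclBmApplyRequest()
# req.set_aclName('my-acl')
# req.set_status(1)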
|
[
"zhiqiangfan@tencent.com"
] |
zhiqiangfan@tencent.com
|
6d8c6af32fef5c9cfcf561b7acdec6ef6048243d
|
a841be002c03abcf03c875ba8cde5ab28e34de79
|
/Toxic/bayes.py
|
091fd7c3353edb719c46586b1307c12837209117
|
[] |
no_license
|
PForet/Kaggle
|
a7a89e02be0f980671b6c4393c7dc0cf9ac7348a
|
c970708cb975d234f2b8b664e5bdc0bab9758d61
|
refs/heads/master
| 2021-05-15T12:17:31.756098
| 2018-01-05T16:05:06
| 2018-01-05T16:05:06
| 108,417,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,617
|
py
|
from utils import load_dataframes, _all_labels, remove_non_strings, dict_to_submit
import numpy as np
import re, string
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split
#from sklearn.naive_bayes import MultinomialNB as model
from sklearn.ensemble import GradientBoostingClassifier
from NBSVM import NBSVM as model
from sklearn.metrics import log_loss
sample, test, train = load_dataframes()
Y_dict = {k:train[k] for k in _all_labels}
X = train['comment_text']
y_train, y_val = {}, {}
for k in _all_labels:
X_train_r, X_val_r, y_train[k], y_val[k] = train_test_split(X, Y_dict[k], random_state = 1)
re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')
#def tokenize(s): return re_tok.sub(r' \1 ', s).split()
def tokenize(s): return re.findall(r"[\w']+|!|@|!!",s)
def processing(s):
re_links = re.compile("http.*?\s")
s = re_links.sub("_link_", s)
return s.lower()
tf_transformer = TfidfVectorizer(use_idf=True,
tokenizer=tokenize,
#ngram_range=(1,2),
sublinear_tf=True,
preprocessor=processing,
min_df=10,
max_df=0.9).fit(X_train_r)
"""
tf_transformer = CountVectorizer(min_df=12,
max_df=0.5,
ngram_range=(1,2)).fit(X_train)
"""
X_train = tf_transformer.transform(X_train_r)
X_val = tf_transformer.transform(X_val_r)
X_test = tf_transformer.transform(remove_non_strings(test['comment_text']))
predictions_dict, val_dict, train_dict = {}, {}, {}
logloss_ = []
print("Naive Bayes classifier")
for k in _all_labels:
gnb = model(alpha=0.3, C=1.5)
gnb.fit(X_train, y_train[k])
prediction = [e[1] for e in gnb.predict_proba(X_val)]
train_prediction = [e[1] for e in gnb.predict_proba(X_train)]
print("Naive bayes Logloss for {} : {} with train loss of {}".format(k,
log_loss(y_val[k], prediction),log_loss(y_train[k], train_prediction)))
val_dict[k] = prediction
train_dict[k] = train_prediction
logloss_.append(log_loss(y_val[k], prediction))
predictions_dict[k] = [e[1] for e in gnb.predict_proba(X_test)]
print("Mean logloss : {}".format(np.mean(logloss_)))
print("Assembling")
def dict_to_X(d):
return np.array([d[k] for k in _all_labels]).transpose()
X_train_pred = dict_to_X(train_dict)
X_val_pred = dict_to_X(val_dict)
X_test_pred = dict_to_X(predictions_dict)
assembled_predictions_dict, assembled_val_dict, assembled_train_dict = {}, {}, {}
assembled_logloss_ = []
for k in _all_labels:
gnb = GradientBoostingClassifier(n_estimators=1000)
gnb.fit(X_train_pred, y_train[k])
prediction = [e[1] for e in gnb.predict_proba(X_val_pred)]
train_prediction = [e[1] for e in gnb.predict_proba(X_train_pred)]
print("Assembled Logloss for {} : {} with train loss of {}".format(k,
log_loss(y_val[k], prediction),log_loss(y_train[k], train_prediction)))
assembled_val_dict[k] = prediction
assembled_train_dict[k] = train_prediction
assembled_logloss_.append(log_loss(y_val[k], prediction))
assembled_predictions_dict[k] = [e[1] for e in gnb.predict_proba(X_test_pred)]
print("Mean logloss : {}".format(np.mean(assembled_logloss_)))
dict_to_submit(assembled_predictions_dict,"assembled_BNBv2_all.csv")
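# Editor's note: this is a two-level stack -- the per-label NBSVM probabilities
# become six features for a per-label GradientBoostingClassifier. Beware that
# the level-2 models are fit on level-1 *training-set* predictions, which makes
# their train loss optimistic; out-of-fold predictions would avoid that leak.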
|
[
"noreply@github.com"
] |
PForet.noreply@github.com
|
304752a2b7b71e334f306da10f61daa8c1aa16d8
|
cb34657f22bd7754aa1c9e134e06c73bf91c4cde
|
/sv94.py
|
b52636aa738a900dab5027440399a0c92af209bd
|
[] |
no_license
|
varsha126/varshaguvi
|
4cf4b0b4d25264340ee23e5d968a8403eab3d33a
|
ea624a972c0d8b84d7f6081cf789eca35a4d6e81
|
refs/heads/master
| 2020-06-07T17:04:45.831040
| 2019-08-04T02:44:45
| 2019-08-04T02:44:45
| 193,061,419
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
nvn=input()
print(ord(nvn))
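# Editor's note: ord() maps a single character to its Unicode code point,
# e.g. ord('A') -> 65; multi-character input would raise a TypeError.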
|
[
"noreply@github.com"
] |
varsha126.noreply@github.com
|
e1c7ce2caac30bd082b0767f7ae4434eb4acc734
|
7b35e24e1164a5356b37e1e705c0d5e08540157f
|
/src/cnn_mod_cla.py
|
ca95ee390904620f6295db82ec40a7c39203b272
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Lighting-Zhu/py-radio-autoencoder
|
6fe341fea3e96f13499571c9c65ef95104211924
|
842cd1f14a17ee0798766dffcf132950a9e745bd
|
refs/heads/master
| 2023-07-02T00:14:35.491304
| 2021-08-09T04:57:02
| 2021-08-09T04:57:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,078
|
py
|
import tensorflow as tf
from tensorflow.keras import layers, Model, Sequential
def cnn_mod_cla_2d(in_shape, class_num):
    '''The "2" refers to (mod, snr).'''
input_tensor = layers.Input(shape=in_shape, dtype='float32')
x = layers.Conv2D(filters=128, kernel_size=8, padding='valid')(input_tensor)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')(x)
x = layers.Conv2D(filters=64, kernel_size=16, padding='valid')(x)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')(x)
x = layers.Flatten()(x)
x = layers.Dense(units=128)(x)
x = layers.ReLU()(x)
x = layers.Dense(units=64)(x)
x = layers.ReLU()(x)
x = layers.Dense(units=32)(x)
x = layers.ReLU()(x)
x = layers.Dense(units=class_num)(x)
output = layers.Softmax()(x)
    # After defining the layers, Model() must still be called to actually build the model
model = Model(inputs=input_tensor, outputs=output)
return model
def cnn_mod_cla_1d(in_shape, class_num):
    '''The "2" refers to (mod, snr).'''
input_tensor = layers.Input(shape=in_shape, dtype='float32')
x = layers.Conv1D(filters=128, kernel_size=8, padding='valid')(input_tensor)
x = layers.ReLU()(x)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='valid')(x)
x = layers.Conv1D(filters=64, kernel_size=16, padding='valid')(x)
x = layers.ReLU()(x)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='valid')(x)
x = layers.Flatten()(x)
x = layers.Dense(units=128)(x)
x = layers.ReLU()(x)
x = layers.Dense(units=64)(x)
x = layers.ReLU()(x)
x = layers.Dense(units=32)(x)
x = layers.ReLU()(x)
x = layers.Dense(units=class_num)(x)
output = layers.Softmax()(x)
    # After defining the layers, Model() must still be called to actually build the model
model = Model(inputs=input_tensor, outputs=output)
return model
if __name__ == '__main__':
model = cnn_mod_cla_1d(in_shape=(128, 2), class_num=11)
model.summary()
|
[
"whw0315@gmail.com"
] |
whw0315@gmail.com
|
b9da5c9c9860ad4abda5235757331a6bd4a6f19b
|
3e2447737acc8e6bef6728b1a8e5f1d5e6db2968
|
/opennem/spiders/wem/__init__.py
|
d55ac53eb10d2c124149c65b442bb414f607efbd
|
[
"MIT"
] |
permissive
|
gaslitbytech/opennem
|
5a5197003662725ccd2f82d790cdb1495a975a07
|
deec3e2079db9d9d84171010fd0c239170d1e7ce
|
refs/heads/master
| 2023-07-23T14:08:28.949054
| 2020-10-09T03:53:20
| 2020-10-09T03:53:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
import logging
from datetime import datetime
import scrapy
from opennem.pipelines.files import LinkExtract
from opennem.pipelines.nem import ExtractCSV
from opennem.pipelines.wem.balancing_summary import WemStoreBalancingSummary
from opennem.spiders.dirlisting import DirlistingSpider
def get_date_component(format_str):
return datetime.now().strftime(format_str)
class WemCurrentSpider(scrapy.Spider):
url_params = {
"day": get_date_component("%d"),
"month": get_date_component("%m"),
"year": get_date_component("%Y"),
}
def start_requests(self):
request_url = self.start_url.format(**self.url_params)
yield scrapy.Request(request_url)
def parse(self, response):
yield {"content": response.text}
class WemHistoricSpider(DirlistingSpider):
allowed_domains = ["wa.nemweb.com.au"]
pipelines = set([LinkExtract,])
# Archives tend to contain large zips of embedded zips so throttle
# to limit memory use
custom_settings = {
"CONCURRENT_REQUESTS": 4,
"CONCURRENT_ITEMS": 8,
}
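# Editor's note: url_params is a class attribute, so it is evaluated once at
# import time -- a long-running process would format start_url (assumed to be
# set on subclasses) with a stale date. get_date_component('%Y') -> e.g. '2020'.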
|
[
"nc9@protonmail.com"
] |
nc9@protonmail.com
|
ea27dda43f6f92c519b2d4f892418fabf50c910a
|
be6bba1b253552aca871c1f6b005f880fe3ee637
|
/tax_calculator/controllers/register_all_controllers.py
|
76500bd9977c79c4314c4b48e549b8bb199f00cf
|
[] |
no_license
|
kadekutama/TaxCalculator
|
fd116075aeb5cafad52d8f22e4a7aa942c7a02de
|
51eb4ba16685325d183ca4fe0ba16dbd4f4c1317
|
refs/heads/master
| 2021-07-05T12:22:20.719561
| 2020-09-08T18:23:40
| 2020-09-08T18:23:40
| 177,821,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from .products import *
def register_routes(app):
app.add_route(index, "/", methods=["GET"])
app.add_route(get_all_products, "/get_all_products/<mode>", methods=["GET"])
app.add_route(get_bill, "/get_bill", methods=["POST"])
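# Editor's usage sketch (the app object is assumed to expose add_route, as the
# calls above imply):
# from tax_calculator.app import app   # hypothetical import path
# register_routes(app)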
|
[
"kadekdwibudiutama@gmail.com"
] |
kadekdwibudiutama@gmail.com
|
72b2936601244a9d25293638394d04fed8cfece3
|
836ea2055f7d7b73ef1c970268801884799d8f59
|
/lib/risksense_api/__subject/__application_findings/__init__.py
|
736e0937bf1eca346b873459234d4b34694cd17f
|
[
"Apache-2.0"
] |
permissive
|
mtornga/risksense_tools
|
aee3ee71597712077cd67406c38b4e2087444b55
|
1564cd93505a4d4ccd546f68310e0a09f888e590
|
refs/heads/master
| 2023-05-14T15:26:00.912692
| 2021-06-09T00:03:42
| 2021-06-09T00:03:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
""" *******************************************************************************************************************
|
| Name : __init__.py
| Description : Application Findings
| Project : risksense_api
| Copyright : 2019 RiskSense, Inc.
| License : Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
|
******************************************************************************************************************* """
from .__application_findings import ApplicationFindings
"""
Copyright 2019 RiskSense, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
[
"burr.webb@risksense.com"
] |
burr.webb@risksense.com
|
b056ae7c81d4aeb266972e6bf0ec2b8a88e4a3bf
|
bed2fb940e90ef573a047aee5f5154ccca4a1c13
|
/scripts/doppler_sim/dopsim.py
|
411ca9f70e6f2658af442196641b39df9ccdba85
|
[] |
no_license
|
dcoder-mm/heli-shadow-supplementary
|
045f387b64d596e3356c6c566384e7cd3d7899ff
|
fac9e6277dfb091e5ee004a8dbde51269a473508
|
refs/heads/main
| 2023-06-27T15:51:15.883570
| 2021-08-02T01:59:11
| 2021-08-02T01:59:11
| 391,168,015
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,790
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.signal import sweep_poly
from PIL import Image, ImageDraw
# Ground truth from https://mars.nasa.gov/mars2020/mission/where-is-the-rover/
meters_per_pixel = 50/167
rover = np.array([740, 274, 0])*meters_per_pixel
heli_p1 = np.array([501,209, 5])*meters_per_pixel
heli_p2 = np.array([573, 653, 5])*meters_per_pixel
heli_p3 = np.array([520, 210, 5])*meters_per_pixel
title = "Ingenuity 4th flight. BPF tone Doppler shift simulation"
# rover = np.array([740, 274, 0])*meters_per_pixel
# heli_p1 = np.array([501,209, 5])*meters_per_pixel
# heli_p2 = np.array([457, 653, 5])*meters_per_pixel
# heli_p3 = np.array([520, 210, 5])*meters_per_pixel
# title = "10° test"
# rover = np.array([740, 274, 0])*meters_per_pixel
# heli_p1 = np.array([520, 210, 5])*meters_per_pixel
# heli_p2 = np.array([531, 306, 5])*meters_per_pixel
# heli_p3 = np.array([633, 639, 5])*meters_per_pixel
# title = "Offscreen 10° turn"
# meters_per_pixel=1.0
# rover = np.array([0, 0, 0])*meters_per_pixel
# heli_p1 = np.array([5000.0,200, 200])*meters_per_pixel
# heli_p2 = np.array([-3000.0, 200, 200])*meters_per_pixel
# title = "Flyby, 200m above, 200m sideways, 200m/s"
heli_pos = heli_p1.copy()
heli_direction = np.zeros(3)
heli_target = heli_p2
heli_speed = 0
heli_vel = 0
dt = 0.1
hover_time_at_liftoff = 10
hover_time_at_landing = 10
hover_time_at_p2 = 3
accel_time = 10
max_speed = 3.5
center_freq = 84.36
speed_of_sound = 250
do_map = True
rovel_color = (220,20,20)
heli_color = (20,20,220)
r = 5
points = []
time = 0.0
dist_p1 = np.linalg.norm(rover-heli_p1)
print("Rover-Heli dist @ start: %.1f meters, sound lag %.2f sec"%( dist_p1, dist_p1/speed_of_sound ))
dist_p2 = np.linalg.norm(rover-heli_p2)
print("Rover-Heli dist max: %.1f meters, sound lag %.2f sec"%( dist_p2, dist_p2/speed_of_sound ))
print("p1-p2 trip: %.1f meters"%( np.linalg.norm(heli_p1-heli_p2) ))
print("p2-p3 trip: %.1f meters"%( np.linalg.norm(heli_p2-heli_p3) ))
if do_map:
flight_map = Image.open("flight_map.jpg")
draw = ImageDraw.Draw(flight_map)
p = rover / meters_per_pixel
draw.ellipse((p[0]-r, p[1]-r, p[0]+r, p[1]+r), fill = rovel_color, outline ='black')
def set_target(p):
global heli_direction, heli_target
heli_target = p
v = p - heli_pos
heli_direction = v/np.linalg.norm(v)
def update_heli():
global heli_pos, heli_vel, points, time
heli_vel = heli_direction * heli_speed
heli_pos += heli_vel*dt
d1 = np.linalg.norm(rover-heli_pos)
d2 = np.linalg.norm(rover-(heli_pos+heli_vel*dt))
sh = (d2 - d1)/dt
fo = center_freq*(speed_of_sound)/(speed_of_sound+sh)
print("%.1fs\t%.1fm\t%.1fm/s\t%.1fm/s\t%.1fHz"%(time, d1, heli_speed, sh, fo))
points.append(fo)
if do_map:
m = flight_map.copy()
draw = ImageDraw.Draw(m)
p = heli_pos / meters_per_pixel
draw.ellipse((p[0]-r, p[1]-r, p[0]+r, p[1]+r), fill = heli_color, outline ='black')
m.save("./frames/%d.jpg"%(int(time*10)), quality=95)
m.close()
time += dt
def hover(s):
for i in range(int(s/dt)):
update_heli()
def accelerate(speed, time, k=1):
global heli_speed
for i in range(int(time/dt)):
heli_speed+=speed/(time/dt)*k
update_heli()
def decelerate(target_speed, time):
accelerate(heli_speed-target_speed, time, k=-1)
def free_flight(warn_time):
while True:
braking_dist = heli_speed/2*warn_time
update_heli()
if np.linalg.norm(heli_pos-heli_target)<braking_dist:
break
# ~~~~~~ Ground truth
hover(hover_time_at_liftoff) # Hovering at p1
set_target(heli_p2)
accelerate(max_speed, accel_time) # Accelerating at p1
free_flight(accel_time) # Flying to p2
decelerate(0, accel_time) # Decelerating at p2
hover(hover_time_at_p2) # Hovering at p2
set_target(heli_p3) # Going home
accelerate(max_speed, accel_time) # Accelerating at p2
free_flight(accel_time) # Flying to p3
decelerate(0, accel_time) # Decelerating at p3
hover(hover_time_at_landing) # Hovering at p3
# ~~~~~~ 10° offscreen turn
# hover(hover_time_at_liftoff)
# set_target(heli_p2)
# accelerate(max_speed, accel_time)
# free_flight(0.5)
# set_target(heli_p3)
# free_flight(accel_time)
# decelerate(0, accel_time)
# hover(hover_time_at_p2)
# set_target(heli_p2)
# accelerate(max_speed, accel_time)
# free_flight(0.5)
# set_target(heli_p1)
# free_flight(accel_time)
# decelerate(0, accel_time)
# hover(hover_time_at_landing)
# ~~~~~~ Flyby 200m/s
# set_target(heli_p2)
# accelerate(200, 1)
# free_flight(1)
plt.figure(figsize=(12, 6), dpi=100)
plt.title(title)
plt.xlabel("Time (s)")
plt.ylabel("BPF1 tone (Hz)")
plt.axhline(y=center_freq, color='g', linestyle='--', linewidth=1)
plt.plot(np.linspace(0, len(points)*dt, len(points)), points)
plt.show()
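# Editor's worked example of the shift computed in update_heli():
# with center_freq = 84.36 Hz, speed_of_sound = 250 m/s and a recession rate
# sh = +3.5 m/s, fo = 84.36 * 250 / (250 + 3.5) ~= 83.2 Hz -- a receding
# helicopter pulls the blade-pass tone down, as the dips in the plot show.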
|
[
"dcoder.mail@gmail.com"
] |
dcoder.mail@gmail.com
|
baae253d96fb13826c09de4bf5fcd9dc7272919d
|
73cd7ebe847adf267a3f9d9a5176d1421e1e93b3
|
/Exercises/testExercise.py
|
196c5d1c8ba800165629fa706fb02aac23b583e2
|
[
"MIT"
] |
permissive
|
davidavg/OOP_Python
|
4658b891b6a27397751072f54e621614c7b5ae87
|
ca4e8376a50b9c81b5ac18c466bd8d147bdbe679
|
refs/heads/master
| 2020-05-25T07:13:15.737224
| 2019-05-20T17:15:48
| 2019-05-20T17:15:48
| 187,681,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
'''
Created on Nov 05, 2018
@author: jose aperez
'''
#import abstract library
from abc import abstractmethod, ABC
'Class Declaration Person'
class Person(ABC):
'Constructor declaration'
def __init__(self,name):
'Class variables'
self.name = name
'Method talk'
def talk(self, name):
print("Words = " + str(name))
'Method hobby'
@abstractmethod
def hobby(self):
pass
'Class Declaration Teacher'
class Teacher(Person):
'Constructor declaration'
def __init__(self,signature):
'Class variables'
'Private'
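        # Editor's note: `name` below is the module-level variable read from
        # input() at the bottom of the file, not a constructor argument.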
self.__name = name
self.signature = signature
'Method hobby'
def hobby(self):
print("My hobby is to read")
'Method teach'
def teach(self):
        print(str(self.getName()) + " is giving " + str(self.signature) + " class")
'Getter'
def getName(self):
return self.__name
'Setter'
def setName(self,Newvalue):
self.__name = Newvalue
'Class Declaration Engineer'
class Engineer(Person):
'Method hobby'
def hobby(self):
print("My hobby is to play video games")
'''
Main program
'''
'Save variables'
name= str(input("Please write your name: "))
'Create Object'
#PersonObj = Person(name)
EngineerObj = Engineer(name)
'Create Class Object Teacher'
TeacherObj = Teacher(name)
TeacherObj.teach()
'Print Variables from Object'
#print("Accediendo a name desde Object Person = " + str(PersonObj.name))
print("Accediendo a name desde Object Engineer = " + str(EngineerObj.name))
'Print encapsulated name'
print("Accediendo a variable encapsulada:", TeacherObj.getName())
'Change encapsulated name value'
TeacherObj.setName("New name value")
print("Accediendo a variable con valor nuevo:", TeacherObj.getName())
|
[
"david avalos@tiempodevelopment.com"
] |
david avalos@tiempodevelopment.com
|
6b200f1ece1ee35292f0e8cd767179a1932d439b
|
bb21604fde22e52d9136c95569d469316dc1b3a9
|
/blog/admin.py
|
2eecbd25abc3cefa7adf6b8d74ac0271a5878fa9
|
[] |
no_license
|
MerryPoppins92/django-blogphoto
|
7be47ad6893cdba30e89b8787c0bf2514cb74505
|
62f7f1f2019f4c49aaa7c20171f9900a9cfe8719
|
refs/heads/master
| 2023-01-18T17:15:11.064804
| 2020-11-20T14:08:40
| 2020-11-20T14:08:40
| 297,322,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from django.contrib import admin
from .models import Blog, Blogbuster
admin.site.register(Blog)
admin.site.register(Blogbuster)
# Register your models here.
|
[
"merry2lassus@gmail.com"
] |
merry2lassus@gmail.com
|
3f099eee1f57de1fb0b2421f2f6f6a424b5ca067
|
d6d0b175251e8bce792381b5c8140c1e64154e66
|
/aliexpress.py
|
21ae2e9046621f9cc5df91ba94fea292b99ee973
|
[] |
no_license
|
AURZeeshan/Practice
|
4724711ef5443120a81fe676a3a2bd6a4ed464fb
|
3a9fbfb6a49f51d1aff483211b9f2c9050f46ae8
|
refs/heads/master
| 2023-02-15T11:11:00.093950
| 2020-12-14T16:50:45
| 2020-12-14T16:50:45
| 321,410,074
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,132
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import urllib
import urllib.request
import time
import pandas as pd
import csv
import random
from os import name, system
# Current working directory (root_dir)
current = os.getcwd()
# Image path suffix and root_dir global
direc = "/image"
global root_dir
# AliExpress scraper
# Selenium Chrome driver
browser = r'chromedriver.exe'
# url of aliexpress
url = "https://www.aliexpress.com"
# url ="http://api.scraperapi.com?api_key=4b12245e29bfe1d452c932337876fc9e&url=https://www.aliexpress.com"
# url = "http://api.scraperapi.com?api_key=4b12245e29bfe1d452c932337876fc9e&url=https://www.aliexpress.com/wholesale?catId=0&initiative_id=SB_20201129104137&SearchText=Men+s+Fashion"
driver = webdriver.Chrome(executable_path=browser)
# Input from user
find = input("input a category : ")
# get url
driver.get(url)
List_page_window = driver.current_window_handle
# Download Images and save in sub DIR
def download_img(url , address):
name = random.randrange(1,1000)
global full_name
try:
full_name = str(address)
full_name += str(name) + '.jpg'
urllib.request.urlretrieve(url,full_name)
print('\t Your image is downloaded and saved.')
except:
full_name = None
# Creates two directories: a root directory plus a sub-directory named after the category
def directry():
global photospath
root_dir = 'Ali express- ' + str(find)
os.mkdir(root_dir)
os.chdir(root_dir)
# change root dir to sub dir
os.mkdir(find)
os.chdir(find)
sub_dir = os.getcwd()
photospath = sub_dir + direc
# Call the directory function, then step back up one directory
directry()
os.getcwd()
os.chdir("..")
os.getcwd()
# Send Category for search
InputField = driver.find_element_by_name('SearchText')
InputField.send_keys(find)
InputField.submit()
driver.minimize_window()
time.sleep(3)
# function for scroll of page
def Scroll():
driver.execute_script("window.scroll(500,1500)")
time.sleep(4)
driver.execute_script("window.scroll(1500,2500)")
time.sleep(4)
driver.execute_script("window.scroll(2500,3500)")
time.sleep(4)
driver.execute_script("window.scroll(2500,4500)")
time.sleep(4)
driver.execute_script("window.scroll(4500,5200)")
time.sleep(4)
# Scroll()
# # get Product Linkshoe
# cats = driver.find_elements_by_xpath('//*[@class="item-title"]')
driver.execute_script("window.scroll(4000,5000)")
Getpage = driver.find_elements_by_xpath('//*[@class="next-pagination-list"]/button')
for pageNo in range(len(Getpage)):
key = 0
print("\n\n \t Scraper Start...wait a few mint >> PageNo : ", pageNo)
if pageNo >= 0:
        # Empty dictionary for the scraped rows
D = {}
Scroll()
# # get Product Link
cats = driver.find_elements_by_xpath('//*[@class="item-title"]')
# Start of for loop -----------------------
for c in cats:
try: # Main try-cat
cat = c.get_attribute('href')
driver.execute_script("window.open('" + cat +"');")
detail_page_window = driver.window_handles
new_window = [x for x in detail_page_window if x != List_page_window][0]
driver.switch_to.window(new_window)
time.sleep(2)
#driver1 = webdriver.Chrome(browser)
# driver.execute_script("window.open('" + my_href +"');")
try: # 1 try-title
Title = driver.find_element_by_xpath('//*[@id="root"]/div/div/div/div/div/h1').text
except: #1 except -title
Title = None
try: # 2 try-price
price = driver.find_element_by_xpath('//*[@class="product-price-current"]/span').text
except: # 2 except -price
price = None
try: # 3 try-rating
rating = driver.find_element_by_xpath('//*[@class="product-reviewer-reviews black-link"]').text
except: # 3 except -rating
rating = None
try: # 4 try-image
# Img = driver1.find_element_by_xpath('//*[@class="magnifier-image"]')
Img = driver.find_elements_by_xpath('//*[@class="images-view-item"]/img')
for photo in Img:
image = photo.get_attribute('src')
download_img(image, photospath)
# print("\t {}\n {}\n {}\n {}\n {}\n ".format(Title, price, rating, full_name,cat))
D[key] = [Title,price,rating,full_name,cat]
except: # 4 except -image
image = None
except IndexError as e: #except -Main
print(e)
# os.chdir(os.path.dirname(Main))
time.sleep(2)
key +=1
df = pd.DataFrame.from_dict(D,orient='index',columns=['Title','Price','rating','Image Path','Product URL']).drop_duplicates(keep=False)
name = find + '.csv'
        if os.path.isfile(name):
            df.drop_duplicates(keep=False).to_csv(name, mode='a', header=False, index=True)
        else:
            df.drop_duplicates(keep=False).to_csv(name, mode='w', header=True, index=True)
print("\t Get Detail NO {} from , Page No << {} \n".format(key,pageNo))
driver.close()
driver.switch_to.window(List_page_window)
# end of for loop-------------------------
print("\t Completed Pages Are : ",pageNo)
driver.execute_script("window.scroll(4000,5000)")
time.sleep(2)
driver.find_element_by_xpath('//*[@class="next-btn next-medium next-btn-normal next-pagination-item next-next"]').click()
|
[
"zk40003@gmail.com"
] |
zk40003@gmail.com
|
31cacf2f4017c05027b72b80151b2a4900c83bcb
|
b223a4176da6e9510175929ac049d1990b3cbedf
|
/blogs/blog/templatetags/blog_tags.py
|
bac880b5161099028b05fcf73a7a7d422b8ce2d0
|
[] |
no_license
|
DewangYang/Blogs
|
3ff81ceedcf31a458629a1de8d94a10feca1934b
|
cc15a6d3da376e49cee2babc9bf9ca75fceecc31
|
refs/heads/master
| 2020-04-09T21:22:39.743400
| 2018-12-06T01:38:01
| 2018-12-06T01:38:01
| 160,598,869
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
from django import template
from django.db.models.aggregates import Count
from ..models import Post, Category, Tag
register = template.Library()
# Template tag: latest posts
@register.simple_tag
def get_recent_posts(num=5):
return Post.objects.all().order_by('-created_time')[:num]
# Template tag: archives
@register.simple_tag
def archives():
return Post.objects.dates('created_time', 'month', order='DESC')
# Template tag: categories
@register.simple_tag
def get_categories():
    # Remember to import the Count function at the top
return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
@register.simple_tag
def get_tags():
    # Remember to import the Tag model at the top
return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
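# Hedged usage sketch in a template (illustrative, not part of this module):
#   {% load blog_tags %}
#   {% get_recent_posts 5 as recent_post_list %}
#   {% get_categories as category_list %}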
|
[
"1697755202@qq.com"
] |
1697755202@qq.com
|
76b217501082a69184f849b87695f10f5261347c
|
3d68b35626311dacee3bb1b1b830d430c7d33a4e
|
/Code-part1/plot/gainloss_plot.py
|
ef10191cb357af28edef409fdeee6e05db530828
|
[] |
no_license
|
Jal-ghamdi/INFS7410
|
b6709e8137357293fa644d9033343437424a22e5
|
01ac6fb34a440db0b4e5c54f3b292c24d7aa9eda
|
refs/heads/main
| 2023-06-04T18:44:57.416078
| 2021-06-19T06:53:15
| 2021-06-19T06:53:15
| 378,132,194
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
import sys
import matplotlib.pyplot as plt
import numpy as np
from trectools import TrecRes
if __name__ == "__main__":
if len(sys.argv) < 5:
print("not enough arguments specified")
sys.exit(1)
fname1 = sys.argv[1]
fname2 = sys.argv[2]
measure = sys.argv[3]
output = sys.argv[4]
ev1 = TrecRes(fname1)
ev2 = TrecRes(fname2)
r1 = ev1.get_results_for_metric(measure)
r2 = ev2.get_results_for_metric(measure)
ind = np.arange(len(r1))
# HINT: https://docs.scipy.org/doc/numpy/reference/generated/numpy.subtract.html
plt.bar(ind,np.subtract(list(r1.values()),list(r2.values())))
plt.xticks(ind, list(r1.keys()), rotation="vertical")
plt.ylim(-1, 1)
plt.title("2018 Testing Title Queries using KLI at 0.5 - BM25 and TF_IDF")
plt.ylabel(measure)
plt.tight_layout()
plt.savefig(output)
|
[
"ijawaher.a.al.ghamdi@gmail.com"
] |
ijawaher.a.al.ghamdi@gmail.com
|
b5a6a0a6412e193f6feab24d96c611f4ca9a46d9
|
4738ab5811476237576fa5cabce548c50940e8a7
|
/day 9/untitled1.py
|
c20f422b24a215fef9f4401d040464cb654b9065
|
[] |
no_license
|
vijayshersiya/pythoncodes
|
0b1d9b4839b2ff495358d53fe374dfdce3180f78
|
38c70d73e28d388a9a4dda4a6492ce5880c1d198
|
refs/heads/master
| 2020-05-24T07:16:23.929640
| 2019-05-17T07:26:36
| 2019-05-17T07:26:36
| 187,155,984
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 11:43:01 2019
@author: Vijay
"""
"""
Code Challenge 2
Perform similar steps as in the above code challenge but store the contents in
an online mongo atlas database
"""
import pymongo
#import dns # required for connecting with SRV
#client = pymongo.MongoClient("mongodb://K_Vaid:123chandu30%26@cluster0-shard-00-00-tofyu.mongodb.net:27017,cluster0-shard-00-01-tofyu.mongodb.net:27017,cluster0-shard-00-02-tofyu.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true")
client = pymongo.MongoClient("mongodb://vijay1997:vijay%401997@vijay-shard-00-00-obffw.gcp.mongodb.net:27017,vijay-shard-00-01-obffw.gcp.mongodb.net:27017,vijay-shard-00-02-obffw.gcp.mongodb.net:27017/test?ssl=true&replicaSet=vijay-shard-0&authSource=admin&retryWrites=true")
mydb = client.vijayshersiyadb
def add_stu(student_name,student_age,student_roll_no,student_branch):
#unique_employee = mydb.employees.find_one({"id":idd})
#if unique_employee:
# return "Employee already exists"
#else:
mydb.vijay.insert_one(
{
"student_name" :student_name ,
"student_age" :student_age,
"student_roll_no" : student_roll_no,
"student_branch " : student_branch
})
return "stu added successfully"
def fetch_all_stu():
user = mydb.vijay.find()
for i in user:
print (i)
add_stu ('vijay',22, 45, 'cs')
add_stu ('mohit',21, 46, 'cs')
add_stu ('dig',23, 47, 'cs')
add_stu ('viku',24, 48, 'cs')
add_stu ('anupam',22, 49, 'cs')
fetch_all_stu()
|
[
"vijayshersiya1997@gmail.com"
] |
vijayshersiya1997@gmail.com
|
9828e3bacaa6964bc3c123c316e9e504fce3dc5a
|
674f5dde693f1a60e4480e5b66fba8f24a9cb95d
|
/tests/armv6_tests/opcode_tests/test_mul.py
|
6858af0a7cb71951d5fbb8f90d2ebe8a80622ddd
|
[
"MIT"
] |
permissive
|
matan1008/armulator
|
75211c18ebc9cd9d33a02890e76fc649483c3aad
|
44f4275ab1cafff3cf7a1b760bff7f139dfffb07
|
refs/heads/master
| 2023-08-17T14:40:52.793120
| 2023-08-08T04:57:02
| 2023-08-08T04:57:02
| 91,716,042
| 29
| 7
|
MIT
| 2023-08-08T04:55:59
| 2017-05-18T16:37:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,265
|
py
|
from armulator.armv6.opcodes.concrete.mul_a1 import MulA1
from armulator.armv6.opcodes.concrete.mul_t1 import MulT1
from armulator.armv6.opcodes.concrete.mul_t2 import MulT2
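# Encoding note (Thumb T1, MULS <Rdm>, <Rn>, <Rdm>): bits[5:3] hold Rn and
# bits[2:0] hold Rdm, so 0b0100001101000001 below decodes to MULS r1, r0, r1 -
# hence the asserts m == 1, d == 1, n == 0 and the result 5 * 4 == 20.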
def test_mul_t1(thumb_v6_without_fetch):
arm = thumb_v6_without_fetch
arm.opcode = 0b0100001101000001
arm.opcode_len = 16
opcode = arm.decode_instruction(arm.opcode)
opcode = opcode.from_bitarray(arm.opcode, arm)
    assert isinstance(opcode, MulT1)
assert opcode.instruction == arm.opcode
assert opcode.m == 1
assert opcode.d == 1
assert opcode.n == 0
assert opcode.setflags
arm.registers.set(0, 5)
arm.registers.set(1, 4)
arm.emulate_cycle()
assert arm.registers.get(1) == 20
assert arm.registers.cpsr.n == 0
assert arm.registers.cpsr.z == 0
assert arm.registers.cpsr.c == 0
assert arm.registers.cpsr.v == 0
def test_mul_t2(thumb_v6_without_fetch):
arm = thumb_v6_without_fetch
arm.opcode = 0b11111011000000001111001000000001
arm.opcode_len = 32
opcode = arm.decode_instruction(arm.opcode)
opcode = opcode.from_bitarray(arm.opcode, arm)
assert isinstance(opcode, MulT2)
assert opcode.setflags is False
assert opcode.n == 0
assert opcode.m == 1
assert opcode.d == 2
arm.registers.set(opcode.n, 0x00000004)
arm.registers.set(opcode.m, 0x00000002)
arm.emulate_cycle()
assert arm.registers.get(opcode.d) == 0x00000008
assert arm.registers.cpsr.n == 0
assert arm.registers.cpsr.z == 0
assert arm.registers.cpsr.c == 0
assert arm.registers.cpsr.v == 0
def test_mul_a1(arm_v6_without_fetch):
arm = arm_v6_without_fetch
arm.opcode = 0b11100000000100100000000010010001
arm.opcode_len = 32
opcode = arm.decode_instruction(arm.opcode)
opcode = opcode.from_bitarray(arm.opcode, arm)
assert isinstance(opcode, MulA1)
assert opcode.setflags
assert opcode.n == 1
assert opcode.m == 0
assert opcode.d == 2
arm.registers.set(opcode.n, 0x00000004)
arm.registers.set(opcode.m, 0x00000002)
arm.emulate_cycle()
assert arm.registers.get(opcode.d) == 0x00000008
assert arm.registers.cpsr.n == 0
assert arm.registers.cpsr.z == 0
assert arm.registers.cpsr.c == 0
assert arm.registers.cpsr.v == 0
|
[
"matan1008@gmail.com"
] |
matan1008@gmail.com
|
18798f0266954d7f7e163c351ce49b0a4761e852
|
5f34c24225991f6332d73fd57e74e0f4145499bf
|
/pyarubaoss/acls.py
|
c1cfc9163b31518747163951411fe595b86b05b7
|
[
"Apache-2.0"
] |
permissive
|
RouteNotTaken/pyarubaoss
|
9b09c9901e374e59939cf894f03fb68bdbf801c4
|
14f8e132f3ab17d109bf724568c2b498e8d35cc9
|
refs/heads/master
| 2021-08-20T09:09:50.648826
| 2017-11-28T18:25:31
| 2017-11-28T18:25:31
| 104,519,191
| 0
| 0
| null | 2017-09-22T21:12:48
| 2017-09-22T21:12:47
| null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, json
def get_acls(auth):
url = 'http://{}/rest/{}/acls'.format(auth.ipaddr, auth.version)
try:
r = requests.get(url, headers = auth.cookie)
acls = json.loads(r.text)['acl_element']
return acls
except requests.exceptions.RequestException as error:
        return 'Error:\n' + str(error) + ' get_acls: An error has occurred'
def get_acl_rules(auth, acl_id=None):
if acl_id:
url = 'http://{}/rest/{}/acls/{}/rules'.format(auth.ipaddr, auth.version, acl_id)
else:
url = 'http://{}/rest/{}/acls/rules'.format(auth.ipaddr, auth.version)
try:
r = requests.get(url, headers = auth.cookie)
acl_rules = json.loads(r.text)['acl_rule_element']
return acl_rules
except requests.exceptions.RequestException as error:
        return 'Error:\n' + str(error) + ' get_acl_rules: An error has occurred'
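# Minimal usage sketch (assumes an `auth` object exposing .ipaddr, .version and
# .cookie as used above; 'ACL_EXAMPLE' is a placeholder id):
#   acls = get_acls(auth)
#   rules = get_acl_rules(auth, acl_id='ACL_EXAMPLE')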
|
[
"nrhernandez@scoe.net"
] |
nrhernandez@scoe.net
|
044706e36d879630d9396ab0241eca736624048f
|
48af945e9f8933d1cdddb3d99e6656ddf0285cca
|
/new/feed/urls.py
|
92cd4e032d4cdb3e9c058debbdb26d2729c54188
|
[] |
no_license
|
kumarprafful/Website
|
324860aeb933279098564e753efc5d831b0ba0d8
|
40ab08c8378b888eb7b179b3ed1adba8d138e593
|
refs/heads/master
| 2021-01-22T18:23:32.140256
| 2017-03-15T11:22:05
| 2017-03-15T11:22:05
| 85,077,872
| 3
| 0
| null | 2017-03-15T13:48:52
| 2017-03-15T13:48:52
| null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from django.conf.urls import url
from . import views
app_name = 'feed'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
#url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='read'),
url(r'^(?P<pk>[0-9]+)/$', views.story_detail, name='read'),
url(r'^(?P<pk>[0-9]+)/like/$', views.story_like, name='like'),
    url(r'^publish/$', views.AddStory.as_view(), name='story-publish'),
url(r'^(?P<pk>[0-9]+)/edit/$', views.StoryUpdate.as_view(), name='story-edit'),
url(r'^(?P<pk>[0-9]+)/delete/$', views.StoryDelete.as_view(), name='story-delete'),
#comment
#url(r'^(?P<pk>[0-9]+)/$', views.comment, name='comment'),
url(r'^about/$', views.about, name='about'),
url(r'^profile/$', views.profile, name='profile'),
]
|
[
"pal.vishal41@gmail.com"
] |
pal.vishal41@gmail.com
|
ee2e5f8bb3bbdb0ceadcb8b720066148e1e4cace
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5766201229705216_0/Python/tripodduan/p2.py
|
0870e361c9dc6c9ade8a7942a368c04a11fba496
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
#p2
import sys
def test(edges, root, notin):
    outedges = list(filter(lambda x: x != notin, edges[root]))
if len(outedges) < 2:
return 1
results = []
for edge in outedges:
results += [test(edges, edge, root)]
results.sort()
return 1 + results[-1] + results[-2]
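# test() returns the size of the largest full binary tree rooted at `root`
# (the root plus its two largest child subtrees); the answer printed below is
# n minus the best size over all roots, i.e. the minimum number of deletions.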
lines = sys.stdin.readlines()
t = int(lines[0])
counter = 1
for ncase in range(t):
n = int(lines[counter])
counter += 1
edges = [[] for i in range(n)]
results = []
for a in range(n-1):
        data = list(map(lambda x: int(x) - 1, lines[counter].split()))
counter+=1
edges[data[0]] += [data[1]]
edges[data[1]] += [data[0]]
for i in range(n):
results += [test(edges, i, n)]
print "Case #%d:"%(ncase+1), n-max(results)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
21dacff46e3742d8562593d6ea73dcae3f477040
|
3f11e22b7bdf8a45345776f7bc2cfd2e6a9d1bf7
|
/src/data/preprocessing.py
|
829f6fe8d6eb71c21ff022dec0a38aa8e10964a8
|
[
"MIT"
] |
permissive
|
wuyukun888/ocsvm
|
4beec777a4c362097b4f6d08ce0a565bcdc8e8d3
|
fcb7588782cf671baa670616bbeb1b4cde052a9e
|
refs/heads/master
| 2022-11-04T15:57:33.359848
| 2020-06-21T09:07:20
| 2020-06-21T09:07:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,998
|
py
|
import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning, PCA
from sklearn.feature_extraction.image import PatchExtractor
from PIL import Image
def center_data(X_train, X_val, X_test,
mode, offset=None):
""" center images per channel or per pixel
"""
if offset is None:
if mode == "per channel":
n_channels = np.shape(X_train)[1]
offset = np.mean(X_train, axis=(0, 2, 3)).reshape(1, n_channels, 1, 1)
elif mode == "per pixel":
offset = np.mean(X_train, 0)
else:
raise ValueError("Specify mode of centering "
"(should be 'per channel' or 'per pixel')")
X_train -= offset
X_val -= offset
X_test -= offset
def normalize_data(X_train, X_val, X_test,
mode="per channel", scale=None):
""" normalize images per channel, per pixel or with a fixed value
"""
if scale is None:
if mode == "per channel":
            n_channels = np.shape(X_train)[1]
            scale = np.std(X_train, axis=(0, 2, 3)).reshape(1, n_channels, 1, 1)
elif mode == "per pixel":
scale = np.std(X_train, 0)
elif mode == "fixed value":
scale = 255.
else:
raise ValueError("Specify mode of scaling (should be "
"'per channel', 'per pixel' or 'fixed value')")
X_train /= scale
X_val /= scale
X_test /= scale
def rescale_to_unit_interval(X_train, X_val, X_test):
"""
Scaling all data to [0,1] w.r.t. the min and max in the train data is very
important for networks without bias units. (data close to zero would
otherwise not be recovered)
"""
X_train = np.array(X_train, dtype=np.float32)
X_val = np.array(X_val, dtype=np.float32)
X_test = np.array(X_test, dtype=np.float32)
X_train_min = np.min(X_train)
X_train_max = np.max(X_train)
# X_train_min = np.array(X_train_min, dtype=np.float32)
# X_train_max = np.array(X_train_max, dtype=np.float32)
X_train -= X_train_min
X_val -= X_train_min
X_test -= X_train_min
    X_train /= (X_train_max - X_train_min)
    X_val /= (X_train_max - X_train_min)
    X_test /= (X_train_max - X_train_min)
    # the arrays were re-created with np.array() above, so the in-place updates
    # do not propagate to the caller; return the rescaled arrays explicitly
    return X_train, X_val, X_test
def global_contrast_normalization(X_train, X_val, X_test, scale="std"):
"""
Subtract mean across features (pixels) and normalize by scale, which is
either the standard deviation, l1- or l2-norm across features (pixel).
That is, normalization for each sample (image) globally across features.
"""
assert scale in ("std", "l1", "l2")
na = np.newaxis
X_train_mean = np.mean(X_train, axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_val_mean = np.mean(X_val, axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_test_mean = np.mean(X_test, axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_train = np.array(X_train, dtype=np.float32)
X_val = np.array(X_val, dtype=np.float32)
X_test = np.array(X_test, dtype=np.float32)
X_train -= X_train_mean
X_val -= X_val_mean
X_test -= X_test_mean
if scale == "std":
X_train_scale = np.std(X_train, axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_val_scale = np.std(X_val, axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_test_scale = np.std(X_test, axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
if scale == "l1":
X_train_scale = np.sum(np.absolute(X_train), axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_val_scale = np.sum(np.absolute(X_val), axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_test_scale = np.sum(np.absolute(X_test), axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
if scale == "l2":
# equivalent to "std" since mean is subtracted beforehand
X_train_scale = np.sqrt(np.sum(X_train ** 2, axis=(1, 2, 3),
dtype=np.float32))[:, na, na, na]
X_val_scale = np.sqrt(np.sum(X_val ** 2, axis=(1, 2, 3),
dtype=np.float32))[:, na, na, na]
X_test_scale = np.sqrt(np.sum(X_test ** 2, axis=(1, 2, 3),
dtype=np.float32))[:, na, na, na]
X_train /= X_train_scale
X_val /= X_val_scale
X_test /= X_test_scale
return [ X_train, X_val, X_test ]
def zca_whitening(X_train, X_val, X_test, eps=0.1):
"""
Apply ZCA whitening. Epsilon parameter eps prevents division by zero.
"""
# get shape to later reshape data to original format
shape_train = X_train.shape
shape_val = X_val.shape
shape_test = X_test.shape
if X_train.ndim > 2:
X_train = X_train.reshape(shape_train[0], np.prod(shape_train[1:]))
X_val = X_val.reshape(shape_val[0], np.prod(shape_val[1:]))
X_test = X_test.reshape(shape_test[0], np.prod(shape_test[1:]))
# center data
means = np.mean(X_train, axis=0)
X_train -= means
X_val -= means
X_test -= means
# correlation matrix
sigma = np.dot(X_train.T, X_train) / shape_train[0]
# SVD
U,S,V = np.linalg.svd(sigma)
# ZCA Whitening matrix
ZCAMatrix = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + eps)), U.T))
# Whiten
X_train = np.dot(X_train, ZCAMatrix.T)
X_val = np.dot(X_val, ZCAMatrix.T)
X_test = np.dot(X_test, ZCAMatrix.T)
# reshape to original shape
X_train = X_train.reshape(shape_train)
X_val = X_val.reshape(shape_val)
X_test = X_test.reshape(shape_test)
return X_train, X_val, X_test
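# Hedged usage sketch for zca_whitening (shapes are illustrative assumptions):
#   X_tr = np.random.rand(64, 3, 32, 32).astype(np.float32)
#   X_va = np.random.rand(16, 3, 32, 32).astype(np.float32)
#   X_te = np.random.rand(16, 3, 32, 32).astype(np.float32)
#   X_tr, X_va, X_te = zca_whitening(X_tr, X_va, X_te, eps=0.1)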
def make_unit_norm(X_train, X_val, X_test, norm="l2"):
"""
Normalize each image/tensor to length 1 w.r.t. to the selected norm
"""
assert norm in ("l1", "l2")
na = np.newaxis
if norm == "l2":
X_train_norms = np.sqrt(np.sum(X_train ** 2, axis=(1, 2, 3),
dtype=np.float32))[:, na, na, na]
X_val_norms = np.sqrt(np.sum(X_val ** 2, axis=(1, 2, 3),
dtype=np.float32))[:, na, na, na]
X_test_norms = np.sqrt(np.sum(X_test ** 2, axis=(1, 2, 3),
dtype=np.float32))[:, na, na, na]
if norm == "l1":
X_train_norms = np.sum(np.absolute(X_train), axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_val_norms = np.sum(np.absolute(X_val), axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_test_norms = np.sum(np.absolute(X_test), axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
X_train /= X_train_norms
X_val /= X_val_norms
X_test /= X_test_norms
def pca(X_train, X_val, X_test, var_retained=0.95):
"""
PCA such that var_retained of variance is retained (w.r.t. train set)
"""
print("Applying PCA...")
# reshape to 2D if input is tensor
if X_train.ndim > 2:
X_train = X_train.reshape(X_train.shape[0], -1)
if X_val.size > 0:
X_val = X_val.reshape(X_val.shape[0], -1)
if X_test.size > 0:
X_test = X_test.reshape(X_test.shape[0], -1)
pca = PCA(n_components=var_retained)
pca.fit(X_train)
X_train = pca.transform(X_train)
if X_val.size > 0:
X_val = pca.transform(X_val)
if X_test.size > 0:
X_test = pca.transform(X_test)
print("PCA pre-processing finished.")
return X_train, X_val, X_test
def crop_to_square(image):
"""
crops an image (n_channels, height, width) to have square size
with center as in original image
"""
h, w = image[0, ...].shape
min_len = min(h, w)
    h_start = (h // 2) - (min_len // 2)
    h_end = (h // 2) + (min_len // 2)
    w_start = (w // 2) - (min_len // 2)
    w_end = (w // 2) + (min_len // 2)
return image[:, h_start:h_end, w_start:w_end]
def downscale(image, pixels=64):
"""
downscale image (n_channels, height, width) by factor
"""
img = Image.fromarray(np.rollaxis(image, 0, 3))
return np.rollaxis(np.array(img.resize(size=(pixels, pixels))), 2)
def gcn(X, scale="std"):
"""
Subtract mean across features (pixels) and normalize by scale, which is
either the standard deviation, l1- or l2-norm across features (pixel).
That is, normalization for each sample (image) globally across features.
"""
assert scale in ("std", "l1", "l2")
na = np.newaxis
X_mean = np.mean(X, axis=(1, 2, 3), dtype=np.float32)[:, na, na, na]
X -= X_mean
if scale == "std":
X_scale = np.std(X, axis=(1, 2, 3), dtype=np.float32)[:, na, na, na]
if scale == "l1":
X_scale = np.sum(np.absolute(X), axis=(1, 2, 3),
dtype=np.float32)[:, na, na, na]
if scale == "l2":
# equivalent to "std" since mean is subtracted beforehand
X_scale = np.sqrt(np.sum(X ** 2, axis=(1, 2, 3),
dtype=np.float32))[:, na, na, na]
X /= X_scale
def extract_norm_and_out(X, y, normal, outlier):
'''
:param X: numpy array with data features
:param y: numpy array with labels
:param normal: list with labels declared normal
:param outlier: list with labels declared outliers
:return: X_normal, X_outlier, y_normal, y_outlier
'''
    # Reshape to fit the new architecture of the autoencoder
    # (note: np.reshape does not reorder axes; use np.transpose(X, (0, 3, 2, 1))
    # if an actual channel/axis swap is intended)
    X = np.reshape(X,(len(X),X.shape[3],X.shape[2],X.shape[1]))
y = np.reshape(y,(len(y)))
# print("[INFO:] THe shape of X is ",X.shape)
# print("[INFO:] THe shape of y is ", y.shape)
# print(y[0:100])
idx_normal = np.any(y[..., None] == np.array(normal)[None, ...], axis=1)
idx_outlier = np.any(y[..., None] == np.array(outlier)[None, ...], axis=1)
print("[INFO] : The idx_normal is: ", idx_normal)
print("[INFO] : The idx_outlier is: ", idx_outlier)
X_normal = X[idx_normal]
y_normal = np.zeros(np.sum(idx_normal), dtype=np.uint8)
# print("[INFO] : The shape of X is: ", X.shape)
# print("[INFO] : The shape of y is: ", y.shape)
X_outlier = X[idx_outlier]
# y_outlier = -1*np.ones(np.sum(idx_outlier), dtype=np.uint8)
y_outlier = np.ones(np.sum(idx_outlier), dtype=np.uint8)
# print("[INFO] : The shape of X_normal is: ", X_normal.shape)
# print("[INFO] : The shape of X_outlier is: ", X_outlier.shape)
return X_normal, X_outlier, y_normal, y_outlier
def learn_dictionary(X, n_filters, filter_size, n_sample=1000,
n_sample_patches=0, **kwargs):
"""
learn a dictionary of n_filters atoms from n_sample images from X
"""
print("[ INFO ] : The shape of Xtrain is ",X.shape)
n_channels = X.shape[3]
# subsample n_sample images randomly
rand_idx = np.random.choice(len(X), n_sample, replace=False)
# extract patches
patch_size = (filter_size, filter_size)
patches = PatchExtractor(patch_size).transform(
X[rand_idx, ...].reshape(n_sample, X.shape[1], X.shape[2], X.shape[3]))
patches = patches.reshape(patches.shape[0], -1)
patches -= np.mean(patches, axis=0)
patches /= np.std(patches, axis=0)
if n_sample_patches > 0 and (n_sample_patches < len(patches)):
np.random.shuffle(patches)
patches = patches[:n_sample_patches, ...]
# learn dictionary
print('Learning dictionary for weight initialization...')
dico = MiniBatchDictionaryLearning(n_components=n_filters, alpha=1, n_iter=1000, batch_size=10, shuffle=True,
verbose=True, **kwargs)
W = dico.fit(patches).components_
W = W.reshape(n_filters, filter_size, filter_size,n_channels)
print('Dictionary learned.')
return W.astype(np.float32)
|
[
"thinhbka.nguyenvan@gmail.com"
] |
thinhbka.nguyenvan@gmail.com
|
ba8dca8825a048583c7978ad04a9c927dee30dd4
|
93f4df46e9ddd5e4aebac862256805c7c6a2290f
|
/Orientation/rep1ace.py
|
8cac5e91acecf2aaa10f74b230cbf6d4fcc515a1
|
[] |
no_license
|
heraldia/ADLR-Python
|
038de9af4c625f738e0e08e22efb9fb0b9a7b924
|
9013e2ac820ba84d570f47eab395f6ced54d1e4d
|
refs/heads/master
| 2020-12-22T13:01:46.659905
| 2016-11-02T13:04:50
| 2016-11-02T13:04:50
| 34,348,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
import re
f1=open('activityList1.txt','r').read()
f1 = re.sub('Cooking chopping','Chopping',f1)
f_w=open('activityList.txt','wb')
f_w.write(f1)
f_w.close()
|
[
"wlmqxx@gmail.com"
] |
wlmqxx@gmail.com
|
3c57e140f888ce19cc88bbd58b0eeae119b722af
|
ab06098bb8ecab6f0e4dcbc78e952a8bed090342
|
/exercise5.py
|
65872a0be5d1602f2f2e7826496c645c160aeded
|
[] |
no_license
|
divyaamin9825/Learn-Python-the-Hard-Way-Code
|
c889f4ba0b30d07133162ea00f288456e0685ca4
|
f2381e417a30ba5aa906c807eda2022c06c5021a
|
refs/heads/master
| 2021-04-04T05:00:49.954538
| 2020-03-23T04:03:39
| 2020-03-23T04:03:39
| 248,426,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
name = 'Zed A. Shaw'
age = 35 # not a lie
height = 74 # inches
height_cm = height*2.54
weight = 180 # lbs
weight_kg = weight/2-weight/20
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
print(f"Let's talk about {name}.")
print(f"He's {height} inches tall and {height_cm} cm tall.")
print(f"He's {weight} lbs heavy and {weight_kg} kgs heavy.")
print("Actually that's not too heavy.")
print(f"He's got {eyes} eyes and {hair} hair.")
print(f"His teeth are usually {teeth} depending on the coffee.")
# This line is tricky, try to get it exactly right
total = age + height + weight
print(f"If I add {age}, {height}, and {weight} I get {total}.")
print(round(1.7333))
|
[
"noreply@github.com"
] |
divyaamin9825.noreply@github.com
|
6c1111afd168fa48bd9f06b72a8a04d163020534
|
1dc0be5908067202eddbd4c071695bdd6125132e
|
/server/Classification_Standard.py
|
a1ea1037c81b5732506cae39513c8e8be0d3209c
|
[] |
no_license
|
onlywant/NIDS
|
ffe992a340284703c71c7a71b954e9ff2f1db18a
|
05ae34dd7609383603a6d59687b99fdd430889db
|
refs/heads/master
| 2023-02-15T03:36:13.845107
| 2021-01-08T08:21:20
| 2021-01-08T08:21:20
| 327,822,813
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,834
|
py
|
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import pandas as pd
from time import sleep
from queue import Queue
import multiprocessing
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
from logging import basicConfig,INFO
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from Feature_DB import Fea_test, Fea_train, Extra_train
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
# from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=INFO)
class Classification:
'''
1. init -> get_category_from_feature
2. init -> -train_dbow ->test
-train_dm
'''
def __init__(self):
self.sizeDbow = 50
self.sizeDm = 16
self.circle = 5
self.epoch = 4
self.rate = 0.5
self.derate = 0.0025
self.success = None
self.addNew = False
self.docTrain_Tagged = None
self.docTest_Tagged = None
self.vec_token_dbow = None
self.mid_model_dbow = None
self.modelDbow = None
self.list_flag = ['normal', 'dos', 'probe']
# self.list_flag = ['normal.', 'err.']
        # Load the trained model
self.load_model_dbow()
print(self.modelDbow)
self.set_token_dbow(self.modelDbow)
print('classification init finished')
    # Get a classification label from a feature vector
def get_category_from_feature(self, fea):
# TODO
# 1. fea to taggled sentence
# 2. get vec of sentence
# 3.
sents = fea
for i in range(len(sents)):
sents[i] = str(sents[i])
vec = self.modelDbow.infer_vector(sents, steps=20)
cur = -1
maxdis = -1
for i in range(3):
dis = float(
np.dot(vec, self.vec_token_dbow[i]) / (np.linalg.norm(vec) * np.linalg.norm(self.vec_token_dbow[i])))
if maxdis < dis:
maxdis = dis
cur = i
return self.list_flag[cur]
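    # The method above is nearest-centroid classification in Doc2Vec space:
    # cos(u, v) = u.v / (|u||v|); the flag whose label vector is most similar
    # to the inferred sentence vector is returned.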
    # Set training parameters
def set_para(self, sizeDbow, circle, epoch, rate, derate, addNew):
self.sizeDbow = sizeDbow
self.circle = circle
self.epoch = epoch
self.rate = rate
self.derate = derate
self.addNew = addNew
    # Set the label vectors from a trained model
def set_token_dbow(self, model):
self.vec_token_dbow = []
for i in range(len(self.list_flag)):
self.vec_token_dbow.append(model.docvecs[self.list_flag[i]])
def set_token_dm(self):
self.vec_token_dm = []
for i in range(len(self.list_flag)):
self.vec_token_dm.append(self.modelDm.docvecs[self.list_flag[i]])
    # Load models from disk
def load_model_dbow(self):
self.modelDbow = Doc2Vec.load(
'./model/docmodel__dbow__standard__cos_and_logis.model')
def load_model_dm(self):
self.modelDm = Doc2Vec.load(
'./model/docmodel__dm__standard__cos_and_logis.model')
    # Save the intermediate model as the main model and reload it
def save_model_from_mid(self):
self.modelDbow = Doc2Vec.load(
'./model/docmodel__mid__dbow__standard__cos_and_logis.model')
self.modelDbow.save(
'./model/docmodel__dbow__standard__cos_and_logis.model')
self.set_token_dbow(self.modelDbow)
print('cover finish')
    ''' Data preprocessing '''
def data_preprocess(self):
# csvDataTrain = pd.read_csv('./data/nsl-train.csv') # 读取训练数据
# arrCsvTrain = np.array(csvDataTrain)
# csvDataTest = pd.read_csv('./data/nsl-test.csv') # 读取测试数据
# arrCsvTest = np.array(csvDataTest)
# docTrain = arrCsvTrain[:, :-1]
# tagTrain = arrCsvTrain[:, -1]
# docTest = arrCsvTest[:, :-1]
# tagTest = arrCsvTest[:, -1]
# docTrain_Tagged = []
# docTest_Tagged = []
#
# for i in range(len(docTrain)):
# docTrain_Tagged.append(
# TaggedDocument(' '.join(docTrain[i]).split(' '), [str(tagTrain[i])]))
# for i in range(len(docTest)):
# docTest_Tagged.append(TaggedDocument(' '.join(docTest[i]).split(' '), [str(tagTest[i])]))
# self.docTrain_Tagged = docTrain_Tagged
# self.docTest_Tagged = docTest_Tagged
engine = create_engine('sqlite:///./data/Fea.db')
DBSession = sessionmaker(bind=engine)
        # Create a session object:
session = DBSession()
train_data = session.query(Fea_train).all()
test_data = session.query(Fea_test).all()
extra_data = session.query(Extra_train).all()
session.close()
docTrain_Tagged = []
docTest_Tagged = []
for i in range(len(train_data)):
docTrain_Tagged.append(
TaggedDocument(train_data[i].feature.split(' '), [train_data[i].classification]))
if self.addNew:
for i in range(len(extra_data)):
docTrain_Tagged.append(
TaggedDocument(train_data[i].feature.split(' '), [train_data[i].classification]))
for i in range(len(test_data)):
docTest_Tagged.append(
TaggedDocument(test_data[i].feature.split(' '), [test_data[i].classification]))
print(docTest_Tagged[0])
self.docTrain_Tagged = docTrain_Tagged
self.docTest_Tagged = docTest_Tagged
    ''' Extract (label, vector) pairs for learning '''
def get_vector_for_learning(self, model, tagged_docs):
sents = tagged_docs
targets, regressors = zip(*[(doc.tags[0], model.infer_vector(doc.words, steps=20)) for doc in sents])
return targets, regressors
    ''' Cosine-similarity prediction '''
def calculate_cos_sim(self, X_test, y_test, buffer_p=None):
y_pred_cos = []
for i in range(len(X_test)):
maxdis = -1
cur = -1
for j in range(len(self.list_flag)):
dis = float(np.dot(X_test[i], self.vec_token_dbow[j]) / (
np.linalg.norm(X_test[i]) * np.linalg.norm(self.vec_token_dbow[j])))
if maxdis < dis:
maxdis = dis
cur = j
y_pred_cos.append(self.list_flag[cur])
if buffer_p is not None:
buffer_p.put(90.0)
# with open('C:\\Users\\ThinkPad\\Desktop\\bishe\\test\\2.txt', 'w', newline='') as file:
# for i in range(len(y_pred_cos)):
# file.write(str(y_pred_cos[i]))
# with open('C:\\Users\\ThinkPad\\Desktop\\bishe\\test\\3.txt', 'w', newline='') as file:
# for i in range(len(y_test)):
# file.write(str(y_test[i]))
# f = open('./test.txt', 'w')
num_acc = 0
num_attack = 0
num_attack_p = 0
num_normal = 0
num_p_attack = 0
for i in range(len(y_pred_cos)):
# f.write(y_pred_cos[i] + '\n')
# print(y_test[i],y_pred_cos[i])
if y_test[i] != 'normal':
num_attack += 1
if y_pred_cos[i] != 'normal':
num_attack_p += 1
else:
num_normal += 1
if y_pred_cos[i] != 'normal':
num_p_attack += 1
if y_test[i] == y_pred_cos[i]:
num_acc += 1
# f.close()
if buffer_p is not None:
buffer_p.put(100.0)
sleep(1)
if buffer_p is not None:
buffer_p.put(None)
buffer_p.put("余弦相似度正确率为:"+str(num_acc / len(y_pred_cos)) +
" 检测率(DR):"+str( num_attack_p / num_attack) +
" 误报率(DR):"+str( num_p_attack / num_normal))
# print("余弦相似度正确率为:{}".format(num_acc / len(y_pred_cos)) + \
# "检测率(DR):{}".format( num_attack_p / num_attack) + \
# "误报率(DR):{}".format( num_p_attack / num_normal))
    ''' Logistic-regression prediction '''
    def calculate_logist_sim(self, X_train, y_train, X_test, y_test):
logreg = LogisticRegression(n_jobs=1, C=1e5)
logreg.fit(X_train, y_train)
y_pred_logis = logreg.predict(X_test)
# with open('../model/test/4.txt', 'w', newline='') as file:
# for i in range(len(y_pred_logis)):
# file.write(str(y_pred_logis[i] + ' ' + str(y_test[i]) + '\n'))
        print('Logistic regression testing accuracy: %s' % accuracy_score(y_test, y_pred_logis) +
              ' testing F1 score: {}'.format(f1_score(y_test, y_pred_logis, average='weighted')))
    ''' Train and save the DBOW model '''
def train_dbow(self, buffer_p=None):
print('start dbow train')
self.data_preprocess()
if buffer_p is not None:
buffer_p.put(1 / self.circle * 100 - 1)
cores = multiprocessing.cpu_count()
self.mid_model_dbow = Doc2Vec(dm=0, min_count=1, workers=cores, vector_size=self.sizeDbow)
self.mid_model_dbow.build_vocab(self.docTrain_Tagged)
for epoch in range(self.circle):
self.mid_model_dbow.train(self.docTrain_Tagged, total_examples=len(self.docTrain_Tagged), epochs=self.epoch)
self.mid_model_dbow.alpha -= self.derate
self.mid_model_dbow.min_alpha = self.mid_model_dbow.alpha
if buffer_p is not None:
buffer_p.put(((epoch + 1) / self.circle) * 100)
sleep(1)
print("dbow train over")
self.mid_model_dbow.save(
'./model/docmodel__mid__dbow__standard__cos_and_logis.model')
self.test_model(self.mid_model_dbow, self.docTest_Tagged, buffer_p)
    ''' Train and save the DM model '''
def train_dm(self):
print('start train dm')
cores = multiprocessing.cpu_count()
docmodel_dm = Doc2Vec(alpha=0.025, dm_mean=None, dm=1, min_count=1, window=2, vector_size=16,
sample=1e-3, min_alpha=0.0001, epochs=10, negative=5, workers=4)
docmodel_dm.build_vocab(self.docTrain_Tagged)
docmodel_dm.train(self.docTrain_Tagged, total_examples=len(self.docTrain_Tagged), epochs=docmodel_dm.epochs)
print("dm over")
docmodel_dm.save('./model/docmodel__dm__standard__cos_and_logis.model')
    ''' Load the DBOW model, get vectors, predict and report results (currently ~74% accuracy) '''
def test_model(self, model, docstest, buffer_p=None, docs=None):
print('start test model')
self.set_token_dbow(model)
if buffer_p is not None:
buffer_p.put(20.0)
# y_train, X_train = self.get_vector_for_learning(model,docs)
y_test, X_test = self.get_vector_for_learning(model, docstest)
if buffer_p is not None:
buffer_p.put(50.0)
        # Cosine-similarity prediction
self.calculate_cos_sim(X_test, y_test, buffer_p)
        # Logistic-regression prediction
# y_train, X_train = self.get_vector_for_learning(model, docs)
# self.calculate_logist_sim(X_train, y_train, X_test, y_test)
    ''' Average the document vectors per label and combine with the label vector (about +2%) '''
def CalTokenDoc(self, tags, X_train, size):
num_doc = [0] * 5
token_doc = np.zeros((5, size))
for i in range(len(tags)):
index = int(tags[i])
token_doc[index] += X_train[i]
num_doc[index] += 1
for i in range(5):
token_doc[i] = token_doc[i] / num_doc[i]
return token_doc
    ''' Average the word vectors per label and combine with the label vector '''
def CalTokenWord(self, tags, model, size, docTrain):
num_word = [0] * 5
token_word = np.zeros((5, size))
for i in range(len(tags)):
index = int(tags[i])
words = docTrain[i].split()
for j in range(len(words)):
token_word[index] += model.wv.get_vector(words[j])
num_word[index] += 1
for i in range(5):
token_word[i] = token_word[i] / num_word[i]
return token_word
if __name__ == '__main__':
c = Classification()
q = Queue()
c.train_dbow(q)
|
[
"1114128406@qq.com"
] |
1114128406@qq.com
|
f4a16702a4149f1c2674845ef79d67e9eede14ac
|
104b4f496df84cc74cbd1b1c97e24f5b3061e85a
|
/news/rest/serializers.py
|
eee1cf9f6e36eb802048fb07733196ddd9afef6e
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
osuka/dognews-server
|
5f29f49f0200374809236f9408bdcd7b61be5102
|
0db18946b62379039791794c702f91b75ab95258
|
refs/heads/master
| 2023-03-03T00:43:33.016837
| 2023-02-04T19:47:22
| 2023-02-04T19:47:22
| 219,573,887
| 2
| 0
|
BSD-3-Clause
| 2023-02-15T20:39:40
| 2019-11-04T18:53:44
|
Python
|
UTF-8
|
Python
| false
| false
| 10,364
|
py
|
""" Django rest framework serializers for all the entities
These transform models into various representations
"""
from collections import OrderedDict
from dogauth.models import User
from typing import Any, List
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework import serializers
from rest_framework.fields import SerializerMethodField
from rest_framework.validators import UniqueValidator
from drf_spectacular.utils import (
extend_schema,
extend_schema_field,
extend_schema_serializer,
)
from drf_spectacular.types import OpenApiTypes
from dogauth import permissions
from ..models import Retrieval, Moderation, Submission, Vote
# pylint: disable=missing-class-docstring
# note on django rest framework and nulls: by default, fields that are null
# in the DB are serialized as nulls. We extend the default model serializer
# to remove them
class NonNullModelSerializer(serializers.ModelSerializer):
"""Any field that has a value of null _or_ empty string in the output json
will be removed
"""
def to_representation(self, instance):
result = super().to_representation(instance)
# see discussion https://stackoverflow.com/a/45569581
return OrderedDict(
[
(key, result[key])
for key in result
if result[key] is not None and result[key] != ""
]
)
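    # Illustrative effect (values made up): {"title": "x", "thumbnail": None,
    # "description": ""} is rendered as {"title": "x"} - null/empty keys dropped.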
# --------------------------------------
def _username_masking_admins(user: User) -> str:
"""Helper that hides some information we don't
want to show externally about users"""
if not user or user.is_superuser:
return "admin"
return user.username
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ["id", "username", "groups"]
username = serializers.SerializerMethodField()
groups = serializers.SerializerMethodField()
def get_username(self, user: User) -> str:
# we fake an 'admin' user given to all superusers
return _username_masking_admins(user)
def get_groups(self, user: User) -> List[str]:
# we fake an 'admin' group
groups = [g.name for g in user.groups.all()]
if user.is_superuser:
return groups + ["admin"]
return groups
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = ["url", "name"]
# class RatingSerializer(serializers.ModelSerializer):
# class Meta:
# model = Rating
# read_only_fields = ["user"]
# exclude = []
# --------------------------------------
class ModerationSerializer(NonNullModelSerializer):
"""A human evaluation of a submission"""
class Meta:
model = Moderation
fields = [
"url",
"target_url",
"status",
"owner",
"title",
"description",
"last_updated",
"date_created",
]
read_only_fields = [
"url",
"target_url",
"owner",
"title",
"description",
"last_updated",
"date_created",
]
# --------------------------------------
class RetrievalSerializer(NonNullModelSerializer):
"""The result of a bot retrieving the information"""
class Meta:
model = Retrieval
fields = [
"url",
"status",
"owner",
"title",
"description",
"thumbnail",
"fetched_page",
"last_updated",
"date_created",
"thumbnail_from_page",
"thumbnail_submitted",
"thumbnail_processed",
]
read_only_fields = [
"thumbnail_from_page",
"thumbnail_submitted",
"thumbnail_processed",
"url",
"owner",
"last_updated",
"date_created",
]
thumbnail = serializers.SerializerMethodField()
# https://drf-spectacular.readthedocs.io/en/latest/customization.html#step-3-extend-schema-field-and-type-hints
@extend_schema_field(
{
"type": "string",
"example": "https://s3.amazonaws.com/xxxx/env/name.png?xxx&yyy",
}
)
def get_thumbnail(self, obj: Retrieval):
"""return the most specific thumbnail available: the one parsed by the system,
the one submitted by the user or the one extracted from the page - depending on
the state of the submission."""
return (
obj.thumbnail_processed
or obj.thumbnail_submitted
or obj.thumbnail_from_page
)
class RetrievalThumbnailImageSerializer(serializers.ModelSerializer):
"""The normal serializer can be used for FormUpload but for API upload
we enable this one that can expose only the image as separate endpoint"""
class Meta:
model = Retrieval
fields = ["thumbnail_from_page", "thumbnail_submitted", "thumbnail_processed"]
def validate(self, attrs: Any) -> Any:
if not any(x in attrs for x in self.Meta.fields):
raise serializers.ValidationError(
f"One of {self.Meta.fields} must be provided as a multipart form-data"
)
return super().validate(attrs)
def save(self, *args, **kwargs):
# if self.instance.thumbnail_image:
# self.instance.thumbnail_image.delete()
return super().save(*args, **kwargs)
# --------------------------------------
class VoteSerializer(NonNullModelSerializer):
"""Votes are provided in Lists and don't link back to their
submissions once serialized"""
class Meta:
model = Vote
exclude = []
read_only_fields = [
"owner",
"date_created",
"last_updated",
]
# list of fields changes based on permissions
def get_fields(self, *args, **kwargs):
fields = super().get_fields(*args, **kwargs)
request = self.context.get("request")
if request:
user = request.user
if not permissions.is_moderator(request):
# TODO: can we do this also if it's not the owner?
fields.pop("owner", None)
fields.pop("submission", None)
fields.pop("date_created", None)
fields.pop("last_updated", None)
fields.pop("id", None)
return fields
# --------------------------------------
class SubmissionSerializer(
NonNullModelSerializer, serializers.HyperlinkedModelSerializer
):
"""A submission object that is in initial processing"""
class Meta:
model = Submission
fields = [
"id",
"url",
"target_url",
"status",
"owner",
"title",
"description",
"date",
"retrieval",
"moderation",
"votes",
]
read_only_fields = [
"owner",
"status",
"date_created",
"last_updated",
"last_modified_by",
"domain",
"retrieval",
"moderation",
]
url = serializers.HyperlinkedIdentityField(
view_name="submission-detail", lookup_field="pk"
)
retrieval = RetrievalSerializer(required=False, allow_null=True)
moderation = ModerationSerializer(required=False, allow_null=True)
owner = serializers.HyperlinkedRelatedField(view_name="user-detail", read_only=True)
votes = serializers.ListSerializer(
child=VoteSerializer(), required=False, allow_empty=True, allow_null=True
)
# --------------------------------------
def _first(elements: List[str], defvalue: str) -> str:
"""Returns the first element that is not none.
If all are none returns the default provided"""
    found = [x for x in elements if x]
    if found:
        return found[0]
return defvalue
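# e.g. _first(["", None, "fallback"], "default") == "fallback"
# and _first([None, ""], "default") == "default"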
class ArticleSerializer(NonNullModelSerializer):
"""An article is an approved submission and it takes the title
and description from either the automated bots or the moderation,
if the moderator entered any"""
thumbnail = serializers.SerializerMethodField()
description = serializers.SerializerMethodField()
title = serializers.SerializerMethodField()
submitter = serializers.SerializerMethodField()
approver = serializers.SerializerMethodField()
# submitter = serializers.CharField(source="owner__username", read_only=True)
# approver = serializers.CharField(
# source="moderation__owner__username", read_only=True
# )
class Meta:
model = Submission
fields = read_only_fields = [
"url",
"status",
"target_url",
"title",
"description",
"thumbnail",
"last_updated",
"date_created",
"submitter",
# "moderated_submission",
"approver",
]
def get_thumbnail(self, sub: Submission) -> str:
retrieval: Retrieval = sub.retrieval
return _first(
[
retrieval.thumbnail_processed,
retrieval.thumbnail_submitted,
retrieval.thumbnail_from_page,
],
"https://onlydognews.com/gfx/site/onlydognews-logo-main.png",
)
def get_description(self, sub: Submission) -> str:
values = [
sub.moderation.description,
sub.retrieval.description,
sub.description,
]
return _first(values, "")
def get_title(self, sub: Submission) -> str:
values = [sub.moderation.title, sub.retrieval.title, sub.title]
return _first(values, "")
def get_target_url(self, sub: Submission) -> str:
return _first([sub.moderation.target_url], sub.target_url)
def get_submitter(self, sub: Submission) -> str:
return _username_masking_admins(sub.owner)
def get_approver(self, sub: Submission) -> str:
if hasattr(sub, "moderation"):
return _username_masking_admins(sub.moderation.owner)
else:
return _username_masking_admins(None)
|
[
"486736+osuka@users.noreply.github.com"
] |
486736+osuka@users.noreply.github.com
|
6a89e4ce43df01a945f17c15ff2b1d7caf4ada62
|
5058e5fb05efe6050c33f2a389da49284922d9b9
|
/apps/the_wall/migrations/0001_initial.py
|
ed0232f2af43fda5e8f0ce9ba909e1df1db396eb
|
[] |
no_license
|
akshay5193/The-Wall_Django
|
d1a7849b4e4c660a05fee54675a7ab7b2a38bb4d
|
76f85f1ea3cafdcfda0aa5cc78034a3d0f6fd20d
|
refs/heads/master
| 2020-07-22T16:04:46.115793
| 2019-09-09T07:52:35
| 2019-09-09T07:52:35
| 207,254,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-07-18 22:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('login_reg', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_content', models.TextField(max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='login_reg.User')),
],
),
]
|
[
"akshaytnande@gmail.com"
] |
akshaytnande@gmail.com
|
a802f9c9136a652dccdbc4f90a28432961097d39
|
246fd025719a952a66c8cf3ccb9952fa60aab79e
|
/mbgpDriver/myDB.py
|
4f2f4cfac7504488d7a9a92e70411d25b55fd5cf
|
[] |
no_license
|
srxzr/decoyrouter
|
d2295fae64145b2bd04d46b30909d6b4d448ff0d
|
949281457d802bc9672065d1eb35751392656ecc
|
refs/heads/master
| 2021-05-03T21:47:29.456143
| 2016-10-21T06:33:59
| 2016-10-21T06:33:59
| 71,536,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,534
|
py
|
__author__ = 'milad'
import cPickle as pickle
import logging
import copy
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s',
)
class Route:
def __init__(self,source,dest):
self.source=source
self.destination= dest
self.bestpath=[]
self.alternativepaths=[]
self.radpath=[]
self.selectedpath=[]
self.routetype=0
self.radpath=[]
def setbestpath(self,path):
self.bestpath=path
self.selectedpath=self.bestpath
def addpath(self,path):
self.alternativepaths.append(path)
def getallpathes(self):
return self.alternativepaths
class RouteManager:
def __init__(self):
self.routes={}
self.asrels={}
self.fillASrels()
def fillASrels(self):
asf= open('../Datasets/caida-m.txt','r')
for l in asf:
as1 , as2 =l.split(' ')[0:2]
self.asrels.setdefault(as1,{})[as2]=int(l.split(' ')[2])
self.asrels.setdefault(as2,{})[as1]=-int(l.split(' ')[2])
asf.close()
def addRoutes(self,source,dest,route):
path=self.routes.setdefault(source,{}).setdefault(dest,Route(source,dest))
path.addpath(route)
def setBestRoute(self,source,dest,route):
path=self.routes.setdefault(source,{}).setdefault(dest,Route(source,dest))
path.setbestpath(route)
def save(self,path):
f=open(path,'wb')
pickle.dump(self.routes,f)
f.close()
def getRoutes(self):
return self.routes
def getRouteForSource(self,src):
res= self.routes.get(src,[])
#logging.debug( str(len(res)))
return res
def hasDecoy(self,r,decoys):
if len(r)<2:
return False
for i in r[0]:
if decoys.has_key(str(i)):
return True
return False
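    # compareRAD mimics a slice of BGP best-path selection between two candidate
    # routes r = (as_path, local_pref): higher LOCAL_PREF wins, then the shorter
    # AS path, then the lower first-hop AS number as a deterministic tie-break.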
def compareRAD(self,r1,r2):
per1=int(r1[1])
per2=int(r2[1])
#LOCAL PRE
if per2>per1:
return r2
elif per1>per2:
return r1
#SHORTEST
if len(r2[0])>len(r1[0]):
return r1
if len(r2[0])<len(r1[0]):
return r2
if r1[0][0]<r2[0][0]:
return r1
return r2
def computeRAD(self,source,destination,decoys):
if not self.routes.has_key(source) or not self.routes[source].has_key(destination):
return [],-1
route=self.routes[source][destination]
if not self.hasDecoy(route.bestpath,decoys):
route.radpath=copy.deepcopy(route.bestpath)
route.routetype=0
return route.radpath, 0
early_selection=[]
for i in route.alternativepaths:
if not self.hasDecoy(i,decoys):
early_selection.append(i)
if len(early_selection) == 0 :
route.routetype=-1
route.radpath=[]
return [],-1
best=[]
        # NOTE: `relays` is not defined in this file; it is expected to exist
        # as a module-level global before this method is called.
        for r in relays:
if not self.routes.has_key(r) or not self.routes[r].has_key(destination):
continue
rel= self.routes[r][destination]
for ass in rel.alternativepaths:
if self.hasDecoy(ass,decoys):
continue
if best==[]:
best=ass
best= self.compareRAD(best,ass)
if best == []:
return [],-2
return best,2
best= early_selection[0]
for i in early_selection:
best=self.compareRAD(best,i)
route.radpath=copy.deepcopy(best)
route.routetype=1
return best , 1
def computeRAD_justcompute(self,source,destination,decoys):
if not self.routes.has_key(source) or not self.routes[source].has_key(destination):
return [],-1
route=self.routes[source][destination]
if not self.hasDecoy(route.bestpath,decoys):
return route.radpath, 0
early_selection=[]
for i in route.alternativepaths:
if not self.hasDecoy(i,decoys):
early_selection.append(i)
if len(early_selection) == 0 :
return [],-1
best=[]
for r in relays:
if not self.routes.has_key(r) or not self.routes[r].has_key(destination):
continue
rel= self.routes[r][destination]
for ass in rel.alternativepaths:
if self.hasDecoy(ass,decoys):
continue
if best==[]:
best=ass
best= self.compareRAD(best,ass)
if best == []:
return [],-2
return best,2
best= early_selection[0]
for i in early_selection:
best=self.compareRAD(best,i)
return best , 1
class RouteParser:
def __init__(self,path):
        self.rm=RouteManager()
self.file= open(path,'r')
def parseForDests(self,dests):
source=''
dest=''
count=0
for i in self.file :
sp= i.replace('\n','').split('\t')
if len(sp)==1 and len(i)>1:
source=sp[0]
elif len(sp)>1:
pref=sp[2]
isbest=sp[0][:2]=='*>'
path=sp[4]
dest=path.split(' ')[-1]
if not dest in dests:
continue
self.rm.addRoutes(source,dest,([ int(ss) for ss in (source+' '+path).split(' ')],pref))
if isbest:
self.rm.setBestRoute(source,dest,([ int(ss) for ss in (source+' '+path).split(' ')],pref))
count +=1
if count%1000000==0:
print count
self.file.close()
self.check()
def startParsing(self,sources,rings):
source=''
dest=''
count=0
for i in self.file :
sp= i.replace('\n','').split('\t')
if len(sp)==1 and len(i)>1:
source=sp[0]
elif len(sp)>1:
pref=sp[2]
isbest=sp[0][:2]=='*>'
path=sp[4]
dest=path.split(' ')[-1]
if dest in sources or int(dest) in rings or str(dest) in rings:
continue
self.rm.addRoutes(source,dest,([ int(ss) for ss in (source+' '+path).split(' ')],pref))
if isbest:
self.rm.setBestRoute(source,dest,([ int(ss) for ss in (source+' '+path).split(' ')],pref))
#break
count +=1
if count%1000000==0:
#break
print count
self.file.close()
self.check()
def check(self):
m=[]
for i in self.rm.routes:
m.append(len(self.rm.routes[i]))
alt=[]
for i in self.rm.routes:
avg=0
for j in self.rm.routes[i]:
avg += len(self.rm.routes[i][j].alternativepaths)
avg=float(avg)/float(len(self.rm.routes[i]))
alt.append(avg)
alt.sort()
m.sort()
print avg
def saveRoutes(self,path):
self.rm.save(path)
def getRouteManager(self):
return self.rm
import multiprocessing.managers
class MyManager(multiprocessing.managers.BaseManager):
pass
|
[
"milad@cs.umass.edu"
] |
milad@cs.umass.edu
|
a83a5e4abbe7b76e5cbdcc7adad28cf164c569fd
|
72e7e02fa0731ff6991329f5a706154a3dc8de6e
|
/API_num.py
|
af28a450cbcd54d817caa94d9e0df4669826dcb0
|
[] |
no_license
|
11111001001/STEPIK
|
2006ab7506f7e7aa37e4773d44cc6e8d6f6656ac
|
36a86cd9ef06def916fdb062108d4ff13bf63a76
|
refs/heads/master
| 2022-12-05T02:24:49.486750
| 2020-08-20T12:17:27
| 2020-08-20T12:17:27
| 288,160,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
import requests
import json
# Processing numbers from a file
# with open("example_dataset.txt") as f:
# for num in f:
# res = requests.get(api_url + num.strip() + "/math?json=true").text
# print(json.loads(res)["text"] if json.loads(res)["found"] else 'Boring')
api_url = 'http://numbersapi.com/'
for num in input().split():
res = requests.get(api_url + str(num) + "/math?json=true").text
print(json.loads(res)["text"] if json.loads(res)["found"] else 'Boring')
|
[
"noreply@github.com"
] |
11111001001.noreply@github.com
|
29eaa6b1fd044584f45a9580ce32b5f7f7e7ec2d
|
173c0986418c758ed9372e3a5bb18baf96925ce9
|
/QA_03_基本数据类型_字符串.py
|
23a8080187156394bc47e65e635a28cbfeb709a8
|
[] |
no_license
|
leadecree/python
|
8ecd64045196117dc8c6f64d529fb6206ae53d93
|
429faaf4ff8b092c0f54d33586140c65ecceb6b7
|
refs/heads/main
| 2023-08-02T07:30:44.616302
| 2021-10-02T08:14:24
| 2021-10-02T08:14:24
| 350,205,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
# *_* coding:utf-8 *_*
# @author:sdh
# @Time : 2021/9/19 0019 08:04
# Strings: single-, double- and triple-quoted strings
# 1 The three forms of a string
x = 'python'
y = "python"
z = """python"""
print(x, y, z) # python python python
# 2 A string containing a quoted string
a = "this is 'python'"
print(a) # this is 'python'
# 3 String slicing
b = "learn python"
print(b[:])  # take the whole string: learn python
print(b[0:5])  # half-open interval [0, 5): learn
print(b[0:6:2])  # every 2nd character in [0, 6): lan
print(b[::-1])  # reversed string: nohtyp nrael
|
[
"noreply@github.com"
] |
leadecree.noreply@github.com
|
1af7319609de94556066e619b6963688250a26b3
|
e50eaf7779e8073bf511e17ec5006f9ccfe9e48d
|
/other/lab2/messenger/examples/app.py
|
408b756c561618d39bd23e12e22fda6f95cbc0c3
|
[] |
no_license
|
serhiisad/xmlDb_labs_2019
|
2d0b06a7af3ddc83bdb5eb4dd5035a05b8f5a4de
|
a9e5d6fa76f34c7f5a4432dd10a666f81a0828fa
|
refs/heads/master
| 2021-06-13T10:39:39.928224
| 2019-06-22T09:39:24
| 2019-06-22T09:39:24
| 170,873,247
| 0
| 0
| null | 2021-03-29T20:00:20
| 2019-02-15T13:57:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,292
|
py
|
import redis
import time
import traceback
import random
def check_spam(message):
    '''
    imitation of spam-checking
    :param message:
    :return:
    '''
time.sleep(random.randint(0, 3))
r = bool(random.getrandbits(1))
print(r)
return r
def RedisCheck():
try:
r = redis.StrictRedis(host='localhost', port=6379) # Connect to local Redis instance
p = r.pubsub() # See https://github.com/andymccurdy/redis-py/#publish--subscribe
p.subscribe('startScripts') # Subscribe to startScripts channel
PAUSE = True
while PAUSE: # Will stay in loop until START message received
print("Waiting For redisStarter...")
message = p.get_message() # Checks for message
if message:
command = message['data'] # Get data from message
if command == b'START': # Checks for START message
PAUSE = False # Breaks loop
time.sleep(1)
print("Permission to start...")
except Exception as e:
print("Connecting error")
print(str(e))
print(traceback.format_exc())
RedisCheck()
#
# def WorkCheck():
# try:
#
# # HERE SOME INITIAL WORK IS DONE THAT SCRIPTS 1 & 2 NEED TO WAIT FOR
# # IDs SERIAL PORTS
# # SAVE TO db
#
# r = redis.StrictRedis(host='localhost', port=6379) # Connect to local Redis instance
# p = r.pubsub() # See https://github.com/andymccurdy/redis-py/#publish--subscribe
#
# print("Starting main scripts...")
#
# r.publish('startScripts', 'START') # PUBLISH START message on startScripts channel
#
# print("Done")
#
# except Exception as e:
# print("Connection error")
# print(str(e))
# print(traceback.format_exc())
|
[
"serhiisad.kpi@gmail.com"
] |
serhiisad.kpi@gmail.com
|
04cd950ffd4444f0180c80dfafcffab70bcbc0dd
|
57a958244cd90f1cd8a21d958f70bb96e1ecb75e
|
/migrations/versions/9c17c43cf372_notifications.py
|
703f1581cf1a758dc28cc20cf184c951d9a2d44f
|
[] |
no_license
|
victoriaroan/microblog
|
cd5c7ea1a0ab3d80afd396abf4827e94b13a7c10
|
b9a5d59d8c5fd6f2fda1899dfe451ee17b248f94
|
refs/heads/master
| 2022-12-09T19:14:55.386839
| 2019-12-23T20:47:47
| 2019-12-23T20:47:47
| 226,374,523
| 0
| 0
| null | 2022-12-08T03:20:11
| 2019-12-06T17:05:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
"""notifications
Revision ID: 9c17c43cf372
Revises: 9a08e453b7bc
Create Date: 2019-12-20 11:03:38.251403
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9c17c43cf372'
down_revision = '9a08e453b7bc'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('notification',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('timestamp', sa.Float(), nullable=True),
sa.Column('payload_json', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_notification_name'), 'notification', ['name'], unique=False)
op.create_index(op.f('ix_notification_timestamp'), 'notification', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_notification_timestamp'), table_name='notification')
op.drop_index(op.f('ix_notification_name'), table_name='notification')
op.drop_table('notification')
# ### end Alembic commands ###
|
[
"vgood@linode.com"
] |
vgood@linode.com
|
7c75e1037951eecb90c898a717ca71f18c263273
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02747/s173828040.py
|
26114d23e715060f839c2e250c10e568d43e5e65
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
print('YNeos'[input().replace('hi','')!=''::2])
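# Readable equivalent of the golf above (added as explanation):
# stripping every 'hi' leaves '' only for strings built from 'hi' repeats,
# and 'YNeos'[0::2] == 'Yes' while 'YNeos'[1::2] == 'No', so the boolean
# index (True == 1) picks the right interleaved word.
# s = input()
# print('Yes' if s.replace('hi', '') == '' else 'No')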
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
644e8272099f69cc01b87e9f14836df016df6f42
|
bb53e49aee013758573f3b9db3739c38e81e0e84
|
/converter_2_10.py
|
97b5891cbb0bebafab8201a3ebb4159e8f13996b
|
[] |
no_license
|
LaszloNagy85/Converter_Dojo
|
db57384f75b08921f298b60e557ec5851b3bc626
|
8d7f06d929b4310c03a37bb782c83b7f86712e1d
|
refs/heads/master
| 2020-05-29T21:24:11.039655
| 2019-05-30T08:47:06
| 2019-05-30T08:47:06
| 189,379,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
# 2 / 10 converter
num = (input("Add number and the type of it (separated by space): ")).split()
n, v = int(num[0]), int(num[1])
bits = []  # use `bits`, not `bin`, to avoid shadowing the built-in bin()
if v == 10:
    # repeated division by 2 yields the binary digits, least significant first
    while n >= 1:
        bits.insert(0, str(n % 2))
        n = n // 2
    result = "".join(bits)
    print(f"The result is: {result} 2")
result = 0  # must be initialised before the v == 2 branch, or it raises a NameError
if v == 2:
    # sum a power of two for every set bit, reading the digits from the right
    n = str(n)[::-1]
    for i in range(len(n)):
        if n[i] == "1":
            result = result + 2**i
    print(f"The result is: {result} 10")
|
[
"n.laszlo.uk@gmail.com"
] |
n.laszlo.uk@gmail.com
|
090b6cdc689365e07f78330f5f2da9f2ff75ba8b
|
8f5b67b222e14a5eae5b1afa795351ed7ceb2c67
|
/backend/bg_test_app_25843/urls.py
|
19798f357903f89a228d1e1c6faacf01b61a393b
|
[] |
no_license
|
crowdbotics-apps/bg-test-app-25843
|
f495ec91078a5818ae413061ac65e3ef4acb29bf
|
9c0ffe925b29a4bb1d89449689937e1687b2d4ba
|
refs/heads/master
| 2023-04-14T08:14:39.111199
| 2021-04-22T20:34:40
| 2021-04-22T20:34:40
| 360,675,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
"""bg_test_app_25843 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "BG test app"
admin.site.site_title = "BG test app Admin Portal"
admin.site.index_title = "BG test app Admin"
# swagger
api_info = openapi.Info(
title="BG test app API",
default_version="v1",
description="API documentation for BG test app App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ced201d5272d848d0d8e7da843b200dca85ffdd0
|
a9bf6b6595adb92fb0302c08b5fc55a3b4a8000d
|
/other_models/cutter_model1/model1.py
|
6b0769b2952d1904543a126c5fa60d001ff91b9b
|
[] |
no_license
|
JanCVanB/cs152model
|
5356c9bcb168b52d4a3b8531a44e4c7f6342f428
|
3e5f9b4bc2efda51a7c93afcd3b00af75d03a4fc
|
refs/heads/master
| 2021-06-06T05:17:08.390511
| 2014-12-06T21:53:16
| 2014-12-06T21:53:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
import numpy as np
import numpy.random as random
import matplotlib.pyplot as pl
import sys
import plot_defaults
##################
### Parameters ###
num_songs = 1e2 # number of songs in songspace
frac = 1e-1 # fraction of songs in songspace that are intended
power = 1e0 # for skewing the utilities toward 0 (power > 1) or 1 (power < 1)
##################
def pick_song(probs_all):
# songs indicies are 0 to num_songs - 1, with intended songs occupying lower indices
return random.choice(int(num_songs), p=probs_all)
def KL_divergence(distribution, approx_distribution):
return np.sum(distribution * np.log(distribution / approx_distribution))
def pirate(N, eta, probs_all):
'''
Pirate by listening `N` times. `eta` is the multiplicative weights update parameter.
'''
weights = np.ones(num_songs)
probs = weights / np.sum(weights)
for _ in range(int(N)):
song = pick_song(probs_all)
weights[song] *= np.exp(eta)
probs = weights / np.sum(weights)
return KL_divergence(probs_all, probs)
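# Illustrative sanity check (added; not part of the original experiment):
# the KL divergence of any distribution with itself is exactly zero.
_p = np.ones(4) / 4.0
assert np.isclose(KL_divergence(_p, _p), 0.0)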
################
### Plotting ###
num_Ns = 50
upper_N_exponent = 4
ntrials = 10 # trials at each N, epsilon value
eta = 1e-3
fig = pl.figure(figsize=(10,8))
fig.subplots_adjust(left=0.2)
fig.subplots_adjust(bottom=0.16)
fig.subplots_adjust(top=0.85)
fig.subplots_adjust(right=0.85)
for epsilon in (0.1, 1, 10, 100):
    num_intended = int(frac * num_songs)  # numpy array sizes must be integers
    num_unintended = int(num_songs) - num_intended
utilities = (random.random(size=num_intended))**power # sensitivity assumed 1
intended_weights = np.exp(0.5 * epsilon * utilities)
intended_weight = np.sum(intended_weights)
unintended_weight = num_unintended
# probabilities we pick X
prob_intended = intended_weight / (intended_weight + unintended_weight)
probs_intended = intended_weights / (intended_weight + unintended_weight)
probs_unintended = np.ones(num_unintended) / (intended_weight + unintended_weight)
probs_all = np.concatenate((probs_intended, probs_unintended))
print "Probability a given picked song is intended: ", prob_intended
# setup toolbar
toolbar_width = num_Ns
sys.stdout.write("[%s]" % (" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width+1)) # return to start of line, after '['
Ns = np.logspace(-1, upper_N_exponent, num=num_Ns)
div_means = []
div_stds = []
for N in Ns:
divs = []
for _ in range(ntrials):
divs.append(pirate(N, eta, probs_all))
div_means.append(np.mean(divs))
div_stds.append(np.std(divs))
sys.stdout.write("-")
sys.stdout.flush()
pl.plot(Ns, div_means, lw=8, label=r"\(\varepsilon = {}\)".format(epsilon))
pl.xlabel('Number of queries')
pl.ylabel('KL-divergence')
pl.xscale('log')
pl.yscale('log')
pl.legend(loc="center left")
pl.show()
|
[
"cyghost@gmail.com"
] |
cyghost@gmail.com
|
b875a411996438c00d0f845d3d9830153a3ffd06
|
ae90864ffefaae65da9c6e1809bf27208a9d34fc
|
/EXAMPLES/API/export_basetype_csv_default_options.py
|
cccc38b623f26762619346fdc09368f88db9197b
|
[] |
no_license
|
apDataGuy/pytan
|
6374f128c4b4070d041d8bfb05c22e2434ff4a68
|
24c2907e28b3a93f3a96aa4cb42933ed2c6f83bf
|
refs/heads/master
| 2021-01-16T22:10:31.715453
| 2015-03-26T17:32:11
| 2015-03-26T17:32:11
| 38,935,203
| 0
| 0
| null | 2015-07-11T17:56:56
| 2015-07-11T17:56:56
| null |
UTF-8
|
Python
| false
| false
| 4,369
|
py
|
"""
Export a BaseType from getting objects as CSV with the default options
"""
import os
import sys
sys.dont_write_bytecode = True
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# determine the pytan lib dir and add it to the path
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
path_adds = [lib_dir]
for aa in path_adds:
if aa not in sys.path:
sys.path.append(aa)
# connection info for Tanium Server
USERNAME = "Tanium User"
PASSWORD = "T@n!um"
HOST = "172.16.31.128"
PORT = "444"
# Logging conrols
LOGLEVEL = 2
DEBUGFORMAT = False
import tempfile
import pytan
handler = pytan.Handler(
username=USERNAME,
password=PASSWORD,
host=HOST,
port=PORT,
loglevel=LOGLEVEL,
debugformat=DEBUGFORMAT,
)
print(handler)
# setup the export_obj kwargs for later
export_kwargs = {}
export_kwargs["export_format"] = u'csv'
# get the objects that will provide the basetype that we want to use
get_kwargs = {
'name': [
"Computer Name", "IP Route Details", "IP Address",
'Folder Name Search with RegEx Match',
],
'objtype': 'sensor',
}
response = handler.get(**get_kwargs)
# export the object to a string
# (we could just as easily export to a file using export_to_report_file)
export_kwargs['obj'] = response
export_str = handler.export_obj(**export_kwargs)
print ""
print "print the export_str returned from export_obj():"
out = export_str
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print out
'''Output from running this:
Handler for Session to 172.16.31.128:444, Authenticated: True, Version: 6.2.314.3279
print the export_str returned from export_obj():
category,creation_time,delimiter,description,exclude_from_parse_flag,hash,hidden_flag,id,ignore_case_flag,last_modified_by,max_age_seconds,metadata_item_0_admin_flag,metadata_item_0_name,metadata_item_0_value,modification_time,name,parameter_definition,queries_query_0_platform,queries_query_0_script,queries_query_0_script_type,queries_query_1_platform,queries_query_1_script,queries_query_1_script_type,queries_query_2_platform,queries_query_2_script,queries_query_2_script_type,source_id,string_count,subcolumns_subcolumn_0_hidden_flag,subcolumns_subcolumn_0_ignore_case_flag,subcolumns_subcolumn_0_index,subcolumns_subcolumn_0_name,subcolumns_subcolumn_0_value_type,subcolumns_subcolumn_1_hidden_flag,subcolumns_subcolumn_1_ignore_case_flag,subcolumns_subcolumn_1_index,subcolumns_subcolumn_1_name,subcolumns_subcolumn_1_value_type,subcolumns_subcolumn_2_hidden_flag,subcolumns_subcolumn_2_ignore_case_flag,subcolumns_subcolumn_2_index,subcolumns_subcolumn_2_name,subcolumns_subcolumn_2_value_type,subcolumns_subcolumn_3_hidden_flag,subcolumns_subcolumn_3_ignore_case_flag,subcolumns_subcolumn_3_index,subcolumns_subcolumn_3_name,subcolumns_subcolumn_3_value_type,subcolumns_subcolumn_4_hidden_flag,subcolumns_subcolumn_4_ignore_case_flag,subcolumns_subcolumn_4_index,subcolumns_subcolumn_4_name,subcolumns_subcolumn_4_value_type,subcolumns_subcolumn_5_hidden_flag,subcolumns_subcolumn_5_ignore_case_flag,subcolumns_subcolumn_5_index,subcolumns_subcolumn_5_name,subcolumns_subcolumn_5_value_type,value_type
Reserved,,,"The assigned name of the client machine.
Example: workstation-1.company.com",0,3409330187,0,3,1,,86400,,,,,Computer Name,,Windows,select CSName from win32_operatingsystem,WMIQuery,,,,,,,0,9,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,String
Network,2015-03-03T19:03:36,|,"Returns IPv4 network routes, filtered to exclude noise. With Flags, Metric, Interface columns.
Example: 172.16.0.0|192.168.1.1|255.255.0.0|UG|100|eth0",1,435227963,0,737,1,Jim Olsen,60,0,defined,Tanium,2015-03-03T19:03:36,IP Route Details,,Windows,"strComputer = "."
Set objWMIService = GetObject("winmgmts:" _
& "{impersonationLevel=impersonate}!\\" & strComputer & "\root\cimv2")
Set collip = objWMIService.ExecQuery("select * from win32_networkadapterconfiguration where IPEnabled='True'")
dim ipaddrs()
ipcount = 0
for each ipItem in collip
for each ipaddr in ipItem.IPAddress
ipcount = ipcount + 1
next
..trimmed for brevity..
'''
|
[
"jim.olsen@tanium.com"
] |
jim.olsen@tanium.com
|
43536d8db6c4c1914e4a95e7d5c4197d02ecbbfd
|
98efe1aee73bd9fbec640132e6fb2e54ff444904
|
/loldib/getratings/models/NA/na_tahmkench/na_tahmkench_bot.py
|
dac7b83b91ea31853e289b78ea698d1ad2a695fd
|
[
"Apache-2.0"
] |
permissive
|
koliupy/loldib
|
be4a1702c26546d6ae1b4a14943a416f73171718
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
refs/heads/master
| 2021-07-04T03:34:43.615423
| 2017-09-21T15:44:10
| 2017-09-21T15:44:10
| 104,359,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,959
|
py
|
from getratings.models.ratings import Ratings
class NA_TahmKench_Bot_Aatrox(Ratings):
pass
class NA_TahmKench_Bot_Ahri(Ratings):
pass
class NA_TahmKench_Bot_Akali(Ratings):
pass
class NA_TahmKench_Bot_Alistar(Ratings):
pass
class NA_TahmKench_Bot_Amumu(Ratings):
pass
class NA_TahmKench_Bot_Anivia(Ratings):
pass
class NA_TahmKench_Bot_Annie(Ratings):
pass
class NA_TahmKench_Bot_Ashe(Ratings):
pass
class NA_TahmKench_Bot_AurelionSol(Ratings):
pass
class NA_TahmKench_Bot_Azir(Ratings):
pass
class NA_TahmKench_Bot_Bard(Ratings):
pass
class NA_TahmKench_Bot_Blitzcrank(Ratings):
pass
class NA_TahmKench_Bot_Brand(Ratings):
pass
class NA_TahmKench_Bot_Braum(Ratings):
pass
class NA_TahmKench_Bot_Caitlyn(Ratings):
pass
class NA_TahmKench_Bot_Camille(Ratings):
pass
class NA_TahmKench_Bot_Cassiopeia(Ratings):
pass
class NA_TahmKench_Bot_Chogath(Ratings):
pass
class NA_TahmKench_Bot_Corki(Ratings):
pass
class NA_TahmKench_Bot_Darius(Ratings):
pass
class NA_TahmKench_Bot_Diana(Ratings):
pass
class NA_TahmKench_Bot_Draven(Ratings):
pass
class NA_TahmKench_Bot_DrMundo(Ratings):
pass
class NA_TahmKench_Bot_Ekko(Ratings):
pass
class NA_TahmKench_Bot_Elise(Ratings):
pass
class NA_TahmKench_Bot_Evelynn(Ratings):
pass
class NA_TahmKench_Bot_Ezreal(Ratings):
pass
class NA_TahmKench_Bot_Fiddlesticks(Ratings):
pass
class NA_TahmKench_Bot_Fiora(Ratings):
pass
class NA_TahmKench_Bot_Fizz(Ratings):
pass
class NA_TahmKench_Bot_Galio(Ratings):
pass
class NA_TahmKench_Bot_Gangplank(Ratings):
pass
class NA_TahmKench_Bot_Garen(Ratings):
pass
class NA_TahmKench_Bot_Gnar(Ratings):
pass
class NA_TahmKench_Bot_Gragas(Ratings):
pass
class NA_TahmKench_Bot_Graves(Ratings):
pass
class NA_TahmKench_Bot_Hecarim(Ratings):
pass
class NA_TahmKench_Bot_Heimerdinger(Ratings):
pass
class NA_TahmKench_Bot_Illaoi(Ratings):
pass
class NA_TahmKench_Bot_Irelia(Ratings):
pass
class NA_TahmKench_Bot_Ivern(Ratings):
pass
class NA_TahmKench_Bot_Janna(Ratings):
pass
class NA_TahmKench_Bot_JarvanIV(Ratings):
pass
class NA_TahmKench_Bot_Jax(Ratings):
pass
class NA_TahmKench_Bot_Jayce(Ratings):
pass
class NA_TahmKench_Bot_Jhin(Ratings):
pass
class NA_TahmKench_Bot_Jinx(Ratings):
pass
class NA_TahmKench_Bot_Kalista(Ratings):
pass
class NA_TahmKench_Bot_Karma(Ratings):
pass
class NA_TahmKench_Bot_Karthus(Ratings):
pass
class NA_TahmKench_Bot_Kassadin(Ratings):
pass
class NA_TahmKench_Bot_Katarina(Ratings):
pass
class NA_TahmKench_Bot_Kayle(Ratings):
pass
class NA_TahmKench_Bot_Kayn(Ratings):
pass
class NA_TahmKench_Bot_Kennen(Ratings):
pass
class NA_TahmKench_Bot_Khazix(Ratings):
pass
class NA_TahmKench_Bot_Kindred(Ratings):
pass
class NA_TahmKench_Bot_Kled(Ratings):
pass
class NA_TahmKench_Bot_KogMaw(Ratings):
pass
class NA_TahmKench_Bot_Leblanc(Ratings):
pass
class NA_TahmKench_Bot_LeeSin(Ratings):
pass
class NA_TahmKench_Bot_Leona(Ratings):
pass
class NA_TahmKench_Bot_Lissandra(Ratings):
pass
class NA_TahmKench_Bot_Lucian(Ratings):
pass
class NA_TahmKench_Bot_Lulu(Ratings):
pass
class NA_TahmKench_Bot_Lux(Ratings):
pass
class NA_TahmKench_Bot_Malphite(Ratings):
pass
class NA_TahmKench_Bot_Malzahar(Ratings):
pass
class NA_TahmKench_Bot_Maokai(Ratings):
pass
class NA_TahmKench_Bot_MasterYi(Ratings):
pass
class NA_TahmKench_Bot_MissFortune(Ratings):
pass
class NA_TahmKench_Bot_MonkeyKing(Ratings):
pass
class NA_TahmKench_Bot_Mordekaiser(Ratings):
pass
class NA_TahmKench_Bot_Morgana(Ratings):
pass
class NA_TahmKench_Bot_Nami(Ratings):
pass
class NA_TahmKench_Bot_Nasus(Ratings):
pass
class NA_TahmKench_Bot_Nautilus(Ratings):
pass
class NA_TahmKench_Bot_Nidalee(Ratings):
pass
class NA_TahmKench_Bot_Nocturne(Ratings):
pass
class NA_TahmKench_Bot_Nunu(Ratings):
pass
class NA_TahmKench_Bot_Olaf(Ratings):
pass
class NA_TahmKench_Bot_Orianna(Ratings):
pass
class NA_TahmKench_Bot_Ornn(Ratings):
pass
class NA_TahmKench_Bot_Pantheon(Ratings):
pass
class NA_TahmKench_Bot_Poppy(Ratings):
pass
class NA_TahmKench_Bot_Quinn(Ratings):
pass
class NA_TahmKench_Bot_Rakan(Ratings):
pass
class NA_TahmKench_Bot_Rammus(Ratings):
pass
class NA_TahmKench_Bot_RekSai(Ratings):
pass
class NA_TahmKench_Bot_Renekton(Ratings):
pass
class NA_TahmKench_Bot_Rengar(Ratings):
pass
class NA_TahmKench_Bot_Riven(Ratings):
pass
class NA_TahmKench_Bot_Rumble(Ratings):
pass
class NA_TahmKench_Bot_Ryze(Ratings):
pass
class NA_TahmKench_Bot_Sejuani(Ratings):
pass
class NA_TahmKench_Bot_Shaco(Ratings):
pass
class NA_TahmKench_Bot_Shen(Ratings):
pass
class NA_TahmKench_Bot_Shyvana(Ratings):
pass
class NA_TahmKench_Bot_Singed(Ratings):
pass
class NA_TahmKench_Bot_Sion(Ratings):
pass
class NA_TahmKench_Bot_Sivir(Ratings):
pass
class NA_TahmKench_Bot_Skarner(Ratings):
pass
class NA_TahmKench_Bot_Sona(Ratings):
pass
class NA_TahmKench_Bot_Soraka(Ratings):
pass
class NA_TahmKench_Bot_Swain(Ratings):
pass
class NA_TahmKench_Bot_Syndra(Ratings):
pass
class NA_TahmKench_Bot_TahmKench(Ratings):
pass
class NA_TahmKench_Bot_Taliyah(Ratings):
pass
class NA_TahmKench_Bot_Talon(Ratings):
pass
class NA_TahmKench_Bot_Taric(Ratings):
pass
class NA_TahmKench_Bot_Teemo(Ratings):
pass
class NA_TahmKench_Bot_Thresh(Ratings):
pass
class NA_TahmKench_Bot_Tristana(Ratings):
pass
class NA_TahmKench_Bot_Trundle(Ratings):
pass
class NA_TahmKench_Bot_Tryndamere(Ratings):
pass
class NA_TahmKench_Bot_TwistedFate(Ratings):
pass
class NA_TahmKench_Bot_Twitch(Ratings):
pass
class NA_TahmKench_Bot_Udyr(Ratings):
pass
class NA_TahmKench_Bot_Urgot(Ratings):
pass
class NA_TahmKench_Bot_Varus(Ratings):
pass
class NA_TahmKench_Bot_Vayne(Ratings):
pass
class NA_TahmKench_Bot_Veigar(Ratings):
pass
class NA_TahmKench_Bot_Velkoz(Ratings):
pass
class NA_TahmKench_Bot_Vi(Ratings):
pass
class NA_TahmKench_Bot_Viktor(Ratings):
pass
class NA_TahmKench_Bot_Vladimir(Ratings):
pass
class NA_TahmKench_Bot_Volibear(Ratings):
pass
class NA_TahmKench_Bot_Warwick(Ratings):
pass
class NA_TahmKench_Bot_Xayah(Ratings):
pass
class NA_TahmKench_Bot_Xerath(Ratings):
pass
class NA_TahmKench_Bot_XinZhao(Ratings):
pass
class NA_TahmKench_Bot_Yasuo(Ratings):
pass
class NA_TahmKench_Bot_Yorick(Ratings):
pass
class NA_TahmKench_Bot_Zac(Ratings):
pass
class NA_TahmKench_Bot_Zed(Ratings):
pass
class NA_TahmKench_Bot_Ziggs(Ratings):
pass
class NA_TahmKench_Bot_Zilean(Ratings):
pass
class NA_TahmKench_Bot_Zyra(Ratings):
pass
|
[
"noreply@github.com"
] |
koliupy.noreply@github.com
|
dfbed0cf31ade9a3f9c18e54055c6eff0e25863c
|
58654db29fc3dfb5af340b0368d85b7141db4263
|
/mf6shell/rasterfiles.py
|
c6a7359cce77807c03bc835cc0945e926abf72cd
|
[
"MIT"
] |
permissive
|
tomvansteijn/mf6shell
|
2149174ef849f26cfbfcdbdbbd02962ef98bf073
|
670fdcda33a4e1322cf62e478069d496dd0d7be1
|
refs/heads/master
| 2020-06-20T13:45:42.475305
| 2019-12-04T13:16:50
| 2019-12-04T13:16:50
| 197,139,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Tom van Steijn, Royal HaskoningDHV
import rasterio
import logging
import os
log = logging.getLogger(os.path.basename(__file__))
def read_raster(rasterfile, masked=True, band=1):
    log.debug('reading %s', rasterfile)
with rasterio.open(rasterfile) as src:
return src.read(band, masked=masked)
def write_raster(rasterfile, values, profile):
    log.debug('writing %s', rasterfile)
with rasterio.open(rasterfile, 'w', **profile) as dst:
return dst.write(values, 1)
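# Minimal usage sketch (added; 'input.tif' and 'output.tif' are hypothetical paths):
if __name__ == '__main__':
    with rasterio.open('input.tif') as src:
        profile = src.profile          # reuse the source profile for the output
    values = read_raster('input.tif')
    write_raster('output.tif', values * 2.0, profile)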
|
[
"tom.van.steijn@rhdhv.com"
] |
tom.van.steijn@rhdhv.com
|
d6fd3b369eb3b14ede95e9dc1ccdadb80a68081c
|
7d22c06007cfa981585fd7144d8463e3f9ce4793
|
/index/index_download_daily.py
|
550955254f832dbbb7e4e1a7493294cc748d349c
|
[] |
no_license
|
helenypan/petrologica-python
|
7aedf2bb4ac283c8badf97bfec9144c4ea515f06
|
60bce447d262797d73dcae283bffd60ef5e7e240
|
refs/heads/master
| 2021-01-19T11:16:27.022755
| 2017-07-13T11:56:10
| 2017-07-13T11:56:10
| 87,949,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
import sys
sys.path.append('../includes')
from db import DB
from common_functions import parse_company_prices, parse_ftse_prices, parse_currency
import urllib.request
import ssl
import re
context = ssl._create_unverified_context()
url_to_complete = "https://uk.finance.yahoo.com/quote/{}/history?interval=1d&filter=history&frequency=1d"
url_ftse_current = "https://uk.investing.com/indices/uk-100-historical-data"
url_currency_current = "https://www.investing.com/currencies/eur-nok-historical-data"
db = DB()
def extract_ftse_current_data(url):
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, context = context) as url_file:
html = str(url_file.read())
html_list = re.split("<html |</html>",html)
html_to_parse = "<html " + html_list[1] + "</html>"
parse_ftse_prices(html_to_parse, db)
def extract_currency_current_data(url, currency):
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, context = context) as url_file:
html = str(url_file.read())
html_list = re.split("<html |</html>",html)
html_to_parse = "<html " + html_list[1] + "</html>"
        parse_currency(html_to_parse, db, currency)
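# Illustrative check of the splitting trick used above (added):
_demo = "junk<html lang='en'><body/></html>trailer"
assert "<html " + re.split("<html |</html>", _demo)[1] + "</html>" == "<html lang='en'><body/></html>"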
def extract_company_current_prices(company_code):
url = url_to_complete.format(company_code)
with urllib.request.urlopen(url, context=context) as url_file:
html = url_file.read()
parse_company_prices(company_code,html, db)
db.cur.execute('''select company_code from tomorrow_external_data.index_company;''')
for row in db.cur.fetchall():
company_code = row["company_code"]
extract_company_current_prices(company_code)
# extract ftse100 data
extract_ftse_current_data(url_ftse_current)
# retrieve eur_nok currency data
extract_currency_current_data(url_currency_current,"EUR_NOK" )
db.close_connection()
|
[
"yue.pan@me.com"
] |
yue.pan@me.com
|