| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
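The `language` column has a single class, and every row below is Python. For orientation, a minimal sketch of pulling a few rows from a dataset with this schema via the Hugging Face `datasets` library; the identifier `example/code-dataset` is a hypothetical placeholder, since the dump does not name the dataset:

```python
# Stream a few rows from a code dataset with the schema above.
# "example/code-dataset" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("example/code-dataset", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```

---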
blob_id: c55812681bffcd67f705310e9d3133f402e043f6 | directory_id: 9d8a3a2c0a15dbf1f90d801e6d705d1212cf09af
path: /services/web__morningstaronline_co_uk.py | content_id: ac5fc53d8d59be5e0045ca7297f649f07c83b74c
detected_licenses: [] | license_type: no_license | repo_name: rudolphos/NewsGrabber
snapshot_id: f9bddc9a9b3a9e02f716133fd746f48cee635b36 | revision_id: 86354fb769b2710ac7cdd5bd8795e43158b70ad2 | branch_name: refs/heads/master
visit_date: 2021-01-12T12:07:55.335079 | revision_date: 2016-10-09T22:39:17 | committer_date: 2016-10-09T22:39:17
github_id: 72,316,773 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2016-10-30T00:35:08 | gha_created_at: 2016-10-30T00:35:08 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 417 | extension: py
content:
```python
refresh = 5
version = 20160312.01
urls = ['https://www.morningstaronline.co.uk/britain',
        'https://www.morningstaronline.co.uk/world',
        'https://www.morningstaronline.co.uk/editorial',
        'https://www.morningstaronline.co.uk/features',
        'https://www.morningstaronline.co.uk/sport',
        'https://www.morningstaronline.co.uk/arts']
regex = [r'^https?:\/\/[^\/]*morningstaronline\.co\.uk']
videoregex = []
liveregex = []
```
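As a quick sanity check of the service definition above, every seed URL should match the site regex; a small sketch using the standard `re` module:

```python
import re

regex = [r'^https?:\/\/[^\/]*morningstaronline\.co\.uk']
# Each seed URL in the service's urls list should match its own regex.
print(bool(re.match(regex[0], 'https://www.morningstaronline.co.uk/britain')))  # True
```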
authors: ["Arkiver@hotmail.com"] | author_id: Arkiver@hotmail.com

---

blob_id: 1ddcd5557d75e12aacac8b96c81f84d3742dcb9c | directory_id: 7a0b5b4315f7059fab4272c54d8c31b0fe956dbd
path: /Attributes.py | content_id: 3502604e17d2e7b22ba62b8464ccce4a26026ed2
detected_licenses: [] | license_type: no_license | repo_name: welkerCode/MachineLearning-SVM
snapshot_id: 94400d2a540d38e14f95ab0d14985e234b88b4cc | revision_id: 3d3ef3e71e0186816903754bd676ad9c1535bb47 | branch_name: refs/heads/master
visit_date: 2020-03-11T16:51:51.124393 | revision_date: 2018-04-23T23:08:20 | committer_date: 2018-04-23T23:08:20
github_id: 130,130,382 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 342 | extension: py
content:
```python
# I learned to parse workbooks with https://www.sitepoint.com/using-python-parse-spreadsheet-data/
class Attributes:
    def __init__(self):
        self.values = []

    def addValue(self, newValue):
        # Only append values that are not already present.
        if self.values.count(newValue) == 0:
            self.values.append(newValue)

    def getValues(self):
        return self.values
```
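A brief usage sketch of `Attributes`, showing that `addValue` ignores duplicates:

```python
attrs = Attributes()
for v in ["red", "green", "red"]:
    attrs.addValue(v)
print(attrs.getValues())  # ['red', 'green'] - the duplicate "red" is ignored
```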
authors: ["taylormaxwelker@gmail.com"] | author_id: taylormaxwelker@gmail.com

---

blob_id: 4ad9eb923372792981a00e9cb69f041884f5aec9 | directory_id: 5e3cbf8bfded9d719f2af2256121bd17c2d53e84
path: /GPUCheck.py | content_id: 462223c40c8635854b6e87e3c3a73372aa75d130
detected_licenses: [] | license_type: no_license | repo_name: UnrealPawn/3d-mask-gan
snapshot_id: dca929ab2828fa74e7db7ead685f4c00dc0c404e | revision_id: 142f1705f1c5e9915593a9fe789d7fb8bf21beaf | branch_name: refs/heads/master
visit_date: 2020-04-24T07:47:03.243999 | revision_date: 2019-02-21T07:05:55 | committer_date: 2019-02-21T07:05:55
github_id: 171,809,039 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 698 | extension: py
content:
```python
import os
os.environ["THEANO_FLAGS"] = "device=gpu%d,lib.cnmem=0" % (0)
from theano import function, config, shared, sandbox
import theano.tensor as T
import numpy
import time

vlen = 10 * 30 * 768  # 10 x #cores x # threads per core
iters = 1000

rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
f = function([], T.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
    r = f()
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
    print('Used the cpu')
else:
    print('Used the gpu')
```
authors: ["183827350@qq.com"] | author_id: 183827350@qq.com

---

blob_id: 57055c02b52144a20b3dc308a1a65e5d180c2744 | directory_id: 5c70f2e44f915d42240d5283a6455417eb8182e4
path: /skills/views.py | content_id: 47537b9e89c5675cc54981aa69a7e45513d50b0d
detected_licenses: [] | license_type: no_license | repo_name: Lairion/MainAcadProg
snapshot_id: 3aafd788609a8aeae8598b97a576f18b37995751 | revision_id: 62870f704938008571044120a8e07466ead7bff9 | branch_name: refs/heads/master
visit_date: 2021-04-06T20:36:07.727625 | revision_date: 2018-04-09T14:13:23 | committer_date: 2018-04-09T14:13:23
github_id: 125,405,578 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,570 | extension: py
content:
```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from .models import Skill, Project
# Create your views here.


class SkillsViews(object):
    """docstring for SkillsViews"""
    @staticmethod
    def skills(request):
        skills = Skill.objects.all()
        print(request.user)
        print(request.user.is_staff)
        context = {
            "title": "Skills",
            "skills": skills
        }
        return render(request, "skills.html", context)

    @staticmethod
    def show_projects(request):
        projects = Project.objects.all()
        new_projects = []
        for i in range(len(projects)):
            new_projects.append({"instance": projects[i], "result": i % 2 == 0})
        print(new_projects[0]['instance'].get_url())
        context = {
            "title": "Projects",
            "projects": new_projects
        }
        return render(request, "projects.html", context)

    @staticmethod
    def get_project(request, id):
        project = Project.objects.get(id=int(id))
        context = {
            "title": "Project",
            "project": project
        }
        return render(request, "project.html", context)

    @staticmethod
    def form_sessions(request):
        context = {
            'title': 'examp'
        }
        return render(request, "sessions_example.html", context)

    @staticmethod
    def add_sessions(request):
        print(request.session['some_text'])
        leadboard = request.session.get("leadboard", [])
        leadboard += [{'name': request.GET.get("name", ""), "score": 0}]
        request.session.update({"leadboard": leadboard})
        return redirect("skills:form")


# Stray module-level expression from the original file (evaluates to True and is discarded):
[{'name': 'peter', "score": 0}] + [{'name': 'peter', "score": 0}] == [{'name': 'peter', "score": 0}, {'name': 'peter', "score": 0}]
```
authors: ["nunyes_m_a@ukr.net"] | author_id: nunyes_m_a@ukr.net

---

blob_id: 427c47514b38a992a79fd0148c3b909718f39386 | directory_id: 82aaf5317c6c5009548da0b0e440c5dfb38d1fb8
path: /SConstruct | content_id: c5641fe6243c20cfb9ebfcaeab1717dc1f2e98a4
detected_licenses: [] | license_type: no_license | repo_name: listentowindy/to_silicon_valley
snapshot_id: 62835f1cf256ccf65c845482117e4f5350e06624 | revision_id: 876a6db9c5cbc2c6c613d468abd5807908364093 | branch_name: refs/heads/master
visit_date: 2021-01-10T08:45:49.744093 | revision_date: 2015-12-19T07:59:55 | committer_date: 2015-12-19T07:59:55
github_id: 47,193,850 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,818 | extension: (none)
content:
```python
env = Environment(tools=['default', 'packaging'])
env.Append(CCFLAGS = Split('-g -ggdb -Wall -Wextra -DBTDEBUG'))
#env.Append(CCFLAGS = Split('-g -O2 -Wall -Wextra -DNDEBUG'))

# set include path
comroot = '../../../com/'
cpppath = [
    './',
    comroot + 'thirdparty/gflags-2.0/include',
    comroot + 'thirdparty/glog-0.3.3/include',
    comroot + 'thirdparty/gtest-1.6.0/include',
]
env.Append(CPPPATH = cpppath)

# set libpath
libpath = [
    './',
    comroot + 'thirdparty/gflags-2.0/lib/',
    comroot + 'thirdparty/glog-0.3.3/lib/',
    comroot + 'thirdparty/gtest-1.6.0/lib/',
    comroot + 'thirdparty/boost_1.52.0/lib/',
]
env.Append(LIBPATH = libpath)

# set libs
libs = ['gflags', 'gtest', 'glog']
env.Append(LIBS = libs)
Export("env")

import os
import shutil

all_path = [
]
main_path = [
    #'sort',
    '6',
    '8',
]

def FindTestSource(paths):
    objs = []
    for path in paths:
        for file in os.listdir(path):
            if file[-2:] != ".c" and file[-3:] != ".cc" and file[-4:] != ".cpp":
                continue
            if file.startswith("test_"):
                file = '%s/%s' % (path, file)
                objs.append(file)
    return objs

def FindSource(paths):
    objs = []
    for path in paths:
        for file in os.listdir(path):
            if file[-2:] != ".c" and file[-3:] != ".cc" and file[-4:] != ".cpp":
                continue
            if not file.startswith("test_"):
                file = '%s/%s' % (path, file)
                objs.append(file)
    return objs

for bin_source in FindSource(main_path):
    bin_name, suffix = bin_source.split('.')
    if suffix == 'c' or suffix == 'cc' or suffix == 'cpp':
        env.Program(target = bin_name, source = [bin_source] + FindSource(all_path))

for bin_source in FindTestSource(all_path):
    bin_name = bin_source.split('.')[0]
    env.Program(target = bin_name, source = [bin_source] + FindSource(all_path))
```
authors: ["hongchunxiao.360.cn"] | author_id: hongchunxiao.360.cn

---

blob_id: 529e1d1dcbc6ea47584217f70df3e62ca984ad37 | directory_id: 7f3802c03f27fe61d9cdf9140385bee158a7f436
path: /coma/interfaces/mrtrix3.py | content_id: 64464b2ce1b8aab6dc5213308fd641e63bab9a83
detected_licenses: ["MIT"] | license_type: permissive | repo_name: GIGA-Consciousness/structurefunction
snapshot_id: 0157e88f12577f7dc02b40e251cc40ee6f61d190 | revision_id: 5c5583bb26d6092fa3b7a630192d8e79199f8df0 | branch_name: refs/heads/master
visit_date: 2023-07-11T20:01:58.844183 | revision_date: 2015-01-05T09:06:48 | committer_date: 2015-01-05T09:06:48
github_id: 14,110,584 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 10,826 | extension: py
content:
```python
def inclusion_filtering_mrtrix3(track_file, roi_file, fa_file, md_file, roi_names=None, registration_image_file=None, registration_matrix_file=None, prefix=None, tdi_threshold=10):
    import os
    import os.path as op
    import numpy as np
    import glob
    from coma.workflows.dmn import get_rois, save_heatmap
    from coma.interfaces.dti import write_trackvis_scene
    import nipype.pipeline.engine as pe
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.mrtrix as mrtrix
    import nipype.interfaces.diffusion_toolkit as dtk
    from nipype.utils.filemanip import split_filename
    import subprocess
    import shutil

    rois = get_rois(roi_file)

    fa_out_matrix = op.abspath("%s_FA.csv" % prefix)
    md_out_matrix = op.abspath("%s_MD.csv" % prefix)
    invLen_invVol_out_matrix = op.abspath("%s_invLen_invVol.csv" % prefix)

    subprocess.call(["tck2connectome", "-assignment_voxel_lookup",
                     "-zero_diagonal",
                     "-metric", "mean_scalar", "-image", fa_file,
                     track_file, roi_file, fa_out_matrix])
    subprocess.call(["tck2connectome", "-assignment_voxel_lookup",
                     "-zero_diagonal",
                     "-metric", "mean_scalar", "-image", md_file,
                     track_file, roi_file, md_out_matrix])
    subprocess.call(["tck2connectome", "-assignment_voxel_lookup",
                     "-zero_diagonal",
                     "-metric", "invlength_invnodevolume",
                     track_file, roi_file, invLen_invVol_out_matrix])
    subprocess.call(["tcknodeextract", "-assignment_voxel_lookup",
                     track_file, roi_file, prefix + "_"])

    fa_matrix_thr = np.zeros((len(rois), len(rois)))
    md_matrix_thr = np.zeros((len(rois), len(rois)))
    tdi_matrix = np.zeros((len(rois), len(rois)))
    track_volume_matrix = np.zeros((len(rois), len(rois)))
    out_files = []
    track_files = []

    for idx_i, roi_i in enumerate(rois):
        for idx_j, roi_j in enumerate(rois):
            if idx_j >= idx_i:
                filtered_tracks = glob.glob(op.abspath(prefix + "_%s-%s.tck" % (roi_i, roi_j)))[0]
                print(filtered_tracks)
                if roi_names is None:
                    roi_i = str(int(roi_i))
                    roi_j = str(int(roi_j))
                    idpair = "%s_%s" % (roi_i, roi_j)
                    idpair = idpair.replace(".", "-")
                else:
                    roi_name_i = roi_names[idx_i]
                    roi_name_j = roi_names[idx_j]
                    idpair = "%s_%s" % (roi_name_i, roi_name_j)

                tracks2tdi = pe.Node(interface=mrtrix.Tracks2Prob(), name='tdi_%s' % idpair)
                tracks2tdi.inputs.template_file = fa_file
                tracks2tdi.inputs.in_file = filtered_tracks
                out_tdi_name = op.abspath("%s_TDI_%s.nii.gz" % (prefix, idpair))
                tracks2tdi.inputs.out_filename = out_tdi_name
                tracks2tdi.inputs.output_datatype = "Int16"

                binarize_tdi = pe.Node(interface=fsl.ImageMaths(), name='binarize_tdi_%s' % idpair)
                binarize_tdi.inputs.op_string = "-thr %d -bin" % tdi_threshold
                out_tdi_vol_name = op.abspath("%s_TDI_bin_%d_%s.nii.gz" % (prefix, tdi_threshold, idpair))
                binarize_tdi.inputs.out_file = out_tdi_vol_name

                mask_fa = pe.Node(interface=fsl.MultiImageMaths(), name='mask_fa_%s' % idpair)
                mask_fa.inputs.op_string = "-mul %s"
                mask_fa.inputs.operand_files = [fa_file]
                out_fa_name = op.abspath("%s_FA_%s.nii.gz" % (prefix, idpair))
                mask_fa.inputs.out_file = out_fa_name

                mask_md = mask_fa.clone(name='mask_md_%s' % idpair)
                mask_md.inputs.operand_files = [md_file]
                out_md_name = op.abspath("%s_MD_%s.nii.gz" % (prefix, idpair))
                mask_md.inputs.out_file = out_md_name

                mean_fa = pe.Node(interface=fsl.ImageStats(op_string='-M'), name='mean_fa_%s' % idpair)
                mean_md = pe.Node(interface=fsl.ImageStats(op_string='-M'), name='mean_md_%s' % idpair)
                mean_tdi = pe.Node(interface=fsl.ImageStats(op_string='-l %d -M' % tdi_threshold), name='mean_tdi_%s' % idpair)
                track_volume = pe.Node(interface=fsl.ImageStats(op_string='-l %d -V' % tdi_threshold), name='track_volume_%s' % idpair)

                tck2trk = mrtrix.MRTrix2TrackVis()
                tck2trk.inputs.image_file = fa_file
                tck2trk.inputs.in_file = filtered_tracks
                trk_file = op.abspath("%s_%s.trk" % (prefix, idpair))
                tck2trk.inputs.out_filename = trk_file
                tck2trk.base_dir = op.abspath(".")
                if registration_image_file is not None and registration_matrix_file is not None:
                    tck2trk.inputs.registration_image_file = registration_image_file
                    tck2trk.inputs.matrix_file = registration_matrix_file

                workflow = pe.Workflow(name=idpair)
                workflow.base_dir = op.abspath(idpair)
                workflow.connect(
                    [(tracks2tdi, binarize_tdi, [("tract_image", "in_file")])])
                workflow.connect(
                    [(binarize_tdi, mask_fa, [("out_file", "in_file")])])
                workflow.connect(
                    [(binarize_tdi, mask_md, [("out_file", "in_file")])])
                workflow.connect(
                    [(mask_fa, mean_fa, [("out_file", "in_file")])])
                workflow.connect(
                    [(mask_md, mean_md, [("out_file", "in_file")])])
                workflow.connect(
                    [(tracks2tdi, mean_tdi, [("tract_image", "in_file")])])
                workflow.connect(
                    [(tracks2tdi, track_volume, [("tract_image", "in_file")])])
                workflow.config['execution'] = {'remove_unnecessary_outputs': 'false',
                                                'hash_method': 'timestamp'}
                result = workflow.run()
                tck2trk.run()

                fa_masked = glob.glob(out_fa_name)[0]
                md_masked = glob.glob(out_md_name)[0]
                if roi_names is not None:
                    tracks = op.abspath(prefix + "_%s-%s.tck" % (roi_name_i, roi_name_j))
                    shutil.move(filtered_tracks, tracks)
                else:
                    tracks = filtered_tracks
                tdi = glob.glob(out_tdi_vol_name)[0]

                nodes = result.nodes()
                node_names = [s.name for s in nodes]
                mean_fa_node = [nodes[idx] for idx, s in enumerate(node_names) if "mean_fa" in s][0]
                mean_fa = mean_fa_node.result.outputs.out_stat
                mean_md_node = [nodes[idx] for idx, s in enumerate(node_names) if "mean_md" in s][0]
                mean_md = mean_md_node.result.outputs.out_stat
                mean_tdi_node = [nodes[idx] for idx, s in enumerate(node_names) if "mean_tdi" in s][0]
                mean_tdi = mean_tdi_node.result.outputs.out_stat
                track_volume_node = [nodes[idx] for idx, s in enumerate(node_names) if "track_volume" in s][0]
                track_volume = track_volume_node.result.outputs.out_stat[1]  # First value is in voxels, 2nd is in volume

                if track_volume == 0:
                    os.remove(fa_masked)
                    os.remove(md_masked)
                    os.remove(tdi)
                else:
                    out_files.append(md_masked)
                    out_files.append(fa_masked)
                    out_files.append(tracks)
                    out_files.append(tdi)
                    if op.exists(trk_file):
                        out_files.append(trk_file)
                        track_files.append(trk_file)

                assert(0 <= mean_fa < 1)
                fa_matrix_thr[idx_i, idx_j] = mean_fa
                md_matrix_thr[idx_i, idx_j] = mean_md
                tdi_matrix[idx_i, idx_j] = mean_tdi
                track_volume_matrix[idx_i, idx_j] = track_volume

    fa_matrix = np.loadtxt(fa_out_matrix)
    md_matrix = np.loadtxt(md_out_matrix)
    fa_matrix = fa_matrix + fa_matrix.T
    md_matrix = md_matrix + md_matrix.T
    fa_matrix_thr = fa_matrix_thr + fa_matrix_thr.T
    md_matrix_thr = md_matrix_thr + md_matrix_thr.T
    tdi_matrix = tdi_matrix + tdi_matrix.T
    invLen_invVol_matrix = np.loadtxt(invLen_invVol_out_matrix)
    invLen_invVol_matrix = invLen_invVol_matrix + invLen_invVol_matrix.T
    track_volume_matrix = track_volume_matrix + track_volume_matrix.T

    if prefix is not None:
        npz_data = op.abspath("%s_connectivity.npz" % prefix)
    else:
        _, prefix, _ = split_filename(track_file)
        npz_data = op.abspath("%s_connectivity.npz" % prefix)
    np.savez(npz_data, fa=fa_matrix, md=md_matrix, tdi=tdi_matrix, trkvol=track_volume_matrix,
             fa_thr=fa_matrix_thr, md_thr=md_matrix_thr, invLen_invVol=invLen_invVol_matrix)

    print("Saving heatmaps...")
    fa_heatmap = save_heatmap(fa_matrix, roi_names, '%s_fa' % prefix)
    fa_heatmap_thr = save_heatmap(fa_matrix_thr, roi_names, '%s_fa_thr' % prefix)
    md_heatmap = save_heatmap(md_matrix, roi_names, '%s_md' % prefix)
    md_heatmap_thr = save_heatmap(md_matrix_thr, roi_names, '%s_md_thr' % prefix)
    tdi_heatmap = save_heatmap(tdi_matrix, roi_names, '%s_tdi' % prefix)
    trk_vol_heatmap = save_heatmap(track_volume_matrix, roi_names, '%s_trk_vol' % prefix)
    invLen_invVol_heatmap = save_heatmap(invLen_invVol_matrix, roi_names, '%s_invLen_invVol' % prefix)

    summary_images = []
    summary_images.append(fa_heatmap)
    summary_images.append(fa_heatmap_thr)
    summary_images.append(md_heatmap)
    summary_images.append(md_heatmap_thr)
    summary_images.append(tdi_heatmap)
    summary_images.append(trk_vol_heatmap)
    summary_images.append(invLen_invVol_heatmap)

    out_merged_file = op.abspath('%s_MergedTracks.trk' % prefix)
    skip = 80.
    track_merge = pe.Node(interface=dtk.TrackMerge(), name='track_merge')
    track_merge.inputs.track_files = track_files
    track_merge.inputs.output_file = out_merged_file
    track_merge.run()

    track_names = []
    for t in track_files:
        _, name, _ = split_filename(t)
        track_names.append(name)
    out_scene = op.abspath("%s_MergedScene.scene" % prefix)
    out_scene_file = write_trackvis_scene(out_merged_file, n_clusters=len(track_files), skip=skip, names=track_names, out_file=out_scene)
    print("Merged track file written to %s" % out_merged_file)
    print("Scene file written to %s" % out_scene_file)
    out_files.append(out_merged_file)
    out_files.append(out_scene_file)
    return out_files, npz_data, summary_images
```
authors: ["erik.ziegler@ulg.ac.be"] | author_id: erik.ziegler@ulg.ac.be

---

blob_id: 2fa80adb5365c206c46eb8da749aa6e4b59a116f | directory_id: c6838a47be5b22f1202867e577890ab76973184b
path: /polyphony/compiler/verilog_common.py | content_id: 36572a37cf5497b2df899d14577bbe3fe78bf903
detected_licenses: ["MIT"] | license_type: permissive | repo_name: Chippiewill/polyphony
snapshot_id: 2d129fc26d716d5b989dff085120b7303ca39815 | revision_id: 3f7a90779f082bbc13855d610b31d1058f11da9c | branch_name: refs/heads/master
visit_date: 2021-09-13T11:48:56.471829 | revision_date: 2017-08-31T01:47:15 | committer_date: 2017-08-31T01:47:15
github_id: 112,966,318 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2017-12-03T21:44:12 | gha_created_at: 2017-12-03T21:44:11 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,457 | extension: py
content:
```python
PYTHON_OP_2_VERILOG_OP_MAP = {
    'And': '&&', 'Or': '||',
    'Add': '+', 'Sub': '-', 'Mult': '*', 'FloorDiv': '/', 'Mod': '%',
    'LShift': '<<', 'RShift': '>>>',
    'BitOr': '|', 'BitXor': '^', 'BitAnd': '&',
    'Eq': '==', 'NotEq': '!=', 'Lt': '<', 'LtE': '<=', 'Gt': '>', 'GtE': '>=',
    'IsNot': '!=',
    'USub': '-', 'UAdd': '+', 'Not': '!', 'Invert': '~'
}


def pyop2verilogop(op):
    return PYTHON_OP_2_VERILOG_OP_MAP[op]


_keywords = {
    'always', 'assign', 'automatic',
    'begin',
    'case', 'casex', 'casez', 'cell', 'config',
    'deassign', 'default', 'defparam', 'design', 'disable',
    'edge', 'else', 'end', 'endcase', 'endconfig', 'endfunction', 'endgenerate',
    'endmodule', 'endprimitive', 'endspecify', 'endtable', 'endtask', 'event',
    'for', 'force', 'forever', 'fork', 'function',
    'generate', 'genvar',
    'if', 'ifnone', 'incdir', 'include', 'initial', 'inout', 'input', 'instance',
    'join', 'liblist', 'library', 'localparam',
    'macromodule', 'module',
    'negedge', 'noshowcancelled',
    'output',
    'parameter', 'posedge', 'primitive', 'pulsestyle_ondetect', 'pulsestyle_onevent',
    'reg', 'release', 'repeat',
    'scalared', 'showcancelled', 'signed', 'specparam', 'strength',
    'table', 'task', 'tri', 'tri0', 'tri1', 'triand', 'trior', 'trireg',
    'unsigned', 'use',
    'vectored',
    'wait', 'wand', 'while', 'wor', 'wire'
}


def is_verilog_keyword(word):
    return word in _keywords
```
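A short demonstration of the two helpers above, with values taken from the mapping and keyword set:

```python
print(pyop2verilogop('Add'))         # '+'
print(pyop2verilogop('RShift'))      # '>>>'
print(is_verilog_keyword('module'))  # True
print(is_verilog_keyword('foo'))     # False
```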
authors: ["kataoka@sinby.com"] | author_id: kataoka@sinby.com

---

blob_id: 32509504483d88e28dab7859a56a2999d721accc | directory_id: 4a0c58dfeef5e444b9f3d78f9d13d597ebc87d6b
path: /exercise_48_all_lines/exercise_48_all_lines.py | content_id: 29809e0f2fed1e27273bb63f3b29841710f7af2c
detected_licenses: [] | license_type: no_license | repo_name: jahokas/Python_Workout_50_Essential_Exercises_by_Reuven_M_Lerner
snapshot_id: e2dc5a08515f30b91514c8eb9b76bb10552ba7a2 | revision_id: 96ddabda7871faa0da9cd9e0563b16df99f0c853 | branch_name: refs/heads/master
visit_date: 2023-06-21T11:54:41.794571 | revision_date: 2021-07-20T08:46:49 | committer_date: 2021-07-20T08:46:49
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 251 | extension: py
content:
```python
import os

def all_lines(path):
    for filename in os.listdir(path):
        full_filename = os.path.join(path, filename)
        try:
            for line in open(full_filename):
                yield line
        except OSError:
            pass
```
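A usage sketch for the `all_lines` generator; the directory `'.'` is just an example argument:

```python
# Count lines across every readable file in the current directory.
total = sum(1 for _ in all_lines('.'))
print(f"{total} lines in all readable files")
```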
authors: ["dmitriyrubanov1988@gmail.com"] | author_id: dmitriyrubanov1988@gmail.com

---

blob_id: 2138801a48a4de3e3f328339e1a0f23bbedcdf77 | directory_id: 32d6370fe51c42cd99af52b03f81a82d687e5bb3
path: /Desafio073.py | content_id: c73ea9d3a3a68dad7fa5feb6d681f879f71d1e3e
detected_licenses: [] | license_type: no_license | repo_name: AndreLovo/Python
snapshot_id: a5f4b10c72641048dfd65b6c58bce3b233abeb07 | revision_id: ea98a2ca572dfebf9be32502f041016ec14ac0d5 | branch_name: refs/heads/master
visit_date: 2022-05-03T13:08:37.178401 | revision_date: 2022-04-10T16:41:54 | committer_date: 2022-04-10T16:41:54
github_id: 157,377,691 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,043 | extension: py
content:
```python
# Create a tuple filled with the top 20 teams of the Brazilian Football
# Championship (Campeonato Brasileiro) table, in order of standing. Then show:
# a) The first 5 teams.
# b) The last 4 teams.
# c) The teams in alphabetical order.
# d) The position of the São Paulo team.
# (Note: the tuple below actually lists 16 teams.)
times = ('Atlético-MG', 'Flamengo', 'Palmeiras', 'Fortaleza', 'Corinthians', 'RB Bragantino', 'Fluminense', 'América-MG', 'Atlético-GO', 'Santos', 'Ceará', 'Internacional', 'São Paulo', 'Athletico-PR', 'Cuiabá', 'Juventude')
print('==' * 15)
# a) "Slicing" the tuple
# Showing the first 5 teams:
for t in times:
    print(t)
print('==' * 15)
a = times[:5]
print(f'The first five teams are: {a}')
print('==' * 15)
# b) Showing the last four teams
b = times[-4:]
print(f'The last four teams are: {b}')
print('==' * 15)
# c) Showing the teams in alphabetical order:
print(sorted(times))
# d) Showing the position of São Paulo:
print(f'São Paulo is in position {times.index("São Paulo") + 1}')
```
authors: ["noreply@github.com"] | author_id: AndreLovo.noreply@github.com

---

blob_id: ab771edf0437105d900a63d8b5655d39a750a2ff | directory_id: 121c583ac01aa6ad3d5e417fe764236ecb98deb7
path: /src/Modeling and evaluation/NN-multi-targets/drawModel.py | content_id: 79bcecb094ccdc523253f1d3c8e27316ca9735b9
detected_licenses: [] | license_type: no_license | repo_name: thangdnsf/ML_DVFS
snapshot_id: e00b82294d35c314209bc56458e7980abc200884 | revision_id: 908810969588cbcf7c784c0f3ad829eb15bbe0b4 | branch_name: refs/heads/master
visit_date: 2023-06-07T03:35:52.696587 | revision_date: 2021-06-25T18:03:13 | committer_date: 2021-06-25T18:03:13
github_id: 380,318,849 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,135 | extension: py
content:
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 16:46:31 2021

@author: ndthang
"""
import pandas as pd
import numpy as np
import sys
import os
import time
import random
from model import DVFSModel
from datagenerator import HPC_DVFS, labels, labelsZ, make_weights_for_balanced_classes
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
from tqdm import tqdm
import matplotlib.pyplot as plt
import util
from datetime import datetime
import json
from warnings import filterwarnings
filterwarnings("ignore")

# hyperparameters
kfold = 0
seed = 42
warmup_epo = 5
init_lr = 1e-3
batch_size = 100
valid_batch_size = 100
n_epochs = 300  # 1000
num_batch = 400
warmup_factor = 10
num_workers = 4
use_amp = True
early_stop = 100
device = torch.device('cuda')
model_dir = 'logs7/'


class GradualWarmupSchedulerV2(GradualWarmupScheduler):
    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        super(GradualWarmupSchedulerV2, self).__init__(optimizer, multiplier, total_epoch, after_scheduler)

    def get_lr(self):
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        if self.multiplier == 1.0:
            return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
        else:
            return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]


def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True  # for faster training, but not deterministic

seed_everything(seed)

# load dataset for training
df = pd.read_csv('../../data4train/train.csv', na_filter=False, index_col=0)
ignore_columns = ['typedata', 'expe', 'UID', 'target', 'targetZ', 'kfold',  # 'guest', 'guest_nice', 'irq',
                  # 'steal','nice','emulation_faults','irxp', 'irxb',
                  # 'itxp', 'itxb', 'core0','core1','iowait','softirq','txp',
                  ]
feature_list = list(set(df.columns) - set(ignore_columns))
train_df = df[df.kfold != kfold]
train_df = train_df.sample(frac=1)
val_df = df[df.kfold == kfold]

# standardization of the training data
sc = StandardScaler()
train_loader_ = HPC_DVFS(df=train_df, mode='training', augmentation=True, feature_list=feature_list, sc=sc)
sc_train = train_loader_.StandardScaler()
val_loader_ = HPC_DVFS(df=val_df, mode='valid', feature_list=feature_list, sc=sc_train)

weights = make_weights_for_balanced_classes(train_df.target.values, len(labels))
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
train_loader = torch.utils.data.DataLoader(train_loader_, batch_size=batch_size, sampler=sampler, num_workers=num_workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(val_loader_, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)

model = DVFSModel(n_cont=len(feature_list), out_sz=len(labels), out_szz=len(labelsZ), szs=[1024, 512, 256, 128, 64], drops=[0.001, 0.01, 0.05, 0.1, 0.2])
model = model.to(device)
x, y = next(iter(train_loader))
x = x.to(device)
yhat = model(x)

from torchviz import make_dot
from torchsummary import summary
make_dot(yhat, params=dict(list(model.named_parameters()))).render("rnn_torchviz", format="png")
summary(model, [190])
```
authors: ["thangdn.tlu@gmail.com"] | author_id: thangdn.tlu@gmail.com

---

blob_id: 040bb40356755d5212e78b84510e1694a8c54de4 | directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3
path: /Python_codes/p03211/s451237439.py | content_id: f9efa41c7796ee9c183843e22d4cccb747349d8b
detected_licenses: [] | license_type: no_license | repo_name: Aasthaengg/IBMdataset
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | branch_name: refs/heads/main
visit_date: 2023-04-22T10:22:44.763102 | revision_date: 2021-05-13T17:27:22 | committer_date: 2021-05-13T17:27:22
github_id: 367,112,348 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 102 | extension: py
content:
```python
s = input()
m = 100000
for i in range(len(s) - 2):
    a = s[i:i + 3]
    m = min(abs(753 - int(a)), m)
print(m)
```
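The snippet scans every 3-digit window of the input and keeps the smallest absolute difference from 753; the same logic wrapped as a reusable function (the name `min_diff_753` is hypothetical):

```python
def min_diff_753(s: str) -> int:
    # Assumes s contains at least 3 digits.
    return min(abs(753 - int(s[i:i + 3])) for i in range(len(s) - 2))

print(min_diff_753("1234567"))  # 186  (|753 - 567|)
print(min_diff_753("753"))      # 0
```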
authors: ["66529651+Aastha2104@users.noreply.github.com"] | author_id: 66529651+Aastha2104@users.noreply.github.com

---

blob_id: a44cdf5bac620404b03de57b61aafde1143c1c6c | directory_id: c017800ec1ef8425ca74b6bcd9f33bbbb0c45181
path: /client.py | content_id: 76ae12a2d0f874645ac58d608c5573e77fe88720
detected_licenses: [] | license_type: no_license | repo_name: Ak47001/client-server
snapshot_id: 2de4d21029d2149e5e7aa27f9df0c82e144ef0b0 | revision_id: bc2705bed7c38588e3c4d2abe29b3ae226cdd404 | branch_name: refs/heads/master
visit_date: 2020-08-05T02:16:50.212121 | revision_date: 2019-10-08T04:42:02 | committer_date: 2019-10-08T04:42:02
github_id: 212,359,856 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 413 | extension: py
content:
```python
import socket
import sys

# read file
filename = 'filename.txt'

# client program using python
s = socket.socket()
ip = ''
port = 8777
s.connect((ip, port))
print("Successfully connected to ", ip)
with open(filename, 'r') as fd:
    data = fd.read(1024)                 # 'data' instead of shadowing the built-in 'str'
    s.sendall(bytes(filename, 'utf-8'))  # announce the file name first
    print("Last Modified time", s.recv(1024))
    while data:
        s.send(bytes(data, 'utf-8'))     # stream the file in 1 KiB chunks
        data = fd.read(1024)
s.close()
```
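The matching server is not part of this row; a minimal counterpart sketch, assuming the same ad-hoc protocol (receive the file name, reply with a timestamp string, then read the body in chunks):

```python
import socket
import time

# Hypothetical counterpart to client.py: same port, same ad-hoc protocol.
srv = socket.socket()
srv.bind(('', 8777))
srv.listen(1)
conn, addr = srv.accept()
name = conn.recv(1024).decode('utf-8')      # file name sent by the client
conn.sendall(time.ctime().encode('utf-8'))  # "last modified time" reply
data = b''
while True:
    chunk = conn.recv(1024)                 # file body, 1 KiB at a time
    if not chunk:
        break
    data += chunk
conn.close()
srv.close()
```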
authors: ["noreply@github.com"] | author_id: Ak47001.noreply@github.com

---

blob_id: 24de7c918a9da758aeae9ce57ebec7dee3270ce3 | directory_id: dd4ea40348f994151e5d4279b483363526c54936
path: /constants/search_page.py | content_id: 35c43a56c2e63eca3c835429c086381c9207ca89
detected_licenses: [] | license_type: no_license | repo_name: myshkarus/qa-demo-shop
snapshot_id: a0aed167f5febfc471100173bff03a3c1568a84a | revision_id: 517f3e82f4632f8472564e155a0d121796101291 | branch_name: refs/heads/master
visit_date: 2023-08-02T21:24:52.248299 | revision_date: 2021-09-14T18:52:28 | committer_date: 2021-09-14T18:52:28
github_id: 406,486,721 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 95 | extension: py
content:
```python
class SearchPageConstants:
    """Store constants related to the SearchPage object"""
    pass
```
authors: ["m.shpilenko@gmail.com"] | author_id: m.shpilenko@gmail.com

---

blob_id: 6a0f5439c7a515742c41b2b89a24418d3e311706 | directory_id: 4b47c77e3bd7ac31f230bcc46f8a08c70fd66893
path: /src/processing/test.py | content_id: b73d32aceb1db509bf4a9d1e650719bf56a990cd
detected_licenses: [] | license_type: no_license | repo_name: DaniilRoman/predictiveAnalyticsForTimeSeries
snapshot_id: 1d48c7fde7c1c642e8304a9ee011ba49c81e1a8f | revision_id: 362e7d0f743416f3685a1e4ffc4382df6505d6f4 | branch_name: refs/heads/master
visit_date: 2020-04-15T02:58:29.427886 | revision_date: 2019-05-26T17:47:35 | committer_date: 2019-05-26T17:47:35
github_id: 164,330,957 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-11-01T13:54:21 | gha_created_at: 2019-01-06T17:46:17 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,743 | extension: py
content:
```python
from multiprocessing import Pool, Process
import time
from src.processing.DataHolder import DataHolder

# def func(arg="check"):
#     time.sleep(1)
#     print(arg)

# if __name__ == '__main__':
#     pass
#     p = Process(target=func, args=("test",))
#     p.start()
#     p.join()  # means we attach process p to the main process, so execution becomes
#               # sequential, i.e. we wait for process p to finish
#     dataHolder = DataHolder()
#     p = Process(target=dataHolder.storeNewValue)
#     p.start()
#
# import time
# from multiprocessing import Process, Value, Lock
#
# def func(val, lock):
#     for i in range(50):
#         time.sleep(0.01)
#         with lock:
#             val.value += 1
#             print(val.value)
#
# if __name__ == '__main__':
#     v = Value('i', 0)
#     lock = Lock()
#     v.value += 1
#     procs = [Process(target=func, args=(v, lock)) for i in range(10)]
#
#     for p in procs: p.start()
#     # for p in procs: p.join()
#
#     # print(v.value)
#
# from multiprocessing import Process, Pipe
# import time
#
# def reader_proc(pipe):
#     p_output, p_input = pipe
#     p_input.close()
#     while p_output.poll():
#         msg = p_output.recv()
#         # print(msg)
#
# def writer(count, p_input):
#     for ii in range(0, count):
#         p_input.send(ii)
#
# if __name__=='__main__':
#     for count in [10]:
#         p_output, p_input = Pipe()
#         reader_p = Process(target=reader_proc, args=((p_output, p_input),))
#         reader_p.daemon = True
#         reader_p.start()
#         p_output.close()
#
#         _start = time.time()
#         writer(count, p_input)  # Send a lot of stuff to reader_proc()
#         p_input.close()
#         reader_p.join()
#         print("Sending {0} numbers to Pipe() took {1} seconds".format(count,
#                                                                       (time.time() - _start)))

import matplotlib.pylab as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn import datasets

diabetes = datasets.load_diabetes()

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(diabetes.data, diabetes.target, test_size=0.2, random_state=0)

model = LinearRegression()
# print(X_train)
# print(X_train.shape)
X_train = X_train[:, 0].reshape(-1, 1)
X_test = X_test[:, 0].reshape(-1, 1)
# print(X_train)
# print(X_train.shape)
# print(y_train)
# print(y_train.shape)
# 2. Use fit
model.fit(X_train, y_train)
# 3. Check the score
print(X_test)
print(X_test.shape)
predict = model.predict(X_test)
# print(predict)
# print(predict.shape)
```
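The live tail of this script fits a one-feature linear regression but never evaluates it; a small follow-up sketch using the objects already defined (standard scikit-learn and matplotlib calls):

```python
# Evaluate and visualize the single-feature fit on the held-out split.
print("R^2 on the held-out split:", model.score(X_test, y_test))
plt.scatter(X_test, y_test, label="actual")
plt.scatter(X_test, predict, color="red", label="predicted")
plt.legend()
plt.show()
```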
authors: ["danroman17397@gmail.com"] | author_id: danroman17397@gmail.com

---

blob_id: 1eac67a4a0e78a788d99dc2ae97cc6d4071fe1e9 | directory_id: 4986c32aa387a0231b4c9de9b32ddd5f62b41931
path: /app/extend.py | content_id: 23cfce9697c46bb1db7366147c027f717059185b
detected_licenses: [] | license_type: no_license | repo_name: Mrdorom/Pluto
snapshot_id: 08e8e03d0d0a3e265182b13bbe26acaffcf82e0d | revision_id: cb0f6be168e4c182c06573b812fdc29c2dcf3076 | branch_name: refs/heads/master
visit_date: 2023-04-01T17:52:39.249643 | revision_date: 2021-04-09T13:08:30 | committer_date: 2021-04-09T13:08:30
github_id: 349,041,670 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 513 | extension: py
content:
"""
-------------------------------------------------
File Name: extend
Description : 本文件主要使用一些扩展
Author : shili
date: 2021/3/11
-------------------------------------------------
Change Activity: 2021/3/11:
-------------------------------------------------
"""
__author__ = 'shili'
from flask_mongoengine import MongoEngine
from flask_jwt_extended import JWTManager
from flask_marshmallow import Marshmallow
mongo = MongoEngine()
jwt = JWTManager()
ma = Marshmallow()
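These extension objects stay unbound until an application factory calls `init_app` on each; a minimal sketch of the usual Flask wiring (the factory, the config values, and the import path are assumptions, not shown in this file):

```python
# Hypothetical app factory wiring for the extensions defined above.
from flask import Flask
from app.extend import mongo, jwt, ma

def create_app():
    app = Flask(__name__)
    app.config["MONGODB_SETTINGS"] = {"db": "pluto"}  # assumed database name
    app.config["JWT_SECRET_KEY"] = "change-me"        # assumed secret
    mongo.init_app(app)
    jwt.init_app(app)
    ma.init_app(app)
    return app
```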
authors: ["shili@yizhoucp.cn"] | author_id: shili@yizhoucp.cn

---

blob_id: bec79e0333db1af0c3c99276cfa189282d7dd9c2 | directory_id: 8e040542e4796b4f70352e0162b7a75b18034b6c
path: /blogapp/myblog/views.py | content_id: 9de86d4f964331945786fea38fc8d7247771b1b4
detected_licenses: [] | license_type: no_license | repo_name: Atul18341/django-blog-app
snapshot_id: 4e4e688a4e9559227db40b68824e7b69b6cffa80 | revision_id: 149c1d9952da079d1764c11bd91f53e03930f299 | branch_name: refs/heads/main
visit_date: 2023-03-28T02:11:01.620590 | revision_date: 2021-04-01T06:46:11 | committer_date: 2021-04-01T06:46:11
github_id: 347,375,309 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,539 | extension: py
content:
```python
from django.shortcuts import render, redirect
from .models import blog
from .forms import NewUserForm
from django.contrib.auth import login, authenticate
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm

# Create your views here.
def blogs(request):
    blogs = blog.objects.all()
    return render(request, "index.html", {'blogs': blogs})

def user_login(request):
    if request.method == "POST":
        form = AuthenticationForm(request, request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                messages.info(request, f"You are now logged in as: {username}.")
                return redirect("Blog")
            else:
                messages.error(request, "Invalid username or password.")
        else:
            messages.error(request, "Invalid username or password. Correct it.")
    form = AuthenticationForm()
    return render(request, "login.html", {'login_form': form})

def signup(request):
    if request.method == "POST":
        form = NewUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            messages.success(request, "Registration Successful.")
            return redirect("Login")
        messages.error(request, "Unsuccessful registration. Invalid information.")
    form = NewUserForm
    return render(request, "signup.html", {'register_form': form})
```
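The views redirect to route names "Blog" and "Login"; a hypothetical `urls.py` wiring consistent with those names (the actual URL configuration is not included in this row):

```python
# Hypothetical urls.py for the views above; the names "Blog" and "Login"
# match the redirect() targets used in views.py, "Signup" is assumed.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.blogs, name='Blog'),
    path('login/', views.user_login, name='Login'),
    path('signup/', views.signup, name='Signup'),
]
```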
authors: ["atulkumar987613@gmail.com"] | author_id: atulkumar987613@gmail.com

---

blob_id: 9a83f9b0df96736170c0b233641592d12a80cb24 | directory_id: 4fcf20223f971875c24c1be921ed86681063070a
path: /taco/migrations/0002_auto_20210227_1527.py | content_id: 6af811ad7aada9f01bea2363f3fe02024d6254d0
detected_licenses: [] | license_type: no_license | repo_name: victorhausen/taco-api
snapshot_id: 52ee266256d5e128f00b6ac2d045fe51ee1c7aa5 | revision_id: 0596ba16cb454fef0cb4b56bea5b086b48e1ab98 | branch_name: refs/heads/main
visit_date: 2023-03-18T11:24:02.600897 | revision_date: 2021-02-27T15:28:51 | committer_date: 2021-02-27T15:28:51
github_id: 342,873,953 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,546 | extension: py
content:
```python
# Generated by Django 3.1.7 on 2021-02-27 15:27
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('taco', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Taco',
            fields=[
                ('id', models.TextField(blank=True, db_column='id', primary_key=True, serialize=False)),
                ('nomedoalimento', models.TextField(blank=True, db_column='NomedoAlimento', null=True)),
                ('categoria', models.TextField(blank=True, db_column='Categoria', null=True)),
                ('umidade_field', models.TextField(blank=True, db_column='Umidade(%)', null=True)),
                ('energia_kcal_field', models.TextField(blank=True, db_column='Energia(kcal)', null=True)),
                ('energia_kj_field', models.TextField(blank=True, db_column='Energia(kJ)', null=True)),
                ('proteina_g_field', models.TextField(blank=True, db_column='Proteina(g)', null=True)),
                ('lipideos_g_field', models.TextField(blank=True, db_column='Lipideos(g)', null=True)),
                ('colesterol_mg_field', models.TextField(blank=True, db_column='Colesterol(mg)', null=True)),
                ('carboidrato_g_field', models.TextField(blank=True, db_column='Carboidrato(g)', null=True)),
                ('fibraalimentar_g_field', models.TextField(blank=True, db_column='FibraAlimentar(g)', null=True)),
                ('cinzas_g_field', models.TextField(blank=True, db_column='Cinzas(g)', null=True)),
                ('calcio_mg_field', models.TextField(blank=True, db_column='Calcio(mg)', null=True)),
                ('magnesio_mg_field', models.TextField(blank=True, db_column='Magnesio(mg)', null=True)),
                ('manganes_mg_field', models.TextField(blank=True, db_column='Manganes(mg)', null=True)),
                ('fosforo_mg_field', models.TextField(blank=True, db_column='Fosforo(mg)', null=True)),
                ('ferro_mg_field', models.TextField(blank=True, db_column='Ferro(mg)', null=True)),
                ('sodio_mg_field', models.TextField(blank=True, db_column='Sodio(mg)', null=True)),
                ('potassio_mg_field', models.TextField(blank=True, db_column='Potassio(mg)', null=True)),
                ('cobre_mg_field', models.TextField(blank=True, db_column='Cobre(mg)', null=True)),
                ('zinco_mg_field', models.TextField(blank=True, db_column='Zinco(mg)', null=True)),
                ('retinol_mg_field', models.TextField(blank=True, db_column='Retinol(mg)', null=True)),
                ('re_mcg_field', models.TextField(blank=True, db_column='RE(mcg)', null=True)),
                ('rae_mcg_field', models.TextField(blank=True, db_column='RAE(mcg)', null=True)),
                ('tiamina_mg_field', models.TextField(blank=True, db_column='Tiamina(mg)', null=True)),
                ('riboflavina_mg_field', models.TextField(blank=True, db_column='Riboflavina(mg)', null=True)),
                ('piridoxina_mg_field', models.TextField(blank=True, db_column='Piridoxina(mg)', null=True)),
                ('niacina_mg_field', models.TextField(blank=True, db_column='Niacina(mg)', null=True)),
                ('vitaminac_mg_field', models.TextField(blank=True, db_column='VitaminaC(mg)', null=True)),
            ],
            options={
                'db_table': 'Taco_4a_edicao_2011',
                'managed': False,
            },
        ),
        migrations.DeleteModel(
            name='Taco4AEdicao2011',
        ),
    ]
```
authors: ["hausen.victor@gmail.com"] | author_id: hausen.victor@gmail.com

---

blob_id: d15f3e485d2f5d87da7fe840c9e7b3791feac78f | directory_id: 0b6dcd0135f50aa96f49e436e947927442450e19
path: /scan.py | content_id: 1082eae9ce6e4f5639a481e4abab6441f7a2e2c3
detected_licenses: [] | license_type: no_license | repo_name: Andarko/Micros
snapshot_id: 922dc3dea6f9af22e00509e895c4c6ac985a743a | revision_id: 62b342725ae6fd1f8795eade5d1dbcb1b6ae9337 | branch_name: refs/heads/master
visit_date: 2021-07-01T02:16:04.635700 | revision_date: 2021-05-05T10:50:36 | committer_date: 2021-05-05T10:50:36
github_id: 230,379,857 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 90,680 | extension: py
content:
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import asyncio
# import shutil
import subprocess
import time
import os
import websockets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QWidget, QMainWindow, QSizePolicy, QFileDialog, QMessageBox
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout
from PyQt5.QtWidgets import QAction, QInputDialog, QLineEdit, QLabel, QPushButton, QSpinBox, QFormLayout
from PyQt5.QtWidgets import QAbstractSpinBox
from PyQt5.QtWidgets import QScrollArea, QScrollBar
from PyQt5.QtCore import QEvent, Qt, QTimer, QThread, pyqtSignal, QRect
import numpy as np
import cv2
import datetime
import zipfile
from PyQt5 import QtGui
from vassal import Terminal
from threading import Thread
import json
import xml.etree.ElementTree as Xml
from imutils.video import VideoStream
from scan_settings_dialog import SettingsDialog, ProgramSettings
# Класс главного окна
class ScanWindow(QMainWindow):
# Инициализация
def __init__(self, main_window):
super().__init__()
self.test = False
self.test_only_camera = False
self.main_window = main_window
# self.micros_controller = TableController('localhost', 5001)
self.loop = asyncio.get_event_loop()
self.program_settings = ProgramSettings(self.test)
# self.lbl_img = LabelImg()
self.lbl_img = QLabel(self)
self.scroll_area_img = QScrollArea(self)
self.scrollbar_img_hor = QScrollBar(Qt.Horizontal, self)
self.scrollbar_img_vert = QScrollBar(Qt.Vertical, self)
self.dir_for_img = "SavedImg"
self.path_for_xml_file = os.path.join(self.dir_for_img, "settings.xml")
# Перенос параметров с MicrosController
if self.test:
self.test_img_path = os.path.join("TEST", "MotherBoard_3.jpg")
# self.test_img_path = os.path.join("TEST", "MotherBoard_2.jpg")
# self.test_img_path = os.path.join("TEST", "MotherBoard_5.jpg")
self.test_img = cv2.imread(self.test_img_path)[:, :, :]
# self.frame = list()
self.video_img = None
self.video_check = False
if not self.test:
max_video_streams = 5
video_stream_index = -1
# vs = VideoStream(src=video_stream_index).start()
check_next_stream = True
while check_next_stream:
video_stream_index += 1
if video_stream_index > max_video_streams:
time.sleep(1.0)
video_stream_index = 0
# self.video_stream = VideoStream(src=video_stream_index).start()
# self.video_stream = VideoStream(src=video_stream_index, usePiCamera=True,
# resolution=(1920, 1080)).start()
self.video_stream = cv2.VideoCapture(video_stream_index)
self.video_stream.set(3, 1920)
self.video_stream.set(4, 1080)
# noinspection PyBroadException
try:
self.video_check, self.video_img = self.video_stream.read()
if not self.video_check:
continue
# check_frame = img[:, :, :]
check_next_stream = False
except Exception:
# self.video_stream.stop()
check_next_stream = True
else:
self.video_stream = None
self.vidik = VideoStreamThread(self.video_stream, self.video_img, self)
if not self.test:
self.vidik.changePixmap.connect(self.lbl_img.setPixmap)
self.vidik.start()
self.table_controller = TableController(self.loop, self.program_settings, self.vidik, self.test)
# TEST Для удобства тестирования передаю в контроллер стола контроллер камеры
# self.micros_controller = MicrosController(self.program_settings, self.test, self.lbl_img)
# if self.table_controller.test:
# self.table_controller.micros_controller = self.micros_controller
# self.table_controller.program_settings = self.program_settings
# if not self.table_controller.thread_server or not self.table_controller.thread_server.is_alive():
if not self.test and not self.test_only_camera:
self.table_controller.thread_server.start()
time.sleep(2.0)
# self.micros_controller.coord_check()
self.continuous_mode = False
self.closed = False
self.key_shift_pressed = False
# self.keyboard_buttons = {Qt.Key_Up: KeyboardButton(), Qt.Key_Right: KeyboardButton(),
# Qt.Key_Down: KeyboardButton(), Qt.Key_Left: KeyboardButton(),
# Qt.Key_Plus: KeyboardButton(), Qt.Key_Minus: KeyboardButton()}
self.keyboard_buttons = {Qt.Key_W: KeyboardButton(), Qt.Key_D: KeyboardButton(),
Qt.Key_S: KeyboardButton(), Qt.Key_A: KeyboardButton(),
Qt.Key_Plus: KeyboardButton(), Qt.Key_Minus: KeyboardButton()}
# Пока отключу лишний процесс ручного управления temp
# self.thread_continuous = Thread(target=self.continuous_move)
# self.thread_continuous = QThread()
# self.thread_continuous.started.connect(self.continuous_move)
self.timer_continuous = QTimer()
self.timer_continuous.setInterval(1)
self.timer_continuous.timeout.connect(self.continuous_move)
# if not self.test:
# self.thread_continuous.start()
# self.thread_video = Thread(target=self.video_thread)
# self.thread_video.start()
# self.table_controller.steps_in_mm = self.program_settings.table_settings.steps_in_mm
# self.table_controller.limits_mm = self.program_settings.table_settings.limits_mm
# self.table_controller.limits_step = self.program_settings.table_settings.limits_step
# self.table_controller.limits_step = list()
# for limit_mm in self.table_controller.limits_mm:
# self.table_controller.limits_step.append(limit_mm * self.table_controller.steps_in_mm)
# self.pixels_in_mm = self.program_settings.snap_settings.pixels_in_mm
# self.snap_width = self.program_settings.snap_settings.snap_width
# self.snap_height = self.program_settings.snap_settings.snap_height
# self.snap_width_mm = self.snap_width / self.pixels_in_mm
# self.snap_height_mm = self.snap_height / self.pixels_in_mm
# self.work_height = self.program_settings.snap_settings.work_height
#
# # self.micros_controller.frame = self.program_settings.snap_settings.frame
# self.frame_width = self.micros_controller.frame[2] - self.micros_controller.frame[0]
# self.frame_height = self.micros_controller.frame[3] - self.micros_controller.frame[1]
# self.frame_width_mm = self.frame_width / self.pixels_in_mm
# self.frame_height_mm = self.frame_height / self.pixels_in_mm
#
# self.delta_x = int(self.frame_width / 10)
# self.delta_y = int(self.frame_height / 10)
# Наличие несохраненного изображения
self.unsaved = False
if self.test:
print("Внимание! Программа работает в тестовом режиме!")
# Доступные для взаимодействия компоненты формы
self.lbl_coord = QLabel("Текущие координаты:")
self.btn_init = QPushButton("Инициализация")
self.btn_move_work_height = QPushButton("Занять рабочую высоту")
self.btn_move_mid = QPushButton("Двигать в середину")
self.btn_move = QPushButton("Двигать в ...")
self.btn_manual = QPushButton("Ручной режим")
self.edt_border_x1 = QSpinBox()
self.edt_border_y1 = QSpinBox()
self.edt_border_x2 = QSpinBox()
self.edt_border_y2 = QSpinBox()
self.btn_border = QPushButton("Определить границы")
self.btn_scan = QPushButton("Новая съемка")
self.btn_scan_without_borders = QPushButton("Съемка без границ")
self.btn_save_scan = QPushButton("Сохранить съемку")
self.clear_test_data()
self.init_ui()
# Создание элементов формы
def init_ui(self):
# keyboard.add_hotkey("Ctrl + 1", lambda: print("Left"))
# Основное меню
menu_bar = self.menuBar()
# Меню "Станок"
device_menu = menu_bar.addMenu("&Станок")
device_menu_action_init = QAction("&Инициализация", self)
device_menu_action_init.setShortcut("Ctrl+I")
device_menu_action_init.triggered.connect(self.device_init)
device_menu.addAction(device_menu_action_init)
device_menu.addSeparator()
device_menu_action_check = QAction("&Проверка", self)
device_menu_action_check.setShortcut("Ctrl+C")
device_menu_action_check.triggered.connect(self.device_check)
device_menu.addAction(device_menu_action_check)
device_menu.addSeparator()
device_menu_action_move = QAction("&Двигать", self)
device_menu_action_move.setShortcut("Ctrl+M")
device_menu_action_move.triggered.connect(self.device_move)
device_menu.addAction(device_menu_action_move)
device_menu.addSeparator()
device_menu_action_test_circle = QAction("&Круг", self)
device_menu_action_test_circle.triggered.connect(self.test_circle)
device_menu.addAction(device_menu_action_test_circle)
device_menu.addSeparator()
device_menu_action_exit = QAction("&Выйти", self)
device_menu_action_exit.setShortcut("Ctrl+Q")
device_menu_action_exit.setStatusTip("Закрыть приложение")
device_menu_action_exit.triggered.connect(self.close)
device_menu.addAction(device_menu_action_exit)
menu_bar.addMenu(device_menu)
# Меню "Настройки"
services_menu = menu_bar.addMenu("&Сервис")
services_menu_action_settings = QAction("&Настройки", self)
services_menu_action_settings.triggered.connect(self.services_menu_action_settings_click)
services_menu.addAction(services_menu_action_settings)
menu_bar.addMenu(services_menu)
# установка центрального виджета и лайаута
main_widget = QWidget(self)
central_layout = QHBoxLayout()
main_widget.setLayout(central_layout)
self.setCentralWidget(main_widget)
# левый лайаут с изображением
left_layout = QVBoxLayout()
central_layout.addLayout(left_layout)
self.scroll_area_img.setWidget(self.lbl_img)
self.scroll_area_img.setWidgetResizable(True)
# self.scroll_area_img.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# self.scroll_area_img.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollbar_img_hor.setMaximum(self.scroll_area_img.horizontalScrollBar().maximum())
self.scrollbar_img_hor.valueChanged.connect(self.sync_scroll)
self.scrollbar_img_vert.setMaximum(self.scroll_area_img.verticalScrollBar().maximum())
self.scrollbar_img_vert.valueChanged.connect(self.sync_scroll)
self.lbl_img.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.lbl_img.setStyleSheet("border: 1px solid red")
# left_layout.addWidget(self.lbl_img)
left_layout.addWidget(self.scroll_area_img)
# left_layout.addWidget(self.scrollbar_img_hor)
# left_layout.addWidget(self.scrollbar_img_vert)
# правый лайаут с панелью
right_layout = QVBoxLayout()
central_layout.addLayout(right_layout)
right_layout.addWidget(self.lbl_coord)
self.btn_init.clicked.connect(self.device_init)
right_layout.addWidget(self.btn_init)
self.btn_move.clicked.connect(self.device_move)
right_layout.addWidget(self.btn_move)
self.btn_move_work_height.clicked.connect(self.device_move_work_height)
right_layout.addWidget(self.btn_move_work_height)
self.btn_move_mid.clicked.connect(self.device_move_mid)
right_layout.addWidget(self.btn_move_mid)
self.btn_manual.setCheckable(True)
self.btn_manual.setChecked(False)
self.btn_manual.toggled["bool"].connect(self.device_manual)
right_layout.addWidget(self.btn_manual)
# self.edt_border_x1.setLineWrapMode(QTextEdit_LineWrapMode=QTextEdit.NoWrap)
border_layout = QVBoxLayout()
right_layout.addStretch()
right_layout.addLayout(border_layout)
border_layout.addWidget(QLabel("Границы съемки:"))
# border_form_layout = QGridLayout()
border_form_layout = QFormLayout()
# self.edt_border_x1.setWordWrapMode(QtGui.QTextOption.NoWrap)
for edt in [self.edt_border_x1, self.edt_border_y1, self.edt_border_x2, self.edt_border_y2]:
edt.setMaximumHeight(30)
edt.setMinimum(0)
edt.setSuffix(" mm")
edt.setButtonSymbols(QAbstractSpinBox.NoButtons)
edt.setSingleStep(0)
self.edt_border_x1.setMaximum(self.program_settings.table_settings.limits_mm[0])
self.edt_border_x2.setMaximum(self.program_settings.table_settings.limits_mm[0])
self.edt_border_y1.setMaximum(self.program_settings.table_settings.limits_mm[1])
self.edt_border_y2.setMaximum(self.program_settings.table_settings.limits_mm[1])
border_form_layout.addRow(QLabel("x1"), self.edt_border_x1)
border_form_layout.addRow(QLabel("y1"), self.edt_border_y1)
border_form_layout.addRow(QLabel("x2"), self.edt_border_x2)
border_form_layout.addRow(QLabel("y2"), self.edt_border_y2)
border_form_layout.setSpacing(0)
# border_form_layout.setSpacing(2)
# border_form_layout.addWidget(QLabel("x1"), 0, 0)
# border_form_layout.addWidget(self.edt_border_x1, 1, 0)
# border_form_layout.addWidget(QLabel("y1"), 2, 0)
# border_form_layout.addWidget(self.edt_border_y1, 3, 0)
# border_form_layout.addWidget(QLabel("x2"), 0, 1)
# border_form_layout.addWidget(self.edt_border_x2, 1, 1)
# border_form_layout.addWidget(QLabel("y2"), 2, 1)
# border_form_layout.addWidget(self.edt_border_y2, 3, 1)
border_layout.addLayout(border_form_layout)
right_layout.addWidget(self.btn_border)
self.btn_border.clicked.connect(self.border_find)
right_layout.addWidget(self.btn_scan)
self.btn_scan.clicked.connect(self.scan)
right_layout.addWidget(self.btn_scan_without_borders)
self.btn_scan_without_borders.clicked.connect(self.scan_without_borders)
right_layout.addWidget(self.btn_save_scan)
self.btn_save_scan.clicked.connect(self.save_scan)
self.btn_save_scan.setEnabled(False)
self.installEventFilter(self)
self.resize(1280, 720)
self.move(300, 300)
self.setMinimumSize(800, 600)
# self.show()
# print(self.pixels_in_mm)
def sync_scroll(self):
self.scroll_area_img.horizontalScrollBar().setValue(self.scrollbar_img_hor.value())
def __get_pixels_in_mm(self):
return self.program_settings.snap_settings.pixels_in_mm
pixels_in_mm = property(__get_pixels_in_mm)
def __get_snap_width(self):
return self.program_settings.snap_settings.snap_width
snap_width = property(__get_snap_width)
def __get_snap_height(self):
return self.program_settings.snap_settings.snap_height
snap_height = property(__get_snap_height)
def __get_snap_width_mm(self):
return self.snap_width / self.pixels_in_mm[0]
snap_width_mm = property(__get_snap_width_mm)
def __get_snap_height_mm(self):
return self.snap_height / self.pixels_in_mm[1]
snap_height_mm = property(__get_snap_height_mm)
def __get_work_height(self):
return self.program_settings.snap_settings.work_height
work_height = property(__get_work_height)
def __get_delta_x(self):
return int(self.frame_width / 10)
delta_x = property(__get_delta_x)
# def delta_x(self):
# return int(self.frame_width / 10)
def __get_delta_y(self):
return int(self.frame_height / 10)
delta_y = property(__get_delta_y)
# def delta_y(self):
# return int(self.frame_height / 10)
def __get_frame_width(self):
return self.program_settings.snap_settings.frame[2] - self.program_settings.snap_settings.frame[0]
frame_width = property(__get_frame_width)
# def frame_width(self):
# return self.program_settings.snap_settings.frame[2] - self.program_settings.snap_settings.frame[0]
def __get_frame_height(self):
return self.program_settings.snap_settings.frame[3] - self.program_settings.snap_settings.frame[1]
frame_height = property(__get_frame_height)
# def frame_height(self):
# return self.program_settings.snap_settings.frame[3] - self.program_settings.snap_settings.frame[1]
def __get_frame_width_mm(self):
return self.frame_width / self.pixels_in_mm[0]
frame_width_mm = property(__get_frame_width_mm)
def __get_frame_height_mm(self):
return self.frame_height / self.pixels_in_mm[1]
frame_height_mm = property(__get_frame_height_mm)
def __get_frame(self):
return self.program_settings.snap_settings.frame
frame = property(__get_frame)
def snap(self, x1: int, y1: int, x2: int, y2: int, crop=False):
if self.test:
time.sleep(0.05)
# return np.copy(self.test_img[y1:y2, x1:x2, :])
# Переворачиваем координаты съемки
y2_r = 6400 - y1
y1_r = 6400 - y2
return np.copy(self.test_img[y1_r:y2_r, x1:x2, :])
else:
# self.video_timer.stop()
time.sleep(0.15)
# for i in range(10):
# self.video_stream.read()
# Прогревочные съемки
for i in range(10):
self.video_stream.read()
check, img = self.video_stream.read()
self.lbl_img.setPixmap(self.vidik.numpy_to_pixmap(img))
self.lbl_img.repaint()
# img = self.vidik.video_img
# self.video_timer.start()
if crop:
# return np.copy(img[self.frame[3]-1:self.frame[1]:-1, self.frame[2]-1:self.frame[0]:-1, :])
# return np.copy(img[self.frame[1]:self.frame[3], self.frame[0]:self.frame[2], :][::-1, ::-1, :])
return np.copy(img[self.frame[1]:self.frame[3], self.frame[0]:self.frame[2], :])
else:
# return np.copy(img[::-1, ::-1, :])
return np.copy(img)
# Тестовая обертка функции движения, чтобы обходиться без подключенного станка
def coord_move(self, coord, mode="discrete", crop=False):
if not self.test and mode != "continuous":
self.vidik.work = False
self.table_controller.coord_move(coord, mode)
self.setWindowTitle(str(self.table_controller))
if self.table_controller.test or mode != "continuous":
# snap = self.micros_controller.snap(int(self.pixels_in_mm * (self.table_controller.coord_mm[0]
# - self.snap_width_mm_half)),
# int(self.pixels_in_mm * (self.table_controller.coord_mm[1]
# - self.snap_height_mm_half)),
# int(self.pixels_in_mm * (self.table_controller.coord_mm[0]
# + self.snap_width_mm_half)),
# int(self.pixels_in_mm * (self.table_controller.coord_mm[1]
# + self.snap_height_mm_half)),
# crop=crop)
snap = self.snap(int(self.pixels_in_mm[0] * (self.table_controller.coord_mm[0])),
int(self.pixels_in_mm[1] * (self.table_controller.coord_mm[1])),
int(self.pixels_in_mm[0] * (self.table_controller.coord_mm[0] + self.snap_width_mm)),
int(self.pixels_in_mm[1] * (self.table_controller.coord_mm[1] + self.snap_height_mm)),
crop=crop)
if self.test:
self.lbl_img.setPixmap(self.vidik.numpy_to_pixmap(snap))
self.lbl_img.repaint()
return snap
# self.lbl_img.setPixmap(self.vidik.numpy_to_pixmap(snap))
# self.lbl_img.repaint()
self.vidik.work = True
return None
def closeEvent(self, event):
if self.unsaved:
dlg_result = QMessageBox.question(self,
"Confirm Dialog",
"Данные последней съемки не сохранены. Хотите сперва их сохранить?",
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,
QMessageBox.Yes)
if dlg_result == QMessageBox.Yes:
if not self.save_scan():
event.ignore()
return
elif dlg_result == QMessageBox.Cancel:
event.ignore()
return
self.main_window.show()
time.sleep(0.01)
self.hide()
event.ignore()
self.closed = True
def services_menu_action_settings_click(self):
if self.program_settings.test:
QMessageBox.warning(self, "Warning!",
"Программа работает в тестовом режиме. Настройки не будут сохраняться!",
QMessageBox.Ok, QMessageBox.Ok)
settings_dialog = SettingsDialog(self.program_settings)
settings_dialog.setAttribute(Qt.WA_DeleteOnClose)
dlg_result = settings_dialog.exec()
if dlg_result > 0:
self.table_controller.server_status = 'uninitialized'
# def device_init(self):
# init_thread = Thread(target=self.device_init_in_thread)
# init_thread.start()
# print("init start")
def device_init(self):
self.vidik.work = False
self.control_elements_enabled(False)
self.table_controller.coord_init()
self.setWindowTitle(str(self.table_controller))
self.coord_move(self.table_controller.coord_mm, mode="discrete", crop=True)
self.control_elements_enabled(True)
self.vidik.work = True
def device_check(self):
self.table_controller.coord_check()
self.setWindowTitle(str(self.table_controller))
# def device_move(self):
# move_thread = Thread(target=self.device_move_in_thread)
# move_thread.start()
# print("move start")
def device_move(self):
self.vidik.work = False
self.control_elements_enabled(False)
input_dialog = QInputDialog()
text, ok = input_dialog.getText(self,
"Введите координаты в миллиметрах",
"Координаты:",
QLineEdit.Normal,
"{0:.2f}; {1:.2f}; {2:.2f}".format(self.table_controller.coord_mm[0],
self.table_controller.coord_mm[1],
self.table_controller.coord_mm[2]))
if ok:
coord = [float(item) for item in str.replace(str.replace(text, ',', '.'), ' ', '').split(';')]
if len(coord) == 3:
self.coord_move(coord)
self.setWindowTitle(str(self.table_controller))
self.control_elements_enabled(True)
self.vidik.work = True
def device_move_mid(self):
self.vidik.work = False
self.control_elements_enabled(False)
x = int(self.table_controller.limits_mm[0] / 2)
y = int(self.table_controller.limits_mm[1] / 3)
self.coord_move([x, y, self.table_controller.coord_mm[2]])
self.setWindowTitle(str(self.table_controller))
self.control_elements_enabled(True)
self.vidik.work = True
def device_move_work_height(self):
self.vidik.work = False
self.control_elements_enabled(False)
self.coord_move([self.table_controller.coord_mm[0],
self.table_controller.coord_mm[1],
self.work_height])
self.setWindowTitle(str(self.table_controller))
self.control_elements_enabled(True)
self.vidik.work = True
def device_manual(self, status):
if status:
self.continuous_mode = True
# self.thread_continuous.start()
self.timer_continuous.start()
# print("thread started")
else:
# self.thread_continuous.terminate()
self.timer_continuous.stop()
self.continuous_mode = False
# print("thread joined")
# self.continuous_mode = status
# self.control_elements_enabled(not status)
# print("device manual finished")
def control_elements_enabled(self, status):
self.btn_init.setEnabled(status)
self.btn_move.setEnabled(status)
self.btn_move_mid.setEnabled(status)
self.btn_move_work_height.setEnabled(status)
self.btn_border.setEnabled(status)
self.btn_scan.setEnabled(status)
self.btn_save_scan.setEnabled(status)
self.edt_border_x1.setEnabled(status)
self.edt_border_y1.setEnabled(status)
self.edt_border_x2.setEnabled(status)
self.edt_border_y2.setEnabled(status)
@staticmethod
def save_test_data(data):
f = open('test.txt', 'a')
now = datetime.datetime.now()
f.write(now.strftime("%d.%m.%Y %H:%M:%S") + "<=" + str(data) + '\r\n')
f.close()
@staticmethod
def clear_test_data():
f = open('test.txt', 'w+')
f.seek(0)
f.close()
    # Test function for drawing a circle and a spiral
def test_circle(self):
self.main_window.show()
self.close()
# self.table_controller.coord_check()
# count = 200
# r = 0.0
# # r = 20
# for i in range(20*count + 1):
# r += 1 / 10
# alfa = (i / count) * 2 * math.pi
# dx = int(r * math.sin(alfa))
# dy = int(r * math.cos(alfa))
# self.coord_move([dx, dy, 0], mode='continuous')
# self.micros_controller.coord_move([self.micros_controller.coord[0] + dx,
# self.micros_controller.coord[1] + dy,
# self.micros_controller.coord[2]])
# for d in range(1, 7):
# print(d)
# d_steps = int(2 ** d)
# for i in range(100):
# self.micros_controller.coord_move([d_steps, 0, 0], mode='continuous')
# for i in range(100):
# self.micros_controller.coord_move([-d_steps, 0, 0], mode='continuous')
# for i in range(100):
# self.micros_controller.coord_move([0, d_steps, 0], mode='continuous')
# for i in range(100):
# self.micros_controller.coord_move([0, -d_steps, 0], mode='continuous')
    # Walks along the border of the workpiece and records the limits for scanning
def border_find(self):
self.vidik.work = False
self.control_elements_enabled(False)
try:
if self.table_controller.server_status == 'uninitialized':
self.table_controller.coord_init()
            # Move the camera to the position where the chip should be located
x = int(self.table_controller.limits_mm[0] / 2)
y = int(self.table_controller.limits_mm[1] / 3)
if self.test:
y = int(self.table_controller.limits_mm[1] / 2)
snap = self.coord_move([x, y, self.table_controller.coord_mm[2]], mode="discrete", crop=True)
all_x = list()
all_y = list()
all_x.append(x)
all_y.append(y)
            # Directions for the edge search
# direction_sequence = [[1, 0], [0, 1], [-1, 0], [0, -1], [1, 0], [0, 1]]
# previous_direction = None
direction = Direction()
while direction.abs_index < 6:
print(direction)
                # Overshoot-past-the-middle parameter to speed up the traversal (0 to 4)
forward_over_move = 4
forward_count_total = 0
previous_direction = direction.previous()
# for direction in direction_sequence:
                # Grab the next frame until we go beyond the border of the workpiece
self.save_test_data("direction=" + str(direction))
while True:
                    # If there is a previous movement direction (every direction but the first),
                    # check whether the workpiece is drifting across the search line
# if previous_direction:
if direction.abs_index > 0:
                        # Check whether we have drifted inside or outside the object
                        # stuck is a guard against falling into a loop
stuck = False
correction_list = list()
while not stuck:
correction_count = self.check_object_middle(snap,
previous_direction,
[self.delta_x, self.delta_y])
correction_list.append(correction_count)
if len(correction_list) >= 4:
                                # Check for repeating corrections
if correction_list[-1] == correction_list[-3]:
if correction_list[-2] == correction_list[-4]:
if correction_list[-1] * correction_list[-2] < 0:
correction_count //= 2
stuck = True
self.save_test_data("unstuck doubled!")
                            # For a particularly nasty case of looping
if len(correction_list) >= 12:
stuck = True
self.save_test_data("unstuck loop repeatedly!")
if correction_count == 0:
break
                        # Check whether we have gone beyond the table limits
while True:
x += int(self.delta_x * correction_count * previous_direction[0] / self.pixels_in_mm[0])
y -= int(self.delta_y * correction_count * previous_direction[1] / self.pixels_in_mm[1])
if x < 0 or y < 0 or x > self.table_controller.limits_mm[0] or y > \
self.table_controller.limits_mm[1]:
x = all_x[-1]
y = all_y[-1]
correction_count -= int(abs(correction_count) / correction_count)
if correction_count == 0:
break
else:
break
if correction_count == 0:
break
# x += int(self.delta_x * correction_count * previous_direction[0] / self.pixels_in_mm)
# y -= int(self.delta_y * correction_count * previous_direction[1] / self.pixels_in_mm)
all_x.append(x)
all_y.append(y)
snap = self.coord_move([x, y, self.table_controller.coord_mm[2]], mode="discrete", crop=True)
if correction_count > 0:
self.save_test_data('x = ' + str(x) + '; y = ' + str(y) + ' inside correction')
elif correction_count < 0:
self.save_test_data('x = ' + str(x) + '; y = ' + str(y) + ' outside correction')
forward_count = self.find_border_in_image(snap,
direction,
[self.delta_x, self.delta_y],
forward_over_move)
                # We can still keep moving in the border-search direction
if forward_count > 0:
                    # Check whether we have gone beyond the table limits
while True:
x += int(self.delta_x * direction[0] * forward_count / self.pixels_in_mm[0])
y -= int(self.delta_y * direction[1] * forward_count / self.pixels_in_mm[1])
if x < 0 or y < 0 or x > self.table_controller.limits_mm[0] or y > \
self.table_controller.limits_mm[1]:
x = all_x[-1]
y = all_y[-1]
forward_count -= 1
if forward_count <= 0:
break
else:
break
if forward_count <= 0:
break
# x += int(self.delta_x * direction[0] * forward_count / self.pixels_in_mm)
# y -= int(self.delta_y * direction[1] * forward_count / self.pixels_in_mm)
all_x.append(x)
all_y.append(y)
forward_count_total += forward_count
snap = self.coord_move([x, y, self.table_controller.coord_mm[2]], mode="discrete", crop=True)
self.save_test_data('x = ' + str(x) + '; y = ' + str(y))
else:
if forward_count_total > forward_over_move:
all_x.pop()
all_y.pop()
x += int(self.delta_x * direction[0] * (-forward_over_move) / self.pixels_in_mm[0])
y -= int(self.delta_y * direction[1] * (-forward_over_move) / self.pixels_in_mm[1])
all_x.append(x)
all_y.append(y)
snap = self.coord_move([x, y, self.table_controller.coord_mm[2]], mode="discrete", crop=True)
self.save_test_data('x = ' + str(x) + '; y = ' + str(y) + ' forward correction')
break
# previous_direction = direction
direction = direction.next()
print("all_x=" + str(all_x))
print("all_y=" + str(all_y))
min_x = min(all_x) + 3 * self.delta_x / self.pixels_in_mm[0]
min_y = min(all_y) + 3 * self.delta_y / self.pixels_in_mm[1]
max_x = max(all_x) - 3 * self.delta_x / self.pixels_in_mm[0]
max_y = max(all_y) - 3 * self.delta_y / self.pixels_in_mm[1]
# self.edt_border_x1.setValue(min(all_x))
# self.edt_border_y1.setValue(min(all_y))
# self.edt_border_x2.setValue(max(all_x))
# self.edt_border_y2.setValue(max(all_y))
self.edt_border_x1.setValue(min_x)
self.edt_border_y1.setValue(min_y)
self.edt_border_x2.setValue(max_x)
self.edt_border_y2.setValue(max_y)
        except Exception as e:
            QMessageBox.critical(self, "Critical error", "A runtime error occurred: " + str(e),
                                 QMessageBox.Ok, QMessageBox.Ok)
            raise
finally:
self.control_elements_enabled(True)
QMessageBox.information(self, "Info Dialog", "Границы определены", QMessageBox.Ok, QMessageBox.Ok)
self.vidik.work = True
def exp_border_find(self):
pass
    # Helper that determines whether the camera has reached the border while searching in a given direction
@staticmethod
def find_border_in_image(img, direction, delta, forward_over_move=0):
index = abs(direction[1])
        # Check whether the movement direction has become "clear" (all sampled lines)
middle = int(img.shape[1 - index] / 2)
if direction[index] > 0:
middle -= 1
coord = [0, 0]
for i in range(5, -4, -1):
coord[index] = middle + i * delta[index] * direction[index]
for j in range(img.shape[index]):
coord[1 - index] = j
for k in range(3):
if img[coord[1]][coord[0]][k] < 128:
return i + forward_over_move
return 0
@staticmethod
    # Combined method that keeps the border of the object in the middle of the image during the search
    # (in the direction across the traversal).
    # Returns the number of lines by which the object border is offset from the middle of the image
def check_object_middle(img, direction, delta):
index = abs(direction[1])
non_white_limit = int(0.03 * img.shape[index])
middle = int(img.shape[1 - index] / 2)
        # In my opinion this 1-pixel shift of the middle is dubious. Below I added a safeguard against going outside the image bounds
if direction[index] > 0:
middle -= 1
        # Look for object pixels by color
coord = [0, 0]
# for i in range(0, 6):
for i in range(5, -6, -1):
coord[index] = middle + i * delta[index] * direction[index]
if coord[index] >= img.shape[1 - index]:
                coord[index] = img.shape[1 - index] - 1
if coord[index] < 0:
coord[index] = 0
non_white_count = 0
for j in range(img.shape[index]):
coord[1 - index] = j
for k in range(3):
if img[coord[1]][coord[0]][k] < 128:
non_white_count += 1
break
if non_white_count > non_white_limit:
return i
return -5
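    # Self-check sketch (editor's illustration, not part of the original code; it assumes
    # the enclosing class is ScanWindow, as instantiated in the commented-out __main__ at
    # the end of this file):
    #
    #   import numpy as np
    #   img = np.full((100, 100, 3), 255, dtype=np.uint8)
    #   img[:, :40, :] = 0                                     # object fills the left 40 columns
    #   ScanWindow.check_object_middle(img, [1, 0], [5, 5])    # -> -2: the border sits
    #                                                          #    2 delta-steps before the middle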
    # Scan over the specified coordinates
def scan(self):
self.vidik.work = False
if self.unsaved:
dlg_result = QMessageBox.question(self,
"Confirm Dialog",
"Данные последней съемки не сохранены. Хотите сперва их сохранить?",
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,
QMessageBox.Yes)
if dlg_result == QMessageBox.Yes:
if not self.save_scan():
return
elif dlg_result == QMessageBox.Cancel:
return
try:
coord = [float(self.edt_border_x1.value()), float(self.edt_border_y1.value()),
float(self.edt_border_x2.value()), float(self.edt_border_y2.value())]
except ValueError:
print("Неверный формат данных")
return
if self.table_controller.server_status == 'uninitialized':
self.table_controller.coord_init()
frame_size_mm = [self.frame_width_mm, self.frame_height_mm]
count = [0, 0]
for i in range(2):
            # Determine by how many mm the whole scan field sticks out past a whole number of frames
x_overage = (coord[i + 2] - coord[i]) % frame_size_mm[i]
count[i] = int((coord[i + 2] - coord[i]) // frame_size_mm[i]) + 1
if x_overage > 0:
                # And this is how much must be added to the scan field to get a whole number of frames
x_deficit = frame_size_mm[i] - x_overage
count[i] += 1
coord[i] -= x_deficit / 2
coord[i + 2] += x_deficit / 2
            # Check for going beyond the table limits
if coord[i] < 0:
coord[i + 2] -= coord[i]
coord[i] = 0
if coord[i + 2] > self.table_controller.limits_mm[i]:
coord[i] -= coord[i + 2] - self.table_controller.limits_mm[i]
coord[i + 2] = self.table_controller.limits_mm[i]
            # If the image still refuses to fit within the table limits, it has to be shrunk instead...
if coord[i] < 0:
coord[i] += x_overage / 2
coord[i + 2] -= x_overage / 2
count[i] -= 1
print("x1={0}; y1={1}; x2={2}; y2={3}".format(coord[0], coord[1], coord[2], coord[3]))
        # Manage the directory used for saving images
# shutil.rmtree(self.dir_for_img)
if not os.path.exists(self.dir_for_img):
os.mkdir(self.dir_for_img)
for file in os.listdir(self.dir_for_img):
os.remove(os.path.join(self.dir_for_img, file))
        # Capture the images and save them into the directory
left_dir = abs(self.table_controller.coord_mm[0] - coord[0]) > abs(self.table_controller.coord_mm[0] - coord[2])
        # choose the traversal of the image based on whether we are closer to its top or bottom
j_start = 0
j_finish = count[1]
j_delta = 1
if abs(self.table_controller.coord_mm[1] - coord[1]) > abs(self.table_controller.coord_mm[1] - coord[3]):
j_start = count[1] - 1
j_finish = -1
j_delta = -1
for j in range(j_start, j_finish, j_delta):
y = coord[1] + j * self.frame_height_mm
            # In the viewer program the y axis is flipped to point downward
j_r = count[1] - 1 - j
if left_dir:
x_range = range(count[0] - 1, -1, -1)
else:
x_range = range(0, count[0], 1)
for i in x_range:
x = coord[0] + i * self.frame_width_mm
snap = self.coord_move([x, y, self.table_controller.coord_mm[2]], mode="discrete")
cv2.imwrite(os.path.join(self.dir_for_img, "S_{0}_{1}.jpg".format(j_r + 1, i + 1)), snap[:, :, :])
print('x = ' + str(x) + '; y = ' + str(y))
left_dir = not left_dir
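        # Traversal sketch (editor's note): the rows are walked boustrophedon-style, so for
        # count = [3, 2] starting near the top-left the visit order is
        #   (0,0) (1,0) (2,0)   then   (2,1) (1,1) (0,1)
        # and the stage never makes a long return move between rows.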
        # Build the XML description file
root = Xml.Element("Root")
elem_rc = Xml.Element("RowCount")
elem_rc.text = str(count[1])
root.append(elem_rc)
elem_cc = Xml.Element("ColCount")
elem_cc.text = str(count[0])
root.append(elem_cc)
elem_img = Xml.Element("Image")
root.append(elem_img)
img_format = Xml.SubElement(elem_img, "Format")
img_format.text = "jpg"
img_size = Xml.SubElement(elem_img, "ImgSize")
img_size_width = Xml.SubElement(img_size, "Width")
img_size_width.text = str(self.snap_width)
img_size_height = Xml.SubElement(img_size, "Height")
img_size_height.text = str(self.snap_height)
img_con_area = Xml.SubElement(elem_img, "ConnectionArea")
# HERE orientation param need
ica_x = Xml.SubElement(img_con_area, "X")
# ica_x.text = str(self.micros_controller.frame[0])
ica_x.text = str(self.program_settings.snap_settings.frame[0])
ica_y = Xml.SubElement(img_con_area, "Y")
# ica_y.text = str(self.micros_controller.frame[1])
ica_y.text = str(self.program_settings.snap_settings.frame[1])
ica_width = Xml.SubElement(img_con_area, "Width")
# ica_width.text = str(int(self.frame_width_mm * self.pixels_in_mm))
ica_width.text = str(self.program_settings.snap_settings.frame[2]
- self.program_settings.snap_settings.frame[0])
ica_height = Xml.SubElement(img_con_area, "Height")
# ica_height.text = str(int(self.frame_height_mm * self.pixels_in_mm))
ica_height.text = str(self.program_settings.snap_settings.frame[3]
- self.program_settings.snap_settings.frame[1])
tree = Xml.ElementTree(root)
        tree.write(self.path_for_xml_file)
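        # Resulting descriptor sketch (editor's illustration; the values depend on the
        # current snap settings):
        #
        #   <Root>
        #     <RowCount>2</RowCount>
        #     <ColCount>3</ColCount>
        #     <Image>
        #       <Format>jpg</Format>
        #       <ImgSize><Width>1920</Width><Height>1080</Height></ImgSize>
        #       <ConnectionArea><X>...</X><Y>...</Y><Width>...</Width><Height>...</Height></ConnectionArea>
        #     </Image>
        #   </Root>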
self.btn_save_scan.setEnabled(True)
# QMessageBox.information(self, "Info Dialog", "Сканирование завершено", QMessageBox.Ok, QMessageBox.Ok)
self.unsaved = True
self.vidik.work = True
self.save_scan()
@staticmethod
    # checks that there is nothing in the image
def img_is_empty(img, delta):
        # Check the horizontal and vertical sample lines for emptiness
coord = [0, 0]
for index in range(2):
non_white_limit = int(0.03 * img.shape[index])
middle = int(img.shape[1 - index] / 2)
for i in range(-5, 6):
coord[index] = middle + i * delta[index]
if i == 5:
coord[index] -= 1
non_white_count = 0
for j in range(img.shape[index]):
coord[1 - index] = j
for k in range(3):
if img[coord[1]][coord[0]][k] < 128:
non_white_count += 1
break
if non_white_count > non_white_limit:
return False
return True
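    # Quick self-check sketch (editor's illustration, not part of the original code; it
    # assumes the enclosing class is ScanWindow, as in the commented-out __main__ at the
    # end of this file):
    #
    #   import numpy as np
    #   white = np.full((100, 100, 3), 255, dtype=np.uint8)
    #   assert ScanWindow.img_is_empty(white, [1, 1])          # a pure white frame is "empty"
    #   dark = white.copy(); dark[50, :, :] = 0                # one dark row crosses a sampled line
    #   assert not ScanWindow.img_is_empty(dark, [1, 1])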
    # Scan without specifying coordinates
def scan_without_borders(self):
self.vidik.work = False
files_img_count = 0
if self.unsaved:
dlg_result = QMessageBox.question(self,
"Confirm Dialog",
"Данные последней съемки не сохранены. Хотите сперва их сохранить?",
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,
QMessageBox.Yes)
if dlg_result == QMessageBox.Yes:
if not self.save_scan():
return
elif dlg_result == QMessageBox.Cancel:
return
if self.table_controller.server_status == 'uninitialized':
self.table_controller.coord_init()
            # Move the camera to the position where the chip should be located
x = int(self.table_controller.limits_mm[0] / 2)
y = int(self.table_controller.limits_mm[1] / 3)
if self.test:
y = int(self.table_controller.limits_mm[1] / 2)
# x = int(self.table_controller.limits_mm[0] / 3) - 50
z = self.work_height
snap = self.coord_move([x, y, z], mode="discrete", crop=True)
else:
snap = self.coord_move(self.table_controller.coord_mm, mode="discrete", crop=True)
        # 1. Look for the workpiece: shoot under the camera and build a table matrix, splitting the space into frames
delta = [self.delta_x, self.delta_y]
current_pos_index = []
coordinates_x_mm = []
x_mm = self.table_controller.coord_mm[0] % self.frame_width_mm
current_pos_index.append(int(self.table_controller.coord_mm[0] // self.frame_width_mm))
while x_mm < self.table_controller.limits_mm[0]:
coordinates_x_mm.append(x_mm)
x_mm += self.frame_width_mm
coordinates_y_mm = []
y_mm = self.table_controller.coord_mm[1] % self.frame_height_mm
current_pos_index.append(int(self.table_controller.coord_mm[1] // self.frame_height_mm))
while y_mm < self.table_controller.limits_mm[1]:
coordinates_y_mm.append(y_mm)
y_mm += self.frame_height_mm
img_file_matrix = []
img_obj_matrix = []
for i in range(len(coordinates_x_mm)):
new_file_x = []
new_obj_x = []
for j in range(len(coordinates_y_mm)):
new_file_x.append('')
new_obj_x.append(False)
img_file_matrix.append(new_file_x)
img_obj_matrix.append(new_obj_x)
        # If the frame is empty, take several steps up until a non-empty frame is found
        # Number of search frames from the starting position
check_range = 3
img_empty = self.img_is_empty(snap, delta)
        # If the workpiece is not found, go down from the center, then search right and left
offset = [0, 0]
index = 1
if (current_pos_index[0] - check_range >= 0 and current_pos_index[1] - check_range >= 0
and current_pos_index[0] + check_range < len(coordinates_x_mm)
and current_pos_index[1] + check_range < len(coordinates_y_mm)):
while img_empty:
if offset[index] < check_range:
offset[index] += 1
else:
if offset[1 - index] == 0:
offset[1 - index] -= check_range
else:
offset[index] -= check_range
index = 1 - index
if index == 1 and offset[0] == 0 and offset[1] == 0:
break
x = coordinates_x_mm[current_pos_index[0] + offset[0]]
y = coordinates_y_mm[current_pos_index[1] + offset[1]]
z = self.table_controller.coord_mm[2]
snap = self.coord_move([x, y, z], mode="discrete", crop=True)
if not self.img_is_empty(snap, delta):
img_empty = False
self.save_test_data("img_empty=" + str(img_empty))
self.save_test_data("offset=" + str(offset))
if img_empty:
QMessageBox.warning(self, "Внимание!", "Изделие не найдено!", QMessageBox.Ok, QMessageBox.Ok)
return
current_pos_index[0] += offset[0]
current_pos_index[1] += offset[1]
if not os.path.exists(self.dir_for_img):
os.mkdir(self.dir_for_img)
for file in os.listdir(self.dir_for_img):
os.remove(os.path.join(self.dir_for_img, file))
file_name = os.path.join(self.dir_for_img, "scan_{0}.jpg".format(files_img_count))
cv2.imwrite(file_name, snap)
# self.save_test_data("file={0}, x={1}, y={2}".format(file_name, current_pos_index[0], current_pos_index[1]))
img_file_matrix[current_pos_index[0]][current_pos_index[1]] = file_name
img_obj_matrix[current_pos_index[0]][current_pos_index[1]] = True
files_img_count += 1
        # 2. As soon as the first frame with the workpiece is found, go up and shoot until an empty frame appears
        # Also introduce limit values for the scan indices - after all, we shoot a rectangular area
# snap_area_rect = QRect(current_pos_index[0], current_pos_index[1], 1, 1)
snap_area_limits_x = [current_pos_index[0], current_pos_index[0]]
snap_area_limits_y = [current_pos_index[1], current_pos_index[1]]
        # direction_y sets the scan direction up or down along y, direction_x along x: -1 or 1
direction_y = -1
start_x_pos_index = current_pos_index[0]
empty_column = False
        # Loop over x directions
for direction_x in [-1, 1]:
self.save_test_data("direction_x={0}".format(direction_x))
dir_index_x = direction_x - int((direction_x - 1) / 2)
current_pos_index[0] = start_x_pos_index + dir_index_x
            # Loop over x steps
while (dir_index_x == 0 and current_pos_index[0] >= 0) \
or (dir_index_x == 1 and current_pos_index[0] <= len(coordinates_x_mm) - 1):
empty_column = True
                # Loop over y directions
for direction_y in [direction_y, -direction_y]:
                    # Simply mapping -1 to 0 and 1 to 1 this way
self.save_test_data("direction_y={0}".format(direction_y))
dir_index_y = direction_y - int((direction_y - 1) / 2)
# current_pos_index[1] = snap_area_limits_y[dir_index_y]
                    # Loop over y steps
while (dir_index_y == 0 and current_pos_index[1] >= 0) \
or (dir_index_y == 1 and current_pos_index[1] <= len(coordinates_y_mm) - 1):
if not img_file_matrix[current_pos_index[0]][current_pos_index[1]]:
snap = self.coord_move([coordinates_x_mm[current_pos_index[0]],
coordinates_y_mm[current_pos_index[1]],
self.table_controller.coord_mm[2]],
mode="discrete", crop=True)
file_name = os.path.join(self.dir_for_img, "scan_{0}.jpg".format(files_img_count))
cv2.imwrite(file_name, snap)
# self.save_test_data(
# "file={0}, x={1}, y={2}".format(file_name, current_pos_index[0], current_pos_index[1]))
img_file_matrix[current_pos_index[0]][current_pos_index[1]] = file_name
files_img_count += 1
img_is_empty = self.img_is_empty(snap, delta)
self.save_test_data("snap: x={0}, y={1}. empty={2}"
.format(current_pos_index[0], current_pos_index[1], img_is_empty))
else:
img_is_empty = not img_obj_matrix[current_pos_index[0]][current_pos_index[1]]
self.save_test_data("move: x={0}, y={1}. empty={2}"
.format(current_pos_index[0], current_pos_index[1], img_is_empty))
if img_is_empty:
if snap_area_limits_y[dir_index_y] + direction_y == current_pos_index[1]:
break
else:
if current_pos_index[1] * direction_y > snap_area_limits_y[dir_index_y] * direction_y:
snap_area_limits_y[dir_index_y] = current_pos_index[1]
self.save_test_data("limit Y[{0}]={1}".format(dir_index_y, snap_area_limits_y[dir_index_y]))
img_obj_matrix[current_pos_index[0]][current_pos_index[1]] = True
empty_column = False
current_pos_index[1] += direction_y
if empty_column:
# current_pos_index[0] -= direction_x
break
else:
if current_pos_index[0] * direction_x > snap_area_limits_x[dir_index_x] * direction_x:
snap_area_limits_x[dir_index_x] = current_pos_index[0]
self.save_test_data("limit X[{0}]={1}".format(dir_index_x, snap_area_limits_x[dir_index_x]))
current_pos_index[0] += direction_x
        # Here is the block that "finishes off" images missed in the previous block due to underestimated dimensions
while True:
count_missed = 0
closest_cell = [0, 0]
closest_dist = 1000000
for i in range(snap_area_limits_x[0], snap_area_limits_x[1] + 1):
for j in range(snap_area_limits_y[0], snap_area_limits_y[1] + 1):
if not img_file_matrix[i][j]:
count_missed += 1
dist = (abs(i - current_pos_index[0]) * self.frame_width_mm
+ abs(j - current_pos_index[1]) * self.frame_height_mm)
if dist < closest_dist:
closest_dist = dist
closest_cell = [i, j]
if count_missed == 0:
break
else:
delta_x = 0
delta_y = 0
if closest_cell[1] > snap_area_limits_y[0] \
and not img_file_matrix[closest_cell[0]][closest_cell[1] - 1]:
delta_y = -1
elif closest_cell[1] < snap_area_limits_y[1] \
and not img_file_matrix[closest_cell[0]][closest_cell[1] + 1]:
delta_y = +1
elif closest_cell[0] > snap_area_limits_x[0] \
and not img_file_matrix[closest_cell[0] - 1][closest_cell[1]]:
delta_x = -1
elif closest_cell[0] < snap_area_limits_x[1] \
and not img_file_matrix[closest_cell[0] + 1][closest_cell[1]]:
delta_x = +1
while True:
current_pos_index[0] = closest_cell[0]
current_pos_index[1] = closest_cell[1]
snap = self.coord_move([coordinates_x_mm[current_pos_index[0]],
coordinates_y_mm[current_pos_index[1]],
self.table_controller.coord_mm[2]],
mode="discrete", crop=True)
file_name = os.path.join(self.dir_for_img, "scan_{0}.jpg".format(files_img_count))
cv2.imwrite(file_name, snap)
# self.save_test_data(
# "file={0}, x={1}, y={2}".format(file_name, current_pos_index[0], current_pos_index[1]))
img_file_matrix[current_pos_index[0]][current_pos_index[1]] = file_name
files_img_count += 1
img_is_empty = self.img_is_empty(snap, delta)
self.save_test_data("snap+: x={0}, y={1}. empty={2}"
.format(current_pos_index[0], current_pos_index[1], img_is_empty))
closest_cell[0] += delta_x
closest_cell[1] += delta_y
if closest_cell[0] < snap_area_limits_x[0] or closest_cell[0] > snap_area_limits_x[1] \
or closest_cell[1] < snap_area_limits_y[0] or closest_cell[1] > snap_area_limits_y[1]:
break
if img_file_matrix[closest_cell[0]][closest_cell[1]]:
break
        # Now rename the needed files and delete all the extras
for i in range(snap_area_limits_x[0], snap_area_limits_x[1] + 1):
for j in range(snap_area_limits_y[0], snap_area_limits_y[1] + 1):
j_r = snap_area_limits_y[1] - j
os.rename(img_file_matrix[i][j], os.path.join(self.dir_for_img,
"S_{0}_{1}.jpg".format(j_r + 1,
i - snap_area_limits_x[0] + 1)))
if not os.path.exists(self.dir_for_img):
os.mkdir(self.dir_for_img)
for file in os.listdir(self.dir_for_img):
if file.find('scan') == 0:
os.remove(os.path.join(self.dir_for_img, file))
# cv2.imwrite(os.path.join(self.dir_for_img, "S_{0}_{1}.jpg".format(j_r + 1, i + 1)), snap[:, :, :])
# print('x = ' + str(x) + '; y = ' + str(y))
        # Build the XML description file
root = Xml.Element("Root")
elem_rc = Xml.Element("RowCount")
elem_rc.text = str(snap_area_limits_y[1] - snap_area_limits_y[0] + 1)
root.append(elem_rc)
elem_cc = Xml.Element("ColCount")
elem_cc.text = str(snap_area_limits_x[1] - snap_area_limits_x[0] + 1)
root.append(elem_cc)
elem_img = Xml.Element("Image")
root.append(elem_img)
img_format = Xml.SubElement(elem_img, "Format")
img_format.text = "jpg"
img_size = Xml.SubElement(elem_img, "ImgSize")
img_size_width = Xml.SubElement(img_size, "Width")
# img_size_width.text = str(self.snap_width)
img_size_width.text = str(self.program_settings.snap_settings.frame[2]
- self.program_settings.snap_settings.frame[0])
img_size_height = Xml.SubElement(img_size, "Height")
# img_size_height.text = str(self.snap_height)
img_size_height.text = str(self.program_settings.snap_settings.frame[3]
- self.program_settings.snap_settings.frame[1])
img_con_area = Xml.SubElement(elem_img, "ConnectionArea")
# HERE orientation param need
ica_x = Xml.SubElement(img_con_area, "X")
# ica_x.text = str(self.program_settings.snap_settings.frame[0])
ica_x.text = "0"
ica_y = Xml.SubElement(img_con_area, "Y")
# ica_y.text = str(self.program_settings.snap_settings.frame[1])
ica_y.text = "0"
ica_width = Xml.SubElement(img_con_area, "Width")
ica_width.text = str(self.program_settings.snap_settings.frame[2]
- self.program_settings.snap_settings.frame[0])
ica_height = Xml.SubElement(img_con_area, "Height")
ica_height.text = str(self.program_settings.snap_settings.frame[3]
- self.program_settings.snap_settings.frame[1])
tree = Xml.ElementTree(root)
        tree.write(self.path_for_xml_file)
self.btn_save_scan.setEnabled(True)
# QMessageBox.information(self, "Info Dialog", "Сканирование завершено", QMessageBox.Ok, QMessageBox.Ok)
self.unsaved = True
self.vidik.work = True
self.save_scan()
    # Save the images into an archive file
def save_scan(self):
if not os.path.exists(self.path_for_xml_file):
return False
file_filter = "Microscope scans (*.misc)"
a = QFileDialog.getSaveFileName(self, "Выберите место сохранения файла", "/",
"All files (*.*);;Microscope scans (*.misc)", file_filter)
file_name = a[0]
if len(file_name) > 0:
ext = os.path.splitext(file_name)
if ext[1] == ".misc":
file_name = file_name
else:
file_name = ext[0] + ".misc"
if os.path.exists(file_name):
dlg_result = QMessageBox.question(self, "Confirm Dialog",
"Файл уже существует. " +
"Хотите его перезаписать?" +
" Это удалит данные в нем",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if dlg_result == QMessageBox.No:
return False
else:
return False
self.vidik.work = False
z = zipfile.ZipFile(file_name, 'w')
for root, dirs, files in os.walk(self.dir_for_img):
for file in files:
if file:
z.write(os.path.join(self.dir_for_img, file), file, compress_type=zipfile.ZIP_DEFLATED)
z.close()
QMessageBox.information(self, "Info Dialog", "Файл сохранен", QMessageBox.Ok, QMessageBox.Ok)
self.unsaved = False
self.main_window.open_file(file_name)
self.vidik.work = True
return True
    # Event handlers for the form and its components
def eventFilter(self, obj, event):
if event.type() == QEvent.KeyPress:
# print("Press " + str(event.key()))
if event.key() == Qt.Key_Shift:
self.key_shift_pressed = True
elif event.key() in self.keyboard_buttons:
self.keyboard_buttons[event.key()].key_press()
elif event.type() == QEvent.KeyRelease:
# print("Release " + str(event.key()))
if event.key() in self.keyboard_buttons:
self.keyboard_buttons[event.key()].key_release()
elif event.key() == Qt.Key_Shift:
self.key_shift_pressed = False
return QMainWindow.eventFilter(self, obj, event)
# def keyPressEvent(self, event):
# print(event.key())
def continuous_move(self):
# print("continuous start")
# while True:
# if self.continuous_mode:
# someone_clicked = False
steps_count = 24
if self.key_shift_pressed:
steps_count = 8
if self.keyboard_buttons[Qt.Key_W].check_click():
self.coord_move([0, steps_count, 0], mode="continuous")
# someone_clicked = True
if self.keyboard_buttons[Qt.Key_D].check_click():
self.coord_move([steps_count, 0, 0], mode="continuous")
# someone_clicked = True
if self.keyboard_buttons[Qt.Key_S].check_click():
self.coord_move([0, -steps_count, 0], mode="continuous")
# someone_clicked = True
if self.keyboard_buttons[Qt.Key_A].check_click():
self.coord_move([-steps_count, 0, 0], mode="continuous")
# someone_clicked = True
if self.keyboard_buttons[Qt.Key_Plus].check_click():
self.coord_move([0, 0, steps_count], mode="continuous")
# someone_clicked = True
if self.keyboard_buttons[Qt.Key_Minus].check_click():
self.coord_move([0, 0, -steps_count], mode="continuous")
# someone_clicked = True
# if someone_clicked:
time.sleep(0.01)
# i += 1
# print("continuous " + str(i))
# else:
# time.sleep(1)
# print("continuous finish")
# def video_thread(self):
# while True:
# self.micros_controller.video_check, self.micros_controller.video_img \
# = self.micros_controller.video_stream.read()
# self.lbl_img.setPixmap(self.micros_controller.numpy_to_pixmap(self.micros_controller.video_img))
# self.lbl_img.repaint()
# class LabelImg(QLabel):
# def __init__(self):
# super().__init__()
# self.can_set = True
#
# def setPixmapMy(self, a0: QtGui.QPixmap) -> None:
# if self.can_set:
# self.setPixmap(a0)
# Direction class: can produce the next and the previous direction
class Direction:
def __init__(self, index=0, direction=None):
self.__abs_index = index
if not direction:
self.__direction = [1, 0]
else:
self.__direction = direction
def __getitem__(self, key):
return self.__direction[key]
def __repr__(self):
return str(self.__abs_index) + str(self.__direction)
def __get_abs_index(self):
return self.__abs_index
abs_index = property(__get_abs_index)
# @property
# def abs_index(self):
# return self.__abs_index
def previous(self):
return Direction(self.__abs_index - 1, [self.__direction[1], -self.__direction[0]])
def next(self):
return Direction(self.__abs_index + 1, [-self.__direction[1], self.__direction[0]])
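# Usage sketch (editor's note, not part of the original program): border_find() walks the
# four axis directions in sequence, letting abs_index run past 3 up to 6 to close the loop.
#
#   d = Direction()                  # starts as 0[1, 0] (positive x)
#   while d.abs_index < 4:
#       print(d)                     # 0[1, 0] -> 1[0, 1] -> 2[-1, 0] -> 3[0, -1]
#       d = d.next()
#   assert Direction().previous()[1] == -1   # previous of [1, 0] is [0, -1]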
# Helper class for tracking manual keyboard control of the machine
class KeyboardButton:
def __init__(self):
        # Whether the button currently counts as pressed (so the machine movement must be performed)
self.clicked = False
        # Was the last signal received from the button a release? If not, the last signal was a press
self.released = True
        # Time the last release signal was received
self.time_released = 0.0
    # A press signal was received
def key_press(self):
self.clicked = True
self.released = False
# print(self.clicked)
    # A release signal was received
def key_release(self):
self.released = True
self.time_released = time.time()
# print(self.clicked)
    # Check whether the button is pressed, with timer handling
def check_click(self):
if self.clicked:
if self.released:
#
if time.time() - self.time_released > 0.02:
self.clicked = False
else:
            # Too long an absence of press signals is treated as a request to stop
if time.time() - self.time_released > 1.00:
self.clicked = False
# print(self.clicked)
return self.clicked
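# Timing sketch (editor's note, not part of the original program): keyboard auto-repeat
# delivers press/release pairs while a key is held, so `clicked` stays True as long as
# presses keep arriving shortly after each release; a gap of more than 0.02 s after a
# release reads as "button up".
#
#   btn = KeyboardButton()
#   btn.key_press()
#   btn.key_release()              # auto-repeat: press/release pairs keep arriving
#   btn.key_press()
#   assert btn.check_click()       # within 0.02 s of the last release
#   btn.key_release()
#   time.sleep(0.03)
#   assert not btn.check_click()   # released longer than the 0.02 s debounce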
class TableServerThread(QThread):
def __init__(self, hostname, parent=None):
self.hostname = hostname
self.work = True
self.stopped = False
QThread.__init__(self, parent=parent)
def run(self) -> None:
shell = Terminal(["ssh pi@" + self.hostname, "python3 server.py", ])
shell.run()
while self.work:
time.sleep(1)
shell = None
self.stopped = True
# subprocess.run(["ssh", "-tt", "pi@" + self.hostname])
# subprocess.run(["python3", "server.py"])
class VideoStreamThread(QThread):
changePixmap = pyqtSignal(QPixmap)
def __init__(self, video_stream, video_img, parent=None):
self.video_stream = video_stream
self.video_img = video_img
self.work = True
QThread.__init__(self, parent=parent)
def run(self):
while True:
if self.work:
ret, self.video_img = self.video_stream.read()
if ret:
self.changePixmap.emit(self.numpy_to_pixmap(self.video_img))
time.sleep(0.02)
# self.lbl.repaint()
else:
time.sleep(0.1)
@staticmethod
def numpy_to_q_image(image):
q_img = QImage()
if image.dtype == np.uint8:
if len(image.shape) == 2:
channels = 1
height, width = image.shape
bytes_per_line = channels * width
q_img = QImage(
image.data, width, height, bytes_per_line, QImage.Format_Indexed8
)
q_img.setColorTable([QtGui.qRgb(i, i, i) for i in range(256)])
elif len(image.shape) == 3:
if image.shape[2] == 3:
height, width, channels = image.shape
bytes_per_line = channels * width
q_img = QImage(
# image.data, width, height, bytes_per_line, QImage.Format_RGB888
image.data, width, height, bytes_per_line, QImage.Format_BGR888
)
elif image.shape[2] == 4:
height, width, channels = image.shape
bytes_per_line = channels * width
# fmt = QImage.Format_ARGB32
q_img = QImage(
# image.data, width, height, bytes_per_line, QImage.Format_ARGB32
image.data, width, height, bytes_per_line, QImage.Format_BGR888
)
return q_img
@staticmethod
def numpy_to_pixmap(img):
q_img = VideoStreamThread.numpy_to_q_image(img)
pixmap = QPixmap.fromImage(q_img)
return pixmap
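    # Conversion sketch (editor's note, not part of the original program): OpenCV frames
    # are BGR uint8, which is why Format_BGR888 is used above instead of Format_RGB888.
    # A QApplication must exist before QPixmap.fromImage() is called.
    #
    #   frame = np.zeros((480, 640, 3), dtype=np.uint8)
    #   pix = VideoStreamThread.numpy_to_pixmap(frame)   # 640x480 black pixmap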
# Microscope control class (a test stub for now)
# class MicrosController:
# def __init__(self, program_settings: ProgramSettings, test: bool, lbl_img: QLabel):
# # vst = VideoStreamThread(self)
# # vst.start()
#
# if test:
# self.test_img_path = "/home/andrey/Projects/MicrosController/TEST/MotherBoard_3.jpg"
# # self.test_img_path = "/home/andrey/Projects/MicrosController/TEST/MotherBoard_2.jpg"
# # self.test_img_path = "/home/andrey/Projects/MicrosController/TEST/MotherBoard_5.jpg"
# self.test_img = cv2.imread(self.test_img_path)[:, :, :]
# self.test = test
# # self.frame = list()
# self.program_settings: ProgramSettings = program_settings
# self.video_img = None
# self.video_check = False
# self.lbl_img = lbl_img
#
# if not self.test:
# max_video_streams = 6
# video_stream_index = -1
# # vs = VideoStream(src=video_stream_index).start()
# check_next_stream = True
# while check_next_stream:
# video_stream_index += 1
# if video_stream_index > max_video_streams:
# time.sleep(1.0)
# video_stream_index = 0
#
# # self.video_stream = VideoStream(src=video_stream_index).start()
# # self.video_stream = VideoStream(src=video_stream_index, usePiCamera=True,
# # resolution=(1920, 1080)).start()
# self.video_stream = cv2.VideoCapture(video_stream_index)
# self.video_stream.set(3, 1920)
# self.video_stream.set(4, 1080)
#
# # noinspection PyBroadException
# try:
# self.video_check, self.video_img = self.video_stream.read()
# if not self.video_check:
# continue
# # check_frame = img[:, :, :]
# check_next_stream = False
# except Exception:
# # self.video_stream.stop()
# check_next_stream = True
# self.video_fps = 60
# self.video_timer = QTimer()
# self.video_timer.timeout.connect(self.next_video_frame)
# self.video_timer.start(1000. / self.video_fps)
#
# def next_video_frame(self):
# self.video_check, self.video_img = self.video_stream.read()
# self.lbl_img.setPixmap(self.numpy_to_pixmap(self.video_img))
# def __get_frame(self):
# return self.program_settings.snap_settings.frame
# frame = property(__get_frame)
# @staticmethod
# def numpy_to_q_image(image):
# q_img = QImage()
# if image.dtype == np.uint8:
# if len(image.shape) == 2:
# channels = 1
# height, width = image.shape
# bytes_per_line = channels * width
# q_img = QImage(
# image.data, width, height, bytes_per_line, QImage.Format_Indexed8
# )
# q_img.setColorTable([QtGui.qRgb(i, i, i) for i in range(256)])
# elif len(image.shape) == 3:
# if image.shape[2] == 3:
# height, width, channels = image.shape
# bytes_per_line = channels * width
# q_img = QImage(
# # image.data, width, height, bytes_per_line, QImage.Format_RGB888
# image.data, width, height, bytes_per_line, QImage.Format_BGR888
# )
# elif image.shape[2] == 4:
# height, width, channels = image.shape
# bytes_per_line = channels * width
# # fmt = QImage.Format_ARGB32
# q_img = QImage(
# # image.data, width, height, bytes_per_line, QImage.Format_ARGB32
# image.data, width, height, bytes_per_line, QImage.Format_BGR888
# )
# return q_img
#
# def numpy_to_pixmap(self, img):
# q_img = self.numpy_to_q_image(img)
# pixmap = QPixmap.fromImage(q_img)
# return pixmap
# def snap(self, x1: int, y1: int, x2: int, y2: int, crop=False):
# if self.test:
# time.sleep(0.3)
# # return np.copy(self.test_img[y1:y2, x1:x2, :])
#             # Flip the scan coordinates
# y2_r = 6400 - y1
# y1_r = 6400 - y2
# return np.copy(self.test_img[y1_r:y2_r, x1:x2, :])
# else:
# self.video_timer.stop()
# time.sleep(0.1)
# # for i in range(10):
# # self.video_stream.read()
#             # Warm-up shots
# for i in range(10):
# self.video_stream.read()
# check, img = self.video_stream.read()
# self.video_timer.start()
# if crop:
# # return np.copy(img[self.frame[3]-1:self.frame[1]:-1, self.frame[2]-1:self.frame[0]:-1, :])
# # return np.copy(img[self.frame[1]:self.frame[3], self.frame[0]:self.frame[2], :][::-1, ::-1, :])
# return np.copy(img[self.frame[1]:self.frame[3], self.frame[0]:self.frame[2], :])
# else:
# # return np.copy(img[::-1, ::-1, :])
# return np.copy(img)
# Class that talks to the machine controller
# 1. Checks that the server is present
# 2. Starts the server on the Raspberry Pi
# 3. Controls the machine movements
class TableController:
def __init__(self, loop, program_settings: ProgramSettings, vidik: VideoStreamThread, test=False,
hostname="192.168.42.100", port=8080):
self.program_settings = program_settings
self.vidik = vidik
        # Connection parameters for the Raspberry Pi server
self.hostname = hostname
self.port = port
        # Current server status
self.server_status = 'uninitialized'
        # Current machine status: running or not
self.operation_status = ''
self.coord_step = [-1, -1, -1]
self.coord_mm = [-1.0, -1.0, -1.0]
self.manual_mode = True
self.manual_left_count = 0
self.manual_right_count = 0
self.loop = loop
self.execute = False
# self.thread_server = Thread(target=self.server_start)
self.thread_server = TableServerThread(self.hostname)
# self.thread_server = QThread()
# self.thread_server.started.connect(self.server_start)
# self.steps_in_mm = 80
# self.limits_step = []
# self.limits_mm = []
# self.steps_in_mm = 80
# self.limits_step = (340 * self.steps_in_mm, 640 * self.steps_in_mm, 70 * self.steps_in_mm)
        # Test mode - run without the actual machine
self.test = test
# self.micros_controller: MicrosController = None
# self.programSettings: ProgramSettings = None
def __repr__(self):
# return "coord = " + str(self.coord_mm) + "; server status = " + self.server_status \
# + "; last op status = " + self.operation_status
return "coord = [{0:.2f}, {1:.2f}, {2:.2f}]; server status = {3}; last op status = {4}".format(
self.coord_mm[0], self.coord_mm[1], self.coord_mm[2], self.server_status, self.operation_status
)
def __get_steps_in_mm(self):
return self.program_settings.table_settings.steps_in_mm
steps_in_mm = property(__get_steps_in_mm)
def __get_limits_step(self):
return self.program_settings.table_settings.limits_step
limits_step = property(__get_limits_step)
def __get_limits_mm(self):
return self.program_settings.table_settings.limits_mm
limits_mm = property(__get_limits_mm)
async def consumer(self):
url = f"ws://{self.hostname}:{self.port}"
async with websockets.connect(url) as web_socket:
await self.hello(web_socket)
@staticmethod
async def hello(web_socket) -> None:
async for message in web_socket:
print(message)
@staticmethod
    async def produce(message: str, host: str, port: int) -> str:
        async with websockets.connect(f"ws://{host}:{port}") as ws:
await ws.send(message)
result = await ws.recv()
return result
def get_request(self, x_step: int, y_step: int, z_step: int, mode: str):
self.execute = True
# self.vidik.work = False
data = {
"x": -x_step,
"y": y_step,
"z": z_step,
"mode": mode # continuous/discrete/init/check
}
data_string = json.dumps(data)
return data_string
def result_unpack(self, result):
result_str = json.loads(result)
        # Flip along the X axis
self.coord_step = [self.limits_step[0] - result_str['x'], result_str['y'], result_str['z']]
self.coord_mm = [(self.coord_step[0] / self.steps_in_mm),
(self.coord_step[1] / self.steps_in_mm),
(self.coord_step[2] / self.steps_in_mm)]
self.operation_status = result_str['status']
self.server_status = result_str['status']
self.execute = False
# self.vidik.work = True
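    # Protocol sketch (editor's note, inferred from get_request() and result_unpack(); the
    # concrete status value is illustrative): the client sends one JSON object per command
    # and the server replies with the new absolute position in steps plus a status string:
    #
    #   request:  {"x": -800, "y": 0, "z": 0, "mode": "discrete"}
    #   response: {"x": 26400, "y": 0, "z": 0, "status": "ok"}
    #
    # result_unpack() then mirrors x back (limits_step[0] - x) and converts steps to
    # millimetres via steps_in_mm.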
# def coord_init(self):
# init_thread = Thread(target=self.coord_init_in_thread)
# init_thread.start()
def coord_init(self):
if not self.test:
data = self.get_request(x_step=0, y_step=0, z_step=0, mode="init")
result = self.loop.run_until_complete(self.produce(message=data, host=self.hostname, port=self.port))
self.result_unpack(result)
else:
self.coord_step = [self.limits_step[0], 0, 0]
self.coord_mm = [self.limits_mm[0], 0, 0]
self.operation_status = 'init'
self.server_status = 'init'
def coord_check(self):
if not self.test:
# loop = asyncio.get_event_loop()
data = self.get_request(x_step=0, y_step=0, z_step=0, mode="check")
result = self.loop.run_until_complete(self.produce(message=data, host=self.hostname, port=self.port))
self.result_unpack(result)
    # Machine movement command
def coord_move(self, coord, mode="discrete"):
if not self.test:
if min(self.coord_step) < 0:
return
            # In discrete movement mode, millimetres must be passed
if mode == "discrete":
dx = coord[0] * self.steps_in_mm - self.coord_step[0]
dy = coord[1] * self.steps_in_mm - self.coord_step[1]
dz = coord[2] * self.steps_in_mm - self.coord_step[2]
            # In continuous movement mode, steps must be passed
else:
# if mode == "continuous"
dx = coord[0]
dy = coord[1]
dz = coord[2]
# loop = asyncio.get_event_loop()
data = self.get_request(x_step=int(dx), y_step=int(dy), z_step=int(dz), mode=mode)
result = self.loop.run_until_complete(self.produce(message=data, host=self.hostname, port=self.port))
self.result_unpack(result)
else:
if mode == "discrete":
self.coord_mm[0] = coord[0]
self.coord_mm[1] = coord[1]
self.coord_mm[2] = coord[2]
self.coord_step[0] = int(self.coord_mm[0] * self.steps_in_mm)
self.coord_step[1] = int(self.coord_mm[1] * self.steps_in_mm)
self.coord_step[2] = int(self.coord_mm[2] * self.steps_in_mm)
else:
# if mode == "continuous"
coord[0] = -coord[0]
for i in range(3):
self.coord_step[i] += coord[i]
if self.coord_step[i] < 0:
self.coord_step[i] = 0
if self.coord_step[i] > self.limits_step[i]:
self.coord_step[i] = self.limits_step[i]
self.coord_mm[i] = self.coord_step[i] / self.steps_in_mm
# snap = self.micros_controller.snap(self.programSettings.pixels_in_mm * (self.coord_mm[0] -
# self.programSettings.snap_width_half),
# self.programSettings.pixels_in_mm * (self.coord_mm[1] -
# self.programSettings.snap_height_half),
# self.programSettings.pixels_in_mm * (self.coord_mm[0] +
# self.programSettings.snap_width_half),
# self.programSettings.pixels_in_mm * (self.coord_mm[1] +
# self.programSettings.snap_height_half))
# self.lbl_img.setPixmap(self.micros_controller.numpy_to_pixmap(snap))
# self.lbl_img.repaint()
self.operation_status = 'init'
self.server_status = 'init'
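    # Call sketch (editor's illustration, hypothetical values): discrete moves take an
    # absolute target in millimetres, continuous moves take a relative offset in steps,
    # matching the two branches above.
    #
    #   tc.coord_move([120.0, 80.0, 10.0])               # absolute, in mm
    #   tc.coord_move([24, 0, 0], mode="continuous")     # relative, in steps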
def server_check(self):
pass
def server_start(self):
# os.system("python3 /home/andrey/Projects/MicrosController/ServerExamples/server.py")
# shell = Terminal(["python3 /home/andrey/Projects/MicrosController/ServerExamples/server.py"])
shell = Terminal(["ssh pi@" + self.hostname, "python3 server.py", ])
shell.run()
def server_connect(self):
pass
# def init(self):
# return self.send_json_request("init request")
    # # function that sends JSON to control the machine
# @staticmethod
# def send_json_request(json_request):
# answer = "ok"
# return answer
# Press the green button in the gutter to run the script.
# if __name__ == '__main__':
# app = QApplication(sys.argv)
# ex = ScanWindow()
# sys.exit(app.exec_())
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
[
"Andarko@bk.ru"
] |
Andarko@bk.ru
|
bd889e11569d36e3109b85c5a0a51fcde69bafc1
|
14a853584c0c1c703ffd8176889395e51c25f428
|
/sem1/csa/project-euler/1/1.py
|
2781f342cd824654222ed7b2a8bc9e4e36f07637
|
[] |
no_license
|
harababurel/homework
|
d0128f76adddbb29ac3d805c235cdedc9af0de71
|
16919f3b144de2d170cd6683d54b54bb95c82df9
|
refs/heads/master
| 2020-05-21T12:25:29.248857
| 2018-06-03T12:04:45
| 2018-06-03T12:04:45
| 43,573,199
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
print(sum(x for x in range(1, 100000001) if x % 3 == 0 or x % 5 == 0))
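# Editor's note (an alternative sketch, not part of the original solution): by
# inclusion-exclusion the same sum has a closed form that avoids iterating 10**8 values.
#
# def multiples_sum(k, n=100000000):
#     m = n // k                       # how many multiples of k lie in [1, n]
#     return k * m * (m + 1) // 2      # k * (1 + 2 + ... + m)
#
# print(multiples_sum(3) + multiples_sum(5) - multiples_sum(15))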
|
[
"srg.pscs@gmail.com"
] |
srg.pscs@gmail.com
|
02fec4aed47becb9f92da777026cef2e2df0cec4
|
094639d004fd3c1342b53cd311e295fcc61c82ee
|
/inputBoxes.py
|
66ed12032a546c5668edeeaefb9d2761f737cf4c
|
[] |
no_license
|
IdanErgaz/Pytest
|
cd0d4420094c8f25cd9dc351d98a3ff7c667c0f7
|
a501a2431a1a9973ea7ebaf85a23041a04a1c1e5
|
refs/heads/main
| 2023-02-05T05:55:02.858991
| 2020-12-31T11:53:54
| 2020-12-31T11:53:54
| 319,232,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver=webdriver.Chrome(executable_path=r"C:\Projects\Automation\Drivers\chromedriver.exe")
driver.get("https://fs2.formsite.com/meherpavan/form2/index.html?1537702596407")
status1=driver.find_element_by_id("RESULT_TextField-1").is_displayed()
print("Field1 status is:" ,status1) #print status
status2=driver.find_element_by_id("RESULT_TextField-1").is_enabled()
print("Field1 enabled status:" ,status1) #print ENABLED status
driver.find_element_by_id("RESULT_TextField-1").send_keys("pavan")
driver.find_element_by_id("RESULT_TextField-2").send_keys("kumar")
driver.find_element_by_id("RESULT_TextField-3").send_keys("123456789")
driver.quit()
|
[
"noreply@github.com"
] |
IdanErgaz.noreply@github.com
|
1b2ba10d76817a94e1225ffb2400157c386d970e
|
b908c5116edc954342561663ee15f235562943a3
|
/eih-raspberrypi-body-detect/draft/auth.py
|
1364d2ef48081f9f616e881f286d8e1f32b8b113
|
[
"CC0-1.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
OAbouHajar/projectEIH
|
578465037f22aed6e13e5311629d7f52582cb501
|
2fcf072a03f8b0b86991abf26cfa9597db5560ff
|
refs/heads/master
| 2023-04-13T18:08:19.788985
| 2021-04-26T10:47:16
| 2021-04-26T10:47:16
| 214,017,815
| 1
| 0
|
MIT
| 2020-07-02T09:17:45
| 2019-10-09T20:31:30
|
Python
|
UTF-8
|
Python
| false
| false
| 202
|
py
|
from firebase import firebase
firebase = firebase.FirebaseApplication('https://projecteih.firebaseio.com', authentication=None)
result = firebase.get('/users', None, {'print': 'pretty'})
print (result)
|
[
"smsm.sy@hotmail.com"
] |
smsm.sy@hotmail.com
|
dcb914d3f2a8ae52a2f670667bd89a5fd0671f3c
|
e446918cc531f839706b63bf38269dd7b3c37432
|
/scrapy_distributed/settings.py
|
449546f2f8a2354986ec14124e31a1b88f073e28
|
[] |
no_license
|
leosudalv2010/scrapy-redis-distributed
|
5a99547cc7f35a016f4242ddf92e4a11aaa5d59a
|
e790d5144088f325d0d52d85781081b8e0fa2bf2
|
refs/heads/master
| 2020-03-21T02:35:53.784444
| 2018-06-20T08:48:15
| 2018-06-20T08:57:25
| 138,006,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for scrapy_distributed project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapy_distributed'
SPIDER_MODULES = ['scrapy_distributed.spiders']
NEWSPIDER_MODULE = 'scrapy_distributed.spiders'
KEYWORDS = ['shirt']
MAXPAGE = 100
DOWNLOADER_MIDDLEWARES = {
'scrapy_distributed.middlewares.SeleniumMiddleware': 300
}
ITEM_PIPELINES = {
'scrapy_distributed.pipelines.MySQLPipeline': 300
}
FEED_EXPORT_ENCODING = 'utf8'
# Scrapy-Redis related settings
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
REDIS_URL = 'redis://:sd89fjmn12s5dsf5x@192.168.2.200:6379'
LOG_FILE = 'log'
LOG_LEVEL = 'INFO'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
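# Editor's note (assumes the scrapy_redis default key layout, which this file does not
# override): with the Scheduler and RFPDupeFilter above, a RedisSpider typically pulls its
# start URLs from the shared Redis queue, seeded for example with
#   redis-cli -u redis://:sd89fjmn12s5dsf5x@192.168.2.200:6379 lpush <spider_name>:start_urls <url>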
|
[
"leosudalv2010@163.com"
] |
leosudalv2010@163.com
|
207bee7e203e906fc119bb7df61d83adcdec1d35
|
d49f28ea7867cf9ce9512c0521b136934e97b7d2
|
/tests/backends/base/test_client.py
|
4573bbe97bfb174d2998b800e8ce5e119a7d4da8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
tamirverthim/django
|
cdbc198a055deeb526caff6b18ae874445f217c5
|
666b7048a0dc6b067c1e3f58653f3c7ca00371a2
|
refs/heads/master
| 2023-04-14T00:51:11.507226
| 2020-12-07T12:19:20
| 2020-12-07T12:19:20
| 319,310,225
| 0
| 0
|
BSD-3-Clause
| 2023-04-03T23:53:00
| 2020-12-07T12:17:41
|
Python
|
UTF-8
|
Python
| false
| false
| 605
|
py
|
from django.db import connection
from django.db.backends.base.client import BaseDatabaseClient
from django.test import SimpleTestCase
class SimpleDatabaseClientTests(SimpleTestCase):
def setUp(self):
self.client = BaseDatabaseClient(connection=connection)
def test_settings_to_cmd_args_env(self):
msg = (
'subclasses of BaseDatabaseClient must provide a '
'settings_to_cmd_args_env() method or override a runshell().'
)
with self.assertRaisesMessage(NotImplementedError, msg):
self.client.settings_to_cmd_args_env(None, None)
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
a2fc15d839aa393c58038566f984b77acf7cd3c5
|
ef5706049bc847cab93cd43a8ff42a5b987b2fbf
|
/poem.py
|
f3648d6a5ff2a5cfc50a85c593de49dddf2725c2
|
[] |
no_license
|
justakaigood/banana
|
77ea89fc78b8557b444127312fbf95ce9671033f
|
a6994656fb5574f81ebbe977a1531ee96197f89f
|
refs/heads/master
| 2020-08-04T18:44:25.023640
| 2020-06-08T18:12:19
| 2020-06-08T18:12:19
| 212,241,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# coding: utf-8
from numpy.random import randint
for i in range(10):
print(randint(1,10))
words=""
|
[
"108701036@nccu.edu.tw"
] |
108701036@nccu.edu.tw
|
570d5e5d5fbd8600a45c78d01b6b02a8b09ce153
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/set_database_user_privilege_request.py
|
150b872cab2546ae4611dfa32d9ac8d91350c989
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,906
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SetDatabaseUserPrivilegeRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'x_language': 'str',
'body': 'SetDatabaseUserPrivilegeReqV3'
}
attribute_map = {
'instance_id': 'instance_id',
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, instance_id=None, x_language=None, body=None):
"""SetDatabaseUserPrivilegeRequest
The model defined in huaweicloud sdk
        :param instance_id: Instance ID
:type instance_id: str
        :param x_language: Language
:type x_language: str
:param body: Body of the SetDatabaseUserPrivilegeRequest
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._instance_id = None
self._x_language = None
self._body = None
self.discriminator = None
self.instance_id = instance_id
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def instance_id(self):
"""Gets the instance_id of this SetDatabaseUserPrivilegeRequest.
        Instance ID
:return: The instance_id of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this SetDatabaseUserPrivilegeRequest.
        Instance ID
:param instance_id: The instance_id of this SetDatabaseUserPrivilegeRequest.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def x_language(self):
"""Gets the x_language of this SetDatabaseUserPrivilegeRequest.
        Language
:return: The x_language of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this SetDatabaseUserPrivilegeRequest.
        Language
:param x_language: The x_language of this SetDatabaseUserPrivilegeRequest.
:type x_language: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this SetDatabaseUserPrivilegeRequest.
:return: The body of this SetDatabaseUserPrivilegeRequest.
:rtype: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this SetDatabaseUserPrivilegeRequest.
:param body: The body of this SetDatabaseUserPrivilegeRequest.
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SetDatabaseUserPrivilegeRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
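# Usage sketch (editor's illustration; the instance id is hypothetical and the request body
# of type SetDatabaseUserPrivilegeReqV3 is defined elsewhere in this SDK):
#
#   request = SetDatabaseUserPrivilegeRequest(instance_id="my-rds-instance",
#                                             x_language="en-us")
#   print(request)   # serialized through to_str() via __repr__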
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
7c77aa89634056fa43a39b2384cd8d53186bca2f
|
f264800326fe36503ea115861b4ebe4ebf4f22ef
|
/499proj/matrixMain.py
|
21da14235b4a75873ffdcc24a3d3c029773ec13f
|
[] |
no_license
|
Jett-Ma/499-Final-Project
|
c3b6d7908c98ba48e845b2b4457cb3c59058ff52
|
378fc24e04f127060c7fd04ecd4e98411493ff47
|
refs/heads/main
| 2023-03-19T12:48:10.314639
| 2021-03-24T01:17:47
| 2021-03-24T01:17:47
| 350,908,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,311
|
py
|
# coding:utf-8
import sys
import numpy
import pandas as pd
from sqlalchemy import create_engine
import pymysql
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.tokenize import MWETokenizer
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction import DictVectorizer
import copy
def pre_processing(text):
    # Import the packages used for stopword removal and stemming
porter_stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
sr = stopwords.words('english')
sr_append = ["rt", "http", "com", ]
    # Use a regular expression to remove web links
results = re.compile(r'[http|https]*://[a-zA-Z0-9.?/&=:]*', re.S)
    # This is where stopword removal, lemmatizing, stemming and tokenizing happen; word_tokenize does the tokenizing
    sentences = text.lower()
grammar = "NP: {<DT>?<JJ>*<NN>|<NNP>*}"
cp = nltk.RegexpParser(grammar)
words = word_tokenize(sentences)
sentence = nltk.pos_tag(word_tokenize(sentences))
tree = cp.parse(sentence)
    # print("\nNoun phrases:")
list_of_noun_phrases = extract_phrases(tree, 'NP')
for phrase in list_of_noun_phrases:
word = "_".join([x[0] for x in phrase.leaves()])
if word not in words:
words.append(word)
#print(words)
test_temp = []
for z in words:
# filter web link
z = re.sub(results, '', z)
# alphabet characters only
z = re.sub('[^A-Za-z0-9_]+', '', z)
z = lemmatizer.lemmatize(z)
# z = porter_stemmer.stem(z)
# filter stopwords
if z in sr:
continue
if z == '':
continue
if z in sr_append:
continue
test_temp.append(z)
# print("After pre-process : ")
# print(test_temp)
return test_temp
def extract_phrases(my_tree, phrase):
my_phrases = []
if my_tree.label() == phrase:
my_phrases.append(my_tree.copy(True))
for child in my_tree:
if type(child) is nltk.Tree:
list_of_phrases = extract_phrases(child, phrase)
if len(list_of_phrases) > 0:
my_phrases.extend(list_of_phrases)
return my_phrases
if __name__ == '__main__':
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
numpy.set_printoptions(threshold=sys.maxsize)
    # show all columns
pd.set_option('display.max_columns', None)
    # show all rows
pd.set_option('display.max_rows', None)
    # widen the value display width (pandas default is 50)
pd.set_option('max_colwidth',30)
con_engine = create_engine('mysql+pymysql://root:@localhost/499db2?charset=utf8')
    # database connection settings
sql_ = 'select * from zctweets;'
df_data = pd.read_sql_query(sql_, con_engine)
del df_data['id']
del df_data['screen_name']
del df_data['source']
del df_data['in_reply_to_screen_name']
del df_data['in_reply_to_status_id_str']
del df_data['retweet_count']
del df_data['favorite_count']
    # drop unused columns
df_sort = df_data.sort_values('userid_str')
    # sort by userid, which effectively groups the rows
    user_list = df_sort['userid_str'].to_list()  # convert to lists for convenience
time_list = df_sort['created_at'].to_list()
text_list = df_sort['text'].to_list()
    time_list = [i.date() for i in time_list]  # convert every timestamp to a plain date
    # initialize the result lists
user_result = []
time_result = []
text_result = []
aready = []
    # group the rows by user id and date, filling in the times and texts
for i in range(len(user_list)) :
if i not in aready:
time_now = time_list[i]
aready.append(i)
user_result.append(user_list[i])
tem_time_list = [time_list[i]]
tem_text_list = [text_list[i]]
for j in range(len(user_list)):
if j not in aready:
time_tem = time_list[j]
if user_list[j] == user_list[i] and time_now == time_tem:
tem_time_list.append(time_list[j])
tem_text_list.append(text_list[j])
aready.append(j)
time_result.append(tem_time_list)
text_result.append(tem_text_list)
text_clean_list = copy.deepcopy(text_result)
for i in range(len(text_clean_list)):
for j in range(len(text_clean_list[i])):
text_clean_list[i][j] = pre_processing(text_clean_list[i][j])
print(text_clean_list[i][j])
df_tem_1 = pd.DataFrame({'user_id':user_result,
'time':time_result,
'text':text_result,
'perticiple':text_clean_list})
    # sparse=False makes DictVectorizer return a numpy ndarray
v = DictVectorizer(sparse=False)
word_pre = []
all_word = []
    # process the texts
    # collect the words of all users' tweets together
for i in range(len(text_clean_list)):
for j in range(len(text_clean_list[i])):
for z in text_clean_list[i][j]:
all_word.append(z)
# print(all_word)
    # deduplicate the words
all_word = set(all_word)
tem_dict = {}
    # store the words in a dict: word as key, frequency as value
for i in all_word:
tem_dict[i] = 0
#
for i in range(len(text_clean_list)):
        # deep-copy here to avoid mutating the original template
tem_dict_i = copy.deepcopy(tem_dict)
for j in range(len(text_clean_list[i])):
for z in text_clean_list[i][j]:
tem_dict_i[z] = text_clean_list[i][j].count(z)
word_pre.append(tem_dict_i)
# print(word_pre)
# print(len(word_pre))
df_tem_1['word_pre'] = word_pre
    # deduplicate the user ids
user_id_set = set(df_tem_1['user_id'].to_list())
text_list_2 = []
word_freq = []
    # list of keys
first_pre = list(df_tem_1['word_pre'][0].keys())
    # build df_tem_2
    # merge every user's per-date rows into a single record per user
for user in user_id_set:
# user_id column in dataframe
tem_df = df_tem_1[df_tem_1['user_id']==user]
tem_text = ''
tem_word_freq = {}
for key in first_pre:
# set each value to 0
tem_word_freq[key] = 0
        # concatenate all of this user's texts
for text in tem_df['text']:
for j in text:
tem_text += j
for i in tem_df['word_pre']:
for j in first_pre:
tem_word_freq[j] += i[j]
text_list_2.append(tem_text)
word_freq.append(tem_word_freq)
df_tem_2 = pd.DataFrame({'user_id':list(user_id_set),
'text':text_list_2,
'word_freq':word_freq
})
# df_tem_2.to_csv('4.csv')
# df_tem_1.to_csv('3.csv')
time_orin_list = df_tem_1['time'].to_list()
for j in range(len(time_orin_list)):
time_orin_list[j] = time_orin_list[j][0]
df_tem_1['time'] = time_orin_list
del df_tem_1['text']
del df_tem_1['perticiple']
df_tem_1.to_csv('./doc/6_l.csv')
del df_tem_2['text']
df_tem_2.to_csv('./doc/7_l.csv')
|
[
"68134569+Jett-Ma@users.noreply.github.com"
] |
68134569+Jett-Ma@users.noreply.github.com
|
f0365d989dd7c876fa5c7fca77f76477b90906d6
|
44baa6621306c6b9810db48b3c1479cb8db294b3
|
/test/test_summaries.py
|
890a49aaf4ebb8b1bd8020b972c18679946c46be
|
[
"Apache-2.0"
] |
permissive
|
codeninja/tensorforce
|
ecc216e2970194d086209fb726fc64b4b9cd8e93
|
212b115d10a21b8241e1d9df56c4851ffd370f34
|
refs/heads/master
| 2020-08-13T08:16:11.046478
| 2019-10-18T17:36:03
| 2019-10-18T17:36:03
| 214,937,969
| 2
| 0
|
Apache-2.0
| 2019-10-18T17:36:04
| 2019-10-14T03:15:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import unittest
from test.unittest_base import UnittestBase
class TestSummaries(UnittestBase, unittest.TestCase):
exclude_bounded_action = True # TODO: shouldn't be necessary!
require_observe = True
directory = 'test-summaries'
def test_summaries(self):
# FEATURES.MD
self.start_tests()
# 'dropout', 'kl-divergence'
reward_estimation = dict(horizon=2, estimate_horizon='late')
baseline_policy = dict(network=dict(type='auto', size=8, internal_rnn=1))
baseline_objective = 'policy_gradient'
baseline_optimizer = 'adam'
self.unittest(
summarizer=dict(directory=self.__class__.directory, labels='all', frequency=2),
reward_estimation=reward_estimation, baseline_policy=baseline_policy,
baseline_objective=baseline_objective, baseline_optimizer=baseline_optimizer
)
for directory in os.listdir(path=self.__class__.directory):
directory = os.path.join(self.__class__.directory, directory)
for filename in os.listdir(path=directory):
os.remove(path=os.path.join(directory, filename))
assert filename.startswith('events.out.tfevents.')
break
os.rmdir(path=directory)
os.rmdir(path=self.__class__.directory)
self.finished_test()
|
[
"alexkuhnle@t-online.de"
] |
alexkuhnle@t-online.de
|
fc420d11845612290556385f4ef93a72a5b9d5d1
|
454365e5c77ff9e3c2fba0d60766e2ee0dac1ac6
|
/noticias_ner/api/teste_clliente.py
|
95d706990bf32127292eb16f18502afc54f7b6b2
|
[] |
no_license
|
SecexSaudeTCU/noticias_ner
|
6aa48f9b076cb20a784244cef58ac270a53471c5
|
2d64041bc18c8c53d463d34e41553b5c2ad4f48e
|
refs/heads/master
| 2023-05-04T00:30:53.364722
| 2021-05-25T19:28:31
| 2021-05-25T19:28:31
| 296,355,526
| 14
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
import requests
headers = {
'accept': 'application/json',
'Content-Type': 'text/plain',
}
params = (
('tipos', 'ORGANIZAÇÃO,INSTITUIÇÃO PÚBLICA,LOCAL,PESSOA'),
('buscar-cnpj', 'N'),
)
texto = 'O Tribunal de Contas da União é um órgão público sediado em Brasília, com atribuição de julgamento de contas de' \
' gestores que utilizam recursos públicos. Também aprecia as contas do Presidente da República. A empresa ' \
'SKY LINE teve suas contas julgadas irregulares por má gestão de recurso público.'
r = requests.post('http://localhost:5000/ner/entidades-texto', headers=headers, params=params,
data=texto.encode(encoding='utf-8'))
print(r.json())
|
[
"moniquelouise@gmail.com"
] |
moniquelouise@gmail.com
|
760d04f4f37ec49446c5810324797d3ef73de59c
|
c947a71a16ed180c920d4b362347f980d93bd2fe
|
/src/Classes/MSDS400/Module 3/workout.py
|
c7f40dafdf59f5c1f52238d5010dc1fa5ddcbc10
|
[
"MIT"
] |
permissive
|
bmoretz/Python-Playground
|
b69cac015e95d97f46ebd678c4493a44befb556f
|
a367ec7659b85c24363c21b5c0ac25db08ffa1f6
|
refs/heads/master
| 2021-05-13T23:35:31.986884
| 2019-11-23T19:07:58
| 2019-11-23T19:07:58
| 116,520,816
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
# As part of a weight reduction program, a man designs a monthly exercise program consisting of bicycling, jogging, and swimming.
# He would like to
# exercise at most 28 hours,
# devote at most 6 hours to swimming,
# and jog for no more than the total number of hours bicycling and swimming.
# The calories burned by this person per hour by bicycling, jogging, and swimming are 200, 427, and 283, respectively.
# How many hours should be allotted to each activity to maximize the number of calories burned? What is the maximum number of calories he will burn?
# (Hint: Write the constraint involving jogging in the form less than or equals 0.)
# Let x 1 be the number of hours spent bicycling,
# let x 2 be the number of hours spent jogging,
# and let x 3 be the number of hours spent swimming.
#
# What is the objective function?
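#
# A hedged sketch of the LP encoded below (my reading of the statement above):
#   maximize    200*x1 + 427*x2 + 283*x3     (calories burned)
#   subject to  x1 + x2 + x3 <= 28           (total exercise hours)
#               x3 <= 6                      (swimming cap)
#               x2 - x1 - x3 <= 0            (jogging <= bicycling + swimming)
#               x1, x2, x3 >= 0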
from pulp import *
workout = LpProblem( "Workout Problem", LpMaximize )
x1 = LpVariable( "x1", 0 ) # Bicycling
x2 = LpVariable( "x2", 0 ) # Jogging
x3 = LpVariable( "x3", 0 ) # Swimming
workout += 200*x1 + 427*x2 + 283*x3
# Constraints
workout += x1 + x2 + x3 <= 28 # no more than total hours
workout += x3 <= 6 # at most hours swimming
workout += x2 <= x1 + x3 # jog no more than Bicycling + Swimming
workout.solve()
print( LpStatus[ workout.status ] )
for variable in workout.variables():
print("{0} = {1}".format( variable.name, variable.varValue ))
print( 'Optimal Sln: {0}'.format( value( workout.objective ) ))
|
[
"bmoretz@ionicsolutions.net"
] |
bmoretz@ionicsolutions.net
|
26a21150eb40414f26615bc32ee8f6ff76b8e9bc
|
102f8a77f7e16d5df7775f97e741adf3fa43f8c3
|
/practice-painting/painting.py
|
20add441638a39bfc6b33f077ddf5dc29c69d390
|
[] |
no_license
|
Horgix/google-hashcode-2016
|
f516d489d7b9a9af570e1ebfc6a7f7dea9ec7eb5
|
6b256ce9e9f23276fa1c03ae6979f77befb7e6e5
|
refs/heads/master
| 2021-01-20T11:23:24.646360
| 2016-02-19T11:07:30
| 2016-02-19T11:07:30
| 51,439,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
#! /usr/bin/env python3
from enum import Enum
class Cell(Enum):
painted = '#'
clear = '.'
class Surface:
def __init__(self):
self.rows = 0
self.columns = 0
self.matrix = {}
def import_from_file(self, filename):
        with open(filename, 'r') as f:
self.rows, self.columns = tuple(f.readline().split())
self.rows = int(self.rows)
self.columns = int(self.columns)
for lineNb, line in enumerate(f.readlines()):
if lineNb >= self.rows:
raise Exception("Line number out of bounds")
line = line.rstrip('\n')
self.matrix[lineNb] = {}
for columnNb, cell in enumerate(line):
if columnNb >= self.columns:
raise Exception("Column number out of bounds")
self.matrix[lineNb][columnNb] = Cell(cell)
def __str__(self):
out = ""
for i in range(self.rows):
for j in range(self.columns):
out += self.matrix[i][j].value
out += '\n'
return out
s = Surface()
s.import_from_file('simple.in')
print(s)
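# A minimal sketch of the 'simple.in' format assumed by import_from_file
# (hypothetical contents; the real Hashcode input may differ in size):
#
#   3 5
#   .###.
#   ##.##
#   .###.
#
# The first line holds the row and column counts; every following line is one
# row of '#' (painted) and '.' (clear) cells.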
|
[
"alexis.horgix.chotard@gmail.com"
] |
alexis.horgix.chotard@gmail.com
|
b01a5b2a81825618861d0c4319567fa150a4eb6b
|
c7b958c683f916b924e3f4e74e41561e037ef34c
|
/sneeu/apps/tumble/urls.py
|
51565e3a303ad7295f4fdc58dab634b8b11bf0bf
|
[] |
no_license
|
sneeu/sneeu_com
|
c32f6d044a598830d53e6334611f2b5a2c8b4c2f
|
653388c50f00966369fd5a1a43bd4ff910300633
|
refs/heads/master
| 2021-01-25T04:08:53.556659
| 2009-04-26T19:45:06
| 2009-04-26T19:45:06
| 56,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from django.conf.urls.defaults import *
from models import Log
import views
info_dict = {
'queryset': Log.objects.all().select_related(),
'paginate_by': 20,
}
urlpatterns = patterns('',
url(r'^$',
'django.views.generic.list_detail.object_list', info_dict, name='log_list'),
url(r'^tumble/update/$',
views.update, name='update'),
# url(r'^tumble/(?P<year>\d{4})/(?P<month>1[012]?|[2-9])/(?P<slug>[^/]+)/$',
# views.post_detail, name='post_detail'),
# url(r'^tumble/(?P<year>\d{4})/(?P<month>1[012]?|[2-9])/(?P<slug>[^/]+)/add-comment/$',
# views.add_comment, name='add_comment'),
# url(r'^tumble/(?P<url>[a-z]+)/$', 'django.contrib.syndication.views.feed',
# {'feed_dict': feeds.FEEDS}),
)
|
[
"john@sneeu.com"
] |
john@sneeu.com
|
172d2609dd65bb545dff186364df94b7fd883faf
|
4dca9fd1f26e7cb58cf6133c13acf585ca9fda66
|
/LanaBackend/asgi.py
|
4d8a67ca46cd9120225667ed6fa2e1de891dd7fe
|
[] |
no_license
|
Codes-Cleans-Transports/LanaBackend
|
a2215036bbefded7509b6655a855b63d82f21c4e
|
cdb0c6be53ca726ea483b64d02fe65002ec1e7df
|
refs/heads/main
| 2023-03-22T18:59:46.567104
| 2021-02-28T13:17:56
| 2021-02-28T13:17:56
| 342,724,184
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for LanaBackend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LanaBackend.settings')
application = get_asgi_application()
|
[
"rrhubenov@gmail.com"
] |
rrhubenov@gmail.com
|
d7c32cef7fa6a9d3bb7a7a05a8ea899c77750ba8
|
90207cc0222440c069b261795bba1e902834f545
|
/MAGIC-UNICORNS/project/main/apps/course/models.py
|
bf35453ec0a8d046ddcb4ece77d2ef64c1138387
|
[] |
no_license
|
Stormlight-Coding/random-projects
|
13d1cc3d32cb86399296b923ab450034891979c0
|
605e196337dfa9bd5480b428a5a92ce193081871
|
refs/heads/MASTER
| 2021-06-14T09:44:03.150283
| 2020-04-09T22:25:35
| 2020-04-09T22:25:35
| 254,489,696
| 0
| 0
| null | 2021-06-10T22:45:08
| 2020-04-09T22:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
from __future__ import unicode_literals
from django.db import models
class Course(models.Model):
name = models.CharField(max_length=255)
desc = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
|
[
"jonposo.music@gmail.com"
] |
jonposo.music@gmail.com
|
8cfd3c66b9a03394e87c6cbbac0e72ae02d96b6b
|
77ae7c76d36009daa01b2317439c1f975f7932b2
|
/exercicios/ex115/arquivo.py
|
dbcbd133583ca6ae2edba87857cfb65ef4e83003
|
[] |
no_license
|
MatheusOldAccount/Exerc-cios-de-Python-do-Curso-em-Video
|
5f26b5a2867fa1a2e36b486a809dfbe8b107b8c2
|
5696c49d3caf5cae817217a2da0598d1cf794f5b
|
refs/heads/master
| 2022-03-22T10:49:33.666660
| 2019-11-25T21:24:43
| 2019-11-25T21:24:43
| 224,052,682
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
def verPessoas():
print('-' * 30)
arq = open('lista.txt', 'r')
print(arq.read())
arq.close()
def adicionarPessoas():
print('-' * 30)
arq = open('lista.txt', 'a')
    nome = str(input('Name: ')).strip().capitalize()
válido = False
while True:
try:
            idade = int(input('Age: '))
except:
            print('\033[31mERROR: please enter a valid integer.\033[m')
else:
            print(f'New record for {nome} added')
            arq.write(f'\n{nome:<30}{idade} years')
válido = True
if válido:
break
arq.close()
|
[
"matheustavares1165@gmail.com"
] |
matheustavares1165@gmail.com
|
3985abdfdeb1870b995aaeaa8ca347149181b77c
|
0cf39bf6a4a5aee36c8229a2f527a77ea3cd3a3d
|
/notebooks/analysis.py
|
ef59efd2cb500d1ea589f25a85b4ceb931407462
|
[
"MIT"
] |
permissive
|
vipinsharma0586/Bank-deposit-predictive-model
|
78812a8b37ceeeec2dc8b4ca0b976ecf60363a54
|
1916267e71b58ca2d5082ca36da6dc3765c09931
|
refs/heads/master
| 2022-12-10T23:18:56.248394
| 2020-08-29T09:53:13
| 2020-08-29T09:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,537
|
py
|
# -*- coding: utf-8 -*-
"""Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1F77mSkc3dvmjKeeoW7o6CfEHZvJhH9uq
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import cnames
from pyod.models.knn import KNN
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import xgboost as xgb
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
# %matplotlib inline
path = "../data/raw/bank-additional-full.csv"
df = pd.read_csv(path, sep= ';')
df.drop("duration", axis=1)
df.rename(columns={'y':'deposit'}, inplace=True)
df.dtypes
# y column
# Binary Encoding
df['deposit'] = np.where(df.deposit == 'yes', 1, 0)
"""###### CLEANING OUTLIERS USING PYOD"""
import random
corr = df.corr()['deposit'].abs().sort_values(ascending=False)
h_corr_cols = corr[corr < 1].index.tolist()
colors = list(cnames.keys())
sns.set_style('darkgrid')
fig , ax = plt.subplots(4,3,figsize = (16,12))
ax = ax.ravel()
for i,col in enumerate(h_corr_cols):
sns.boxplot(df[col], ax = ax[i],color = random.choice(colors))
x = df[h_corr_cols].values
model = KNN(contamination=.1)
model.fit(x)
predicted = model.predict(x)
outliers = df.loc[(predicted == 1),:]
inliers = df.loc[(predicted == 0),:]
df = df.drop(index = df.loc[(predicted == 1),:].index )
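# KNN(contamination=.1) flags roughly 10% of the rows as outliers, so the
# drop above discards about a tenth of the dataset.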
"""###### Treating imbalance data"""
df.education.value_counts().to_frame()
df['education'].replace({'basic.9y': 'basic','basic.4y': 'basic','basic.6y':'basic'},inplace=True)
df['education'].value_counts().to_frame()
df.job.value_counts().to_frame()
df['job'].replace({'entrepreneur': 'self-employed', 'technician': 'blue-collar',
'admin.': 'white-collar', 'management': 'white-collar',
'services': 'pink-collar', 'housemaid': 'pink-collar'}, inplace=True)
df.job.value_counts().to_frame()
df.shape
# categorical columns
# OneHotEncoding
cat_cols = df.select_dtypes(include=[
'object']).columns
df = pd.get_dummies(df, columns=cat_cols)
#standard Scaler for Numerical Variables
scaler = StandardScaler()
num_cols = df.select_dtypes(include=['float64', 'int64']).columns
num_cols = num_cols.drop('deposit')
df[num_cols] = scaler.fit_transform(df[num_cols])
df.head(2)
df.shape
X = df.drop(columns=['duration', 'deposit'])
y = df['deposit']
print(X.shape)
print(y.shape)
y.value_counts().to_frame()
sampler = RandomOverSampler(random_state=42)
X_sampled, y_sampled = sampler.fit_resample(X, y)
pd.Series(y_sampled).value_counts().to_frame()
"""###### Dimensionality Reduction: Principal Component Analysis"""
from sklearn.decomposition import PCA
pca = PCA(n_components = 10)
pca.fit(X_sampled)
X = pca.transform(X_sampled)
print(X_sampled.shape)
print(y_sampled.shape)
print(X.shape)
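# A hedged sanity check one could add here (not part of the original run):
# print(pca.explained_variance_ratio_.sum())  # variance kept by the 10 components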
df_y = pd.DataFrame(data = y_sampled, columns = ['deposit'])
df_X = pd.DataFrame(data = X, columns = ['PC_1', 'PC_2','PC_3', 'PC_4','PC_5','PC_6', 'PC_7','PC_8', 'PC_9','PC_10'])
df_X
df_y.to_csv('../data/processed/results.csv', index=False)
df_X.to_csv('../data/processed/features.csv', index=False)
|
[
"lotomej12@gmail.com"
] |
lotomej12@gmail.com
|
4fa582c5ada4c2d880f47491a4e012018bc74dbb
|
8c38028da7a6c9443c3b9163a1db64773e39e755
|
/users.py
|
2680f52e307d0118cb8595d737a3149390923672
|
[] |
no_license
|
Vanhatai/PWS-B4.12
|
bf8e8f230c7f547256e7f8be4628dd3342769ae3
|
a1061d7989e7c18bd9d6b84238a99a506cb91345
|
refs/heads/master
| 2020-07-06T18:54:06.837311
| 2019-08-19T06:16:24
| 2019-08-19T06:16:24
| 203,109,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,987
|
py
|
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# constant that specifies how to connect to the database
DB_PATH = "sqlite:///sochi_athletes.sqlite3"
# base class for the table models
Base = declarative_base()
class User(Base):
"""
    Describes the structure of the "user" table that stores users' registration data
    """
    # set the table name
    __tablename__ = "user"
    # user identifier, primary key
id = sa.Column(sa.Integer, primary_key=True)
first_name = sa.Column(sa.Text)
last_name = sa.Column(sa.Text)
email = sa.Column(sa.Text)
gender = sa.Column(sa.Text)
birthdate = sa.Column(sa.Text)
height = sa.Column(sa.Float)
def connect_db():
"""
    Connects to the database, creates the tables if they do not exist yet, and returns a session object
    """
    # create the database engine
    engine = sa.create_engine(DB_PATH)
    # create the declared tables
    Base.metadata.create_all(engine)
    # create a session factory
    session = sessionmaker(engine)
    # return a new session
return session()
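# A minimal usage sketch (assumes sochi_athletes.sqlite3 is reachable):
#   session = connect_db()
#   print(session.query(User).count())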
def request_data():
"""
    Prompts the user for their data and returns a new User object
    """
    # prompt for the data
    first_name = input("First name: ")
    last_name = input("Last name: ")
    email = input("Email address: ")
    gender = input("Gender (Male/Female): ")
    birthdate = input("Birthdate (YYYY-MM-DD): ")
    height = float(input("Height (m): "))
    # create a new user
user = User(
first_name=first_name,
last_name=last_name,
email=email,
gender=gender,
birthdate=birthdate,
height=height,
)
    # return the created user
return user
def main():
"""
    Interacts with the user and handles the user input
"""
session = connect_db()
    # request the user's data
user = request_data()
    # add the new user to the session
session.add(user)
session.commit()
print("Спасибо, данные сохранены!")
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Vanhatai.noreply@github.com
|
fb5e14362c54bc9ed160c239f7c153c7f418275d
|
8d5fac378cb1f7c826996e442375c7ee8cb842d5
|
/ExpressSuiteTools/ExpressSuiteCore.py
|
260f425a82a9434266342c857f1a9fc2b60b8c4d
|
[] |
no_license
|
ichar/Express-Suite-DMS
|
6f4cf7064b774894995b2224a3ca1a13ac4aa64a
|
bdf3ad7c1ec4bcdec08000bf4ac5315ca6a0ad19
|
refs/heads/master
| 2021-01-11T10:59:15.101637
| 2018-02-16T02:09:12
| 2018-02-16T02:09:12
| 72,807,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,181
|
py
|
"""
ExpressSuiteCore and PortalGenerator classes
$Id: ExpressSuiteCore.py, v 1.0 2007/08/30 12:00:00 Exp $
*** Checked 09/06/2009 ***
"""
__version__ = '$Revision: 1.0 $'[11:-2]
import Zope2
import sys, os
from copy import copy
from locale import setlocale, getlocale, LC_ALL
from string import join
from urllib import splittype, splitport
from urlparse import urlparse
from types import StringType, UnicodeType
from Globals import HTMLFile, DTMLFile, package_home, get_request
from AccessControl import ClassSecurityInfo
from Acquisition import aq_get
from ZPublisher import Publish
from ZPublisher.HTTPRequest import default_port
from ZPublisher.BeforeTraverse import NameCaller, registerBeforeTraverse, queryBeforeTraverse
from Products.CMFCore import permissions as CMFCorePermissions
from Products.CMFCore.FSDTMLMethod import FSDTMLMethod
from Products.CMFCore.FSImage import FSImage
from Products.CMFCore.PortalObject import PortalObjectBase
from Products.CMFCore.DirectoryView import addDirectoryViews, createDirectoryView
from Products.CMFCore.utils import getToolByName, _checkPermission, _getAuthenticatedUser
from Products.CMFDefault import DiscussionItem, SkinnedFolder
from Products.CMFDefault import cmfdefault_globals
from Products.CMFDefault.DublinCore import DefaultDublinCoreImpl
try: from Products.AppTracker.AppTracker import AppTracker
except ImportError: AppTracker = None
from logging import getLogger
logger = getLogger( 'ExpressSuiteCore' )
import Config
if Config.IsSQLCatalog:
import ZSQLCatalogTool as CatalogTool
from Products.ZMySQLDA.DA import Connection as SQLConnection
else:
import CatalogTool
import ActionsTool
import BackupFSRoot
import CommentsTool
import DTMLDocument, DefaultCategories, DepartmentDictionary
import ErrorLogTool, Exceptions, FSFile, FSFolder, Features, GuardedTable
import HTMLDocument, HTMLCard
import Mail, MailFolder, MemberDataTool, MetadataTool
import PropertiesTool, Registry, SearchProfile, ServicesTool, Shortcut
import TaskItem, TypesTool
import UserFolder
# these may need to be upgraded
#from MigrationTool import MigrationTool
from Config import Roles
from Heading import Heading, factory_type_information as Heading_factory_type_information
from ManageCMFContent import ManageCMFContent
from SimpleObjects import ContainerBase
from Utils import InitializeClass, getLanguageInfo, makepath, joinpath, pathdelim, formatComments, \
GetSessionValue, SetSessionValue, ExpireSessionValue
import CustomDefinitions
from CustomObjects import CustomDefs, ObjectHasCustomCategory, ObjectShouldBeCleanedBeforePaste, \
CustomCheckPermission, CustomCookedTableTranslit, getJSCleanerAttrs
factory_type_information = ( \
DTMLDocument.factory_type_information
+ FSFile.factory_type_information
+ FSFolder.factory_type_information
+ GuardedTable.factory_type_information
+ Heading_factory_type_information
+ HTMLDocument.factory_type_information
+ HTMLCard.factory_type_information
+ MailFolder.factory_type_information
+ Registry.factory_type_information
+ SearchProfile.factory_type_information
+ Shortcut.factory_type_information
+ TaskItem.factory_type_information
)
DiscussionItem_fti = copy( DiscussionItem.factory_type_information )
DiscussionItem_fti[0]['disallow_manual'] = 1
SkinnedFolder_fti = copy( SkinnedFolder.factory_type_information )
SkinnedFolder_fti[0]['disallow_manual'] = 1
cmf_factory_type_information = DiscussionItem_fti + SkinnedFolder_fti
class ExpressSuiteCore( ContainerBase, PortalObjectBase, DefaultDublinCoreImpl ):
"""
Functions of this class help in the setup of a new ExpressSuiteCore
"""
_class_version = 1.01
meta_type = 'ExpressSuiteCore'
__implements__ = ( Features.isPortalRoot,
Features.isPrincipiaFolderish,
PortalObjectBase.__implements__,
DefaultDublinCoreImpl.__implements__,
)
isPrincipiaFolderish = 1
security = ClassSecurityInfo()
manage_options = PortalObjectBase.manage_options + \
ContainerBase.manage_options
_properties = (
{'id':'title', 'type':'string', 'mode':'w'},
{'id':'description', 'type':'text', 'mode':'w'},
{'id':'server_url', 'type':'string', 'mode':'w'},
{'id':'stemmer', 'type':'string', 'mode':'w'},
{'id':'product_version', 'type':'string', 'mode':'w'},
)
# overriden by Implicit in ItemBase
__of__ = PortalObjectBase.__of__
# overriden by ObjectManager in ContainerBase
_checkId = PortalObjectBase._checkId
_verifyObjectPaste = PortalObjectBase._verifyObjectPaste
# default attribute values
title = ''
description = ''
server_url = None
product_version = None
service_unavailable = DTMLFile( 'dtml/service_unavailable', globals() )
def __init__( self, id, title='' ):
"""
Initializes class instance
"""
ContainerBase.__init__( self )
PortalObjectBase.__init__( self, id, title )
DefaultDublinCoreImpl.__init__( self )
def _initstate( self, mode ):
"""
Initializes instance attributes
"""
if not ContainerBase._initstate( self, mode ):
return 0
# install our before_traverse hook
if not queryBeforeTraverse( self, __name__ ):
registerBeforeTraverse( self, NameCaller('_beforeTraverseHook'), __name__ )
if not mode:
return 1
if getattr( self, 'server_url', None ) is None:
REQUEST = get_request()
self._setPropValue( 'server_url', REQUEST and REQUEST.physicalPathToURL('') or '' )
self._upgrade( 'portal_actions', ActionsTool.ActionsTool )
self._upgrade( 'portal_catalog', CatalogTool.CatalogTool )
self._upgrade( 'portal_memberdata', MemberDataTool.MemberDataTool )
self._upgrade( 'portal_metadata', MetadataTool.MetadataTool )
self._upgrade( 'portal_properties', PropertiesTool.PropertiesTool )
self._upgrade( 'portal_types', TypesTool.TypesTool )
for view in self.portal_skins.objectValues():
if getattr( view, '_isDirectoryView', None ):
view._dirpath = view._dirpath.replace( '\\', pathdelim )
if not hasattr( self, 'portal_errorlog' ):
tool = ErrorLogTool.ErrorLogTool()
self._setObject( tool.getId(), tool )
if not hasattr( self, 'portal_comments' ):
tool = CommentsTool.CommentsTool()
self._setObject( tool.getId(), tool )
if not hasattr( self, 'portal_services' ):
tool = ServicesTool.ServicesTool()
self._setObject( tool.getId(), tool )
gen = PortalGenerator()
gen.setupMail( self )
return 1
def _afterValidateHook( self, user, published=None, REQUEST=None ):
"""
Prepares global enviroment after the user is authenticated
"""
self.setContentCharset( REQUEST )
self.fixFormLanguage( REQUEST )
if isinstance( published, FSImage ):
REQUEST.RESPONSE.setHeader( 'Cache-Control', 'public, max-age=7200, must-revalidate' )
elif isinstance( published, FSDTMLMethod ):
REQUEST.RESPONSE.setHeader('Expires', 'Tue, 22 Jan 1980 01:01:01 GMT')
def _beforeTraverseHook( self, container, REQUEST, *args ):
"""
Prepares global enviroment before any object inside is accessed
"""
try:
self.fixProxiedRequest( REQUEST )
self.setPortalLocale()
self.setContentCharset( REQUEST )
except:
pass
try: mpath = list( Config.MaintainanceMode.get( self._p_oid ) or [] )
except: mpath = None
if not mpath:
return
stack = REQUEST['TraversalRequestNameStack']
mpath.reverse()
if stack and ( stack[-1] in ['portal_errorlog', 'scripts.js', 'styles.css'] or \
stack[0] == 'manage' or stack[0].startswith('manage_') ):
return
if stack[ -len(mpath): ] != mpath:
REQUEST['TraversalRequestNameStack'] = ['maintainance']
def _containment_onAdd( self, item, container ):
"""
Is called after our parent *item* is added to the *container*
"""
# Not calling base class's methods from here avoids reinitialization
# of all the content objects after product version change.
# Setup is carried by generator anyway.
        # We need the same scheme as the Scheduler uses, to provide conflict-free
        # database backups when more than one ExpressSuiteCore is present in the ZODB.
loop_app = self.getPhysicalRoot()
if not hasattr( loop_app, 'ExpressSuiteBackup' ):
try:
b = BackupFSRoot.BackupFSRoot()
loop_app._setObject( b.id, b )
except:
pass
def _containment_onDelete( self, item, container ):
"""
Is called before our parent *item* is deleted from its *container*
"""
root = self.getPhysicalRoot()
backupFSRoot = getattr(root, 'ExpressSuiteBackup', None)
if backupFSRoot is not None:
backupFSRoot.unregistryAppBackup( joinpath( item.getPhysicalPath() ) )
PortalObjectBase.manage_beforeDelete( self, item, container )
def _instance_onCreate( self ):
self.product_version = Config.ProductVersion
security.declareProtected( CMFCorePermissions.View, 'maintainance' )
def maintainance( self, REQUEST=None ):
"""
        Maintenance mode
"""
if _checkPermission( CMFCorePermissions.ManagePortal, self ):
mpath = Config.MaintainanceMode.get( self._p_oid )
return self.redirect( action='/'.join(mpath) )
return self.service_unavailable( self, REQUEST )
#
# ==========================================================================================================
#
def view( self, REQUEST=None ):
""" Invokes the default view of the content storage """
REQUEST = REQUEST or self.REQUEST
return self.storage(REQUEST)
security.declarePrivate( 'fixProxiedRequest' )
def fixProxiedRequest( self, REQUEST ):
""" Fixes environment if request was processed by frontend server """
# mod_proxy: X-Forwarded-Server
# mod_accel: X-Host, X-Real-IP, X-URI, X-Method
server = REQUEST.get('SERVER_URL')
real_host = REQUEST.get('HTTP_X_FORWARDED_SERVER') or REQUEST.get('HTTP_X_HOST')
real_addr = REQUEST.get('HTTP_X_REAL_IP')
real_uri = REQUEST.get('HTTP_X_URI')
# change SERVER_URL to frontend server's address and protocol
if server and real_host:
proto = REQUEST.get('HTTP_X_METHOD') or splittype( server )[0]
host, port = splitport( real_host )
REQUEST.setServerURL( proto, host, port or default_port.get( proto ) )
# set REMOTE_ADDR to the real client's address
if real_addr:
REQUEST.environ['REMOTE_ADDR'] = real_addr
# modify SCRIPT_NAME for proxied requests like
# http://frontend/prefix/portal -> http://backend/portal
if real_uri:
# TODO: handle different portal name on frontend
pos = real_uri.find( REQUEST['PATH_INFO'] )
if pos > 0:
REQUEST._script = real_uri[ 1:pos ].split('/')
security.declarePrivate( 'setPortalLocale' )
def setPortalLocale( self ):
""" Changes system locale according to the portal language """
info = getLanguageInfo( self )
# find default and effective locale settings
def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )
cur_locale = getlocale()
cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''
# check whether locale is already ok
if def_locale is None or cur_locale.lower() == def_locale.lower():
return
# change effective locale
try:
setlocale( LC_ALL, def_locale )
except Exceptions.LocaleError:
pass
security.declarePublic( 'setContentCharset' )
def setContentCharset( self, REQUEST=None ):
""" Sets response charset according to the user's selected language """
REQUEST = REQUEST or aq_get( self, 'REQUEST', None )
if REQUEST is None:
return
lang = REQUEST.cookies.get( 'LOCALIZER_LANGUAGE' )
info = getLanguageInfo( lang, None )
if lang is None or info is None:
membership = getToolByName( self, 'portal_membership', None )
if membership is not None:
lang = membership.getLanguage( preferred=1, REQUEST=REQUEST )
info = getLanguageInfo( lang )
REQUEST.set( 'LOCALIZER_LANGUAGE', lang )
if not membership.isAnonymousUser():
path = joinpath( '', REQUEST._script, self.absolute_url( relative=1 ) )
REQUEST.RESPONSE.setCookie( 'LOCALIZER_LANGUAGE', lang, path=path )
charset = info['http_charset']
REQUEST.set( 'LOCALIZER_CHARSET', charset )
REQUEST.set( 'management_page_charset', charset )
REQUEST.RESPONSE.setHeader( 'content-type', 'text/html; charset=%s' % charset )
security.declarePublic( 'fixFormLanguage' )
def fixFormLanguage( self, REQUEST ):
"""
Replaces HTML-encoded entities with their corresponding
characters in the POST form data
"""
if REQUEST is None:
return
lang = REQUEST.get( 'LOCALIZER_LANGUAGE' )
map = Config.LanguageEntitiesMap.get( lang )
if map is None:
return
for key, value in REQUEST.form.items():
if type(value) in ( StringType, UnicodeType, ):
for entity, char in map.items():
value = value.replace( entity, char )
REQUEST.form[ key ] = value
if REQUEST.REQUEST_METHOD == 'PUT':
value = REQUEST.other.get('BODY')
if value is not None:
for entity, char in map.items():
value = value.replace( entity, char )
REQUEST.other['BODY'] = value
security.declareProtected( CMFCorePermissions.View, 'isEffective' )
def isEffective( self, date ):
""" Override DefaultDublinCoreImpl's test, since we are always viewable """
return 1
def reindexObject( self, idxs=[] ):
""" Overrides DefaultDublinCoreImpl's method """
pass
def productVersion( self ):
""" Returns version string of the product """
return Config.ProductVersion
#
# Portal global utilities ==================================================================================
#
security.declarePublic( 'getPortalObject' )
def getPortalObject( self ):
""" Returns the portal object itself """
return self
security.declarePublic( 'getPortalConfiguration' )
def getPortalConfiguration( self ):
""" Returns the PortalConfiguration object """
return CustomDefinitions.portalConfiguration
security.declarePublic( 'getDepartmentDictionary' )
def getDepartmentDictionary( self ):
""" Returns the DepartmentDictionary object """
return DepartmentDictionary.departmentDictionary
security.declarePublic( 'getCustomDefinitions' )
def getCustomDefinitions( self, defs, *args, **kw ):
""" Returns given custom definition value """
return CustomDefs( defs, *args, **kw )
security.declarePublic( 'hasCustomCategory' )
def hasCustomCategory( self, context ):
""" Returns given custom definition value """
return ObjectHasCustomCategory( context )
def shouldBeCleanedBeforePaste( self, context ):
""" Verifies whether content body should be cleaned before paste """
return ObjectShouldBeCleanedBeforePaste( context )
security.declarePublic( 'getJSCleanerForCategory' )
def getJSCleanerAttrsForCategory( self, context, category, **kw ):
""" Returns js cleaner attrs """
return getJSCleanerAttrs( context, category, **kw )
security.declarePublic( 'getCustomCookedTableTranslit' )
def getCustomCookedTableTranslit( self, context, id, values ):
""" Returns translitted custom data table values """
return CustomCookedTableTranslit( context, id, values )
security.declarePublic( 'getFormattedComments' )
def getFormattedComments( self, text, mode=None ):
""" Returns formatted comments text """
return formatComments( text, mode )
security.declarePublic( 'hasCustomPermissions' )
def hasCustomPermissions( self, context, permission ):
""" Returns given custom definition value """
return CustomCheckPermission( context, permission )
security.declarePublic( 'getSession' )
def getSession( self, name, default=None, REQUEST=None, cookie=None ):
""" Returns session data value """
return GetSessionValue( self, name, default, REQUEST, cookie )
security.declarePublic( 'setSession' )
def setSession( self, name, value, REQUEST=None, cookie=None ):
""" Stores session data value """
SetSessionValue( self, name, value, REQUEST, cookie )
InitializeClass( ExpressSuiteCore )
class PortalGenerator:
klass = ExpressSuiteCore
def setupTools( self, p ):
"""
Setup initial tools
"""
addCMFCoreTool = p.manage_addProduct['CMFCore'].manage_addTool
addCMFCoreTool( 'CMF Skins Tool', None )
addCMFCoreTool( 'CMF Undo Tool', None )
addCMFCoreTool( 'CMF URL Tool', None )
addCMFDefaultTool = p.manage_addProduct['CMFDefault'].manage_addTool
addCMFDefaultTool( 'Default Discussion Tool', None )
addCMFDefaultTool( 'Default Registration Tool', None )
addExpressSuiteTool = p.manage_addProduct['ExpressSuiteTools'].manage_addTool
addExpressSuiteTool( 'ExpressSuite Actions Tool', None )
addExpressSuiteTool( 'ExpressSuite Catalog Tool', None )
addExpressSuiteTool( 'ExpressSuite Comments Tool', None )
addExpressSuiteTool( 'ExpressSuite DocumentLink Tool', None )
addExpressSuiteTool( 'ExpressSuite ErrorLog Tool', None )
addExpressSuiteTool( 'ExpressSuite Followup Actions Tool', None )
addExpressSuiteTool( 'ExpressSuite Help Tool', None )
addExpressSuiteTool( 'ExpressSuite Member Data Tool', None )
addExpressSuiteTool( 'ExpressSuite Membership Tool', None )
addExpressSuiteTool( 'ExpressSuite Metadata Tool', None )
addExpressSuiteTool( 'ExpressSuite Properties Tool', None )
addExpressSuiteTool( 'ExpressSuite Types Tool', None )
addExpressSuiteTool( 'ExpressSuite Workflow Tool', None )
addExpressSuiteTool( 'ExpressSuite Services Tool', None )
addExpressSuiteTool( 'Portal Scheduler Tool', None )
#addExpressSuiteTool( 'ExpressSuite Migration Tool', None )
def setupMessageCatalog( self, p, language ):
langs = Config.Languages
p.manage_addProduct['Localizer'].manage_addMessageCatalog( 'msg', 'Messages', langs.keys())
msg = p._getOb( 'msg' )
path = joinpath( package_home( globals() ), 'locale' )
msg.manage_changeDefaultLang( language or Config.DefaultLanguage )
for lang, info in langs.items():
charset = info['python_charset'].upper()
msg.update_po_header( lang, '', '', '', charset )
# import PO file into the Message Catalog
try:
file = open( joinpath( path, '%s.po' % lang ), 'rt' )
except IOError:
pass
else:
msg.manage_import( lang, file )
file.close()
# fix empty string (just in case...)
msg.manage_editLS( '', (lang, '') )
# select default language
p.setPortalLocale()
p.setContentCharset()
def setupMail( self, p ):
"""
Create mail objects
"""
mh = getattr( p, 'MailHost', None )
if not ( mh is None or isinstance( mh, Mail.MailServerBase ) ):
p._delObject( 'MailHost' )
mh = None
if mh is None:
Mail.manage_addMailSender( p, 'MailHost', host='' )
if getattr( p, 'MailServer', None ) is None:
Mail.manage_addMailServer( p, 'MailServer', host='' )
def setupUserFolder( self, p ):
p.manage_addProduct['ExpressSuiteTools'].addUserFolder()
def setupCookieAuth( self, p ):
p.manage_addProduct['CMFCore'].manage_addCC( id='cookie_authentication' )
p.cookie_authentication.auto_login_page = ''
def setupRoles( self, p ):
p.__ac_roles__ = ( 'Member', 'Visitor', 'Editor', 'Writer', 'Reader', 'Author', 'VersionOwner' )
def setupPermissions( self, p ):
"""
Setup some suggested roles to permission mappings
"""
mp = p.manage_permission
for entry in Config.PortalPermissions:
apply( mp, entry )
def setupDefaultSkins( self, p ):
"""
Setup portal skins
"""
pstool = getToolByName( p, 'portal_skins', None )
#pstool = getattr( p, 'portal_skins', None )
if pstool is None:
return
cmf_manager = ManageCMFContent()
for view in Config.SkinViews:
cmf_manager.register_view( pstool, 'skins/%s' % view )
# these skin elements are available for anonymous visitors
#for name in Config.PublicViews:
# pstool[ name ].manage_permission( CMFCorePermissions.View, [Roles.Anonymous], 1 )
addDirectoryViews( pstool, 'skins', cmfdefault_globals )
pstool.manage_addProduct['OFSP'].manage_addFolder( id='custom' )
default_skins = ', '.join( ['custom'] + Config.SkinViews )
pstool.addSkinSelection( 'Site', default_skins, make_default=1 )
pstool.addSkinSelection( 'Mail', 'mail_templates' )
p.setupCurrentSkin()
def setupTypes( self, p, initial_types=factory_type_information ):
"""
Setup portal types
"""
tptool = getToolByName( p, 'portal_types', None )
#tptool = getattr( p, 'portal_types', None )
if tptool is None:
return
for x in initial_types:
if not tptool.getTypeInfo( x['id'] ):
tptool.addType( x['id'], x )
def setupCategories( self, p, categories=None, **kw ):
"""
Setup default categories
"""
metadata = getToolByName( p, 'portal_metadata', None )
if metadata is None:
return
if not categories:
categories = ['Document', 'SimpleDocs']
default_categories = DefaultCategories.DefaultCategories()
for id in categories:
if metadata.getCategoryById( id ):
continue
category = DefaultCategories.setupCategory( default_categories, id, metadata )
if category is None:
continue
workflow = category.getWorkflow()
if workflow is None:
continue
DefaultCategories.setupWorkflow( default_categories, workflow, id, metadata )
del default_categories
def setupMimetypes( self, p ):
"""
Setup mime types
"""
p.manage_addProduct[ 'CMFCore' ].manage_addRegistry()
reg = p.content_type_registry
reg.addPredicate( 'dtml', 'extension' )
reg.getPredicate( 'dtml' ).edit( extensions="dtml" )
reg.assignTypeName( 'dtml', 'DTMLDocument' )
reg.addPredicate( 'link', 'extension' )
reg.getPredicate( 'link' ).edit( extensions="url, link" )
reg.assignTypeName( 'link', 'Link' )
reg.addPredicate( 'news', 'extension' )
reg.getPredicate( 'news' ).edit( extensions="news" )
reg.assignTypeName( 'news', 'News Item' )
reg.addPredicate( 'document', 'major_minor' )
reg.getPredicate( 'document' ).edit( major="text", minor="" )
reg.assignTypeName( 'document', 'HTMLDocument' )
reg.addPredicate( 'image', 'major_minor' )
reg.getPredicate( 'image' ).edit( major="image", minor="" )
reg.assignTypeName( 'image', 'Site Image' )
reg.addPredicate( 'file', 'major_minor' )
reg.getPredicate( 'file' ).edit( major="application", minor="" )
reg.assignTypeName( 'file', 'File' )
def setupWorkflow( self, p, check=0 ):
"""
Setup default workflow
"""
workflow = getToolByName( p, 'portal_workflow', None )
tptool = getToolByName( p, 'portal_types', None )
if workflow is None or tptool is None:
return
cbt = workflow._chains_by_type
count = 0
seen = []
for chain, types in Config.WorkflowChains.items():
seen.extend( types )
for pt in types:
if not cbt or cbt.get( pt ) != chain:
count += 1
if not check:
wf_id = 'heading_workflow'
workflow.createWorkflow( wf_id )
workflow.setChainForPortalTypes( Config.WorkflowChains['heading_workflow'], ( wf_id, ) )
workflow.setChainForPortalTypes( Config.WorkflowChains['__empty__'], ('', ) )
DefaultCategories.setupHeadingWorkflow( workflow.getWorkflowById( wf_id ) )
return count
def setupDefaultMembers( self, p, lang='ru' ):
"""
Adds default members and groups
"""
membership = getToolByName( p, 'portal_membership', None )
msg = getToolByName( p, 'msg', None )
if None in ( membership, msg ):
return None
membership._addGroup( 'all_users', msg.gettext( 'All users', lang=lang ) )
membership._addGroup( '_managers_', msg.gettext( 'Managers', lang=lang ) )
username = None
try: username = _getAuthenticatedUser().getUserName()
except: pass
if not username:
username = 'admin'
roles = ( 'Member', 'Manager', )
properties = { 'lname' : msg.gettext( 'admin', lang=lang ) }
membership.addMember( id=username, password='123', roles=roles, domains='', properties=properties )
member = membership.getMemberById( username )
if member is None:
return None
users = [ username ]
membership.manage_changeGroup( group='all_users', group_users=users )
membership.manage_changeGroup( group='_managers_', group_users=users )
return member
def setupStorage( self, p, create_userfolder=None ):
"""
Setup storage folders
"""
if p is None:
return
base = p.manage_addProduct['ExpressSuiteTools']
if base is None:
return
msg = getToolByName( p, 'msg', None )
if msg is None:
return
lang = msg.get_default_language()
member = create_userfolder and self.setupDefaultMembers( p, lang ) or None
storage = self._makeHeading( p.manage_addProduct['ExpressSuiteTools'], 'storage', \
msg.gettext( 'Content storage', lang=lang ) )
if storage:
self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'members', \
msg.gettext( 'Home folders', lang=lang ) )
self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'user_defaults', \
msg.gettext( 'Default content', lang=lang ) )
system = self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'system', \
msg.gettext( 'System folders', lang=lang ) )
else:
system = None
if system:
self._makeHeading( p.storage.system.manage_addProduct['ExpressSuiteTools'], 'templates', \
msg.gettext( 'Document templates', lang=lang ) )
if storage:
mp = p.storage.manage_permission
mp('List folder contents', ['Owner','Manager', 'Editor', 'Writer', 'Reader', 'Author'], 0)
mp('View', ['Owner','Manager', 'Member'], 1)
if create_userfolder and member is not None:
home = member.getHomeFolder( create=1 )
# add access rights for system folder
if system:
p.storage.system.manage_setLocalGroupRoles( 'all_users', ['Reader'] )
if storage:
if member is not None:
p.storage.changeOwnership( member, recursive=1 )
p.storage.reindexObject( recursive=1 ) #idxs=['allowedRolesAndUsers'],
def setupTracker( self, p ):
"""
Setup tracker
"""
pass
def setupActions( self, p ):
"""
Setup portal actions
"""
actions = getToolByName( p, 'portal_actions', None )
if actions is None:
return
actions.action_providers = ( \
'portal_comments'
, 'portal_discussion'
, 'portal_help'
, 'portal_membership'
, 'portal_metadata'
, 'portal_properties'
, 'portal_registration'
, 'portal_services'
, 'portal_scheduler'
, 'portal_undo'
, 'portal_workflow'
)
def setupCatalog( self, p ):
"""
Setup portal catalogs
"""
tool_ids = ( 'portal_catalog', 'portal_followup', 'portal_links', )
for id in tool_ids:
ob = getToolByName( p, id, None )
if ob is None:
return
if Config.IsSQLCatalog and ob.implements('IZSQLCatalog'):
ob.sql_db_name = p.getId()
ob.sql_prefix = ''.join([ x[0:1] for x in id.split('_') ] )
ob.sql_root = '_Root'
ob.sql_user = Config.SQLDBUser
ob.setup()
ob.setupIndexes()
def setup( self, p, language, create_userfolder ):
"""
Setup portal object
"""
logger.info('Setup new ExpressSuite instance, id: %s, IsSQLCatalog: %s' % ( p.getId(), Config.IsSQLCatalog ) )
if Config.IsSQLCatalog:
id = Config.SQLDBConnectorID
addZMySQLConnection( p, id, 'Z MySQL Database Connection', 1 )
self.setupTools( p )
self.setupCatalog( p )
self.setupMessageCatalog( p, language )
self.setupMail( p )
if int(create_userfolder) != 0: self.setupUserFolder( p )
self.setupCookieAuth( p )
self.setupRoles( p )
self.setupPermissions( p )
self.setupDefaultSkins( p )
# SkinnedFolders are only for customization;
# they aren't a default type.
default_types = tuple( filter( lambda x: x['id'] != 'Skinned Folder', factory_type_information ) )
self.setupTypes( p, default_types )
self.setupTypes( p, cmf_factory_type_information )
self.setupCategories( p )
self.setupMimetypes( p )
self.setupWorkflow( p )
self.setupActions( p )
self.setupManual( p, 'manual' )
logger.info('Successfully created new instance')
def setupManual( self, target, path, ctype=None ):
"""
Setup manual
"""
createDirectoryView( target, makepath( path ) )
def create( self, parent, id, language, create_userfolder ):
"""
Creates an instance
"""
id = str(id)
portal = self.klass( id=id )
parent._setObject( id, portal )
# Return the fully wrapped object
p = parent.this()._getOb( id )
self.setup( p, language, create_userfolder )
return p
def setupDefaultProperties( self, p, id, title, description, email_from_address, email_from_name,
validate_email, server_url, stemmer ):
"""
Setup default portal properties
"""
p._setProperty( 'email_from_address', email_from_address, 'string' )
p._setProperty( 'email_from_name', email_from_name, 'string' )
p._setProperty( 'validate_email', validate_email and 1 or 0, 'boolean' )
p._setProperty( 'email_antispam', '', 'string' )
p._setProperty( 'email_error_address', '', 'string' )
p._setProperty( 'instance', id, 'string' )
p._setProperty( 'remote_url', '', 'string' )
p._setProperty( 'apply_threading', 1, 'boolean' )
p._setProperty( 'use_timeout', 1, 'boolean' )
p._setProperty( 'duration', 0.001, 'float' )
p._setProperty( 'p_resolve_conflict', 0, 'boolean' )
p._setProperty( 'max_involved_users', 10, 'int' )
p._setProperty( 'service_timeout', 30, 'int' )
p._setProperty( 'created_search_interval', 999, 'int' )
p._setProperty( 'common_url', '', 'string' )
p._setProperty( 'send_to_support', 0, 'boolean' )
p._setProperty( 'member_activity', 1, 'boolean' )
p._setProperty( 'emergency_service', 0, 'boolean' )
p._setProperty( 'p_log', 0, 'boolean' )
p._setProperty( 'suspended_mail', 1, 'boolean' )
p._setProperty( 'mail_frequency', 1, 'int' )
p._setProperty( 'mail_threshold', 500, 'int' )
p._setPropValue( 'server_url', server_url )
p._setPropValue( 'stemmer', stemmer )
p.title = title
p.description = description
def setupAfterCreate( self, p, create_userfolder ):
"""
Setup portal catalog and folders storage
"""
self.setupStorage( p, create_userfolder )
def _makeHeading( self, ob, id, title=None ):
"""
Creates Heading instance
"""
try:
folder = Heading( id=id, title=title )
if folder is not None:
ob._setObject( id, folder, set_owner=1 )
return 1
except:
raise
return 0
def addZMySQLConnection( dispatcher, id, title='', check=None ):
"""
Adds MySQL DB Connection
"""
connection_string = '-mysql root'
conn = SQLConnection( id, title, connection_string, check )
if conn.connected():
DB = conn._v_database_connection
if DB is not None and DB.is_opened():
instance = dispatcher.getId()
if instance:
DB.query( "CREATE DATABASE IF NOT EXISTS %s" % instance )
acl_users = aq_get(dispatcher, 'acl_users', None, 1)
if acl_users is not None:
userid = Config.SQLDBUser
user = acl_users.getUserById( userid )
passwd = user.__
servers = ( 'localhost', '%', )
for x in servers:
DB.query( "GRANT ALL PRIVILEGES ON %s.* TO '%s'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION" % ( \
instance, userid, x, passwd ) )
DB.query( "SET PASSWORD FOR '%s'@'%s' = OLD_PASSWORD('%s')" % ( \
userid, x, passwd ) )
DB.close()
if instance and userid:
connection_string = Config.connection_string % { \
'instance' : instance,
'user' : userid,
'passwd' : passwd
}
Publish.setupProduct( DB, connection_string, dispatcher )
dispatcher._setObject(id, conn)
def manage_addExpressSuiteForm( self ):
"""
Returns ExpressSuite instance generator form
"""
add_expresssuite_form = HTMLFile('dtml/addExpressSuite', globals())
all_languages = []
for lang, info in Config.Languages.items():
all_languages.append( {
'id' : lang,
'title' : info['title'],
'default' : lang == Config.DefaultLanguage,
} )
try:
from Products.TextIndexNG2 import allStemmers
all_stemmers = allStemmers(self)
except ImportError:
all_stemmers = []
return add_expresssuite_form( self, all_languages=all_languages, all_stemmers=all_stemmers )
#manage_addExpressSuiteForm.__name__ = 'addExpressSuite'
def manage_addExpressSuite( self, id='common', title='Express Suite DMS', description='',
create_userfolder=1,
email_from_address=None,
email_from_name=None,
validate_email=0,
language=None,
stemmer=None,
REQUEST=None
):
"""
Adds ExpressSuite instance
"""
id = id.strip()
server_url = self.getPhysicalRoot().absolute_url()
if email_from_address is None:
email_from_address = 'postmaster@%s' % urlparse( server_url )[1].split(':')[0]
if email_from_name is None:
email_from_name = title
gen = PortalGenerator()
p = gen.create( self, id, language, create_userfolder )
gen.setupDefaultProperties( p, id, title, description, email_from_address, email_from_name,
validate_email, server_url, stemmer )
gen.setupAfterCreate( p, create_userfolder )
if REQUEST is not None:
REQUEST.RESPONSE.redirect(p.absolute_url() + '/finish_site_construction')
|
[
"ichar@g2.ru"
] |
ichar@g2.ru
|
b2329edad1a265e6327257d7b599655e15dc6cfd
|
3015b07ab56da859507abc3881385f4995980600
|
/fisher/spider/fisher_book.py
|
ca3eb35af32edf97a292eac9bc822d59581a2186
|
[] |
no_license
|
xuewen1696/fisher-book-practice
|
f65c559651f5a51d08cfdcb96a4fc8f96f481238
|
93ce16de333381196aaa2de4811559d5c27d7e0c
|
refs/heads/master
| 2022-12-10T06:31:25.912229
| 2018-07-28T07:26:32
| 2018-07-28T07:26:32
| 142,654,555
| 0
| 1
| null | 2022-12-08T02:22:41
| 2018-07-28T06:53:34
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
from fisher.libs.http_fisher import HTTP
from flask import current_app
class FisherBook:
pre_page = 15
isbn_url = 'http://t.yushu.im/v2/book/isbn/{}'
keyword_url = 'http://t.yushu.im/v2/book/search?q={}&start={}&count={}'
def __init__(self):
self.total = 0
self.books = []
def search_by_isbn(self, isbn):
url = self.isbn_url.format(isbn)
        # self.isbn_url also resolves to isbn_url -- chained attribute lookup
result = HTTP.get(url)
self.__fill_single(result)
def search_by_keyword(self, keyword, page=1):
# url = cls.keyword_url.format(keyword, current_app.config['PRE_PAGE'], cls.calculate_start(page))
url = self.keyword_url.format(keyword, self.calculate_start(page), current_app.config['PRE_PAGE'])
result = HTTP.get(url)
self.__fill_collection(result)
@staticmethod
def calculate_start(page):
return (page - 1)*current_app.config['PRE_PAGE']
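    # e.g. with PRE_PAGE = 15 (an assumed config value): page 1 -> start 0,
    # page 2 -> start 15, page 3 -> start 30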
def __fill_single(self, data):
if data:
self.total = 1
self.books.append(data)
def __fill_collection(self, data):
self.total = data['total']
self.books = data['books']
@property
def first(self):
return self.books[0] if self.total >= 1 else None
|
[
"xuewen1696@163.com"
] |
xuewen1696@163.com
|
bebb7ac47a7598ad344f55ae7d57daba858e56ea
|
c07380914a44df334194f234c33858f357365c19
|
/ENV/lib/python2.7/site-packages/theano/sandbox/gpuarray/neighbours.py
|
1f0c7529213f8f6c6d23f989bd3a641915b97fa9
|
[] |
no_license
|
damianpolan/Music-Genre-Classification
|
318952ae7de5d0b0bdf5676e28071c7b38d0e1c5
|
acd723ae1432ce798866ebb97ef3c484db37e971
|
refs/heads/master
| 2022-12-24T09:23:55.514337
| 2016-03-22T14:49:28
| 2016-03-22T14:49:28
| 42,965,899
| 4
| 4
| null | 2022-12-12T20:26:24
| 2015-09-22T23:05:37
|
Python
|
UTF-8
|
Python
| false
| false
| 18,919
|
py
|
import numpy
from theano import Op, Apply, config
from theano.gof import local_optimizer
from theano.tensor.nnet.neighbours import Images2Neibs
import theano.tensor as T
try:
import pygpu
from pygpu import gpuarray, elemwise
except ImportError:
pass
from theano.sandbox.gpuarray.basic_ops import (as_gpuarray_variable,
host_from_gpu, gpu_from_host)
from theano.sandbox.gpuarray.opt import register_opt as register_gpu_opt
from theano.sandbox.gpuarray.opt import op_lifter as op_lifter
from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.comp import NVCC_compiler
class GpuImages2Neibs(Images2Neibs, Op):
def __init__(self, mode='valid'):
if mode not in ['valid', 'ignore_borders', 'wrap_centered']:
raise NotImplementedError("Only the mode valid, ignore_borders"
" and wrap_centered"
" have been implemented for the op"
" GpuImages2Neibs")
self.mode = mode
def make_node(self, ten4, neib_shape, neib_step):
ten4 = as_gpuarray_variable(ten4)
neib_shape = T.as_tensor_variable(neib_shape)
neib_step = T.as_tensor_variable(neib_step)
assert ten4.ndim == 4
assert neib_shape.ndim == 1
assert neib_step.ndim == 1
assert "int" in neib_shape.dtype
assert "int" in neib_step.dtype
return Apply(self, [ten4, neib_shape, neib_step],
[GpuArrayType(broadcastable=(False, False),
dtype=ten4.type.dtype)()])
def c_code_cache_version(self):
return (9,1)
def c_headers(self):
return ['cuda.h', '<gpuarray/extension.h>', '<numpy_compat.h>',
'<gpuarray/ext_cuda.h>']
def c_compiler(self):
return NVCC_compiler
def c_init_code(self):
return ['setup_ext_cuda();']
def c_support_code_apply(self, node, nodename):
dtype_ten4 = node.inputs[0].dtype
dtype_z = node.outputs[0].dtype
mode = self.mode
return """
        //A version that uses fewer registers but doesn't work in all cases.
static __global__ void k_multi_warp_less_%(nodename)s(
const int nb_batch,
const int nb_stack,
const int height,
const int width,
const int c,
const int d,
const int step_x,
const int step_y,
const int grid_c,
const int grid_d,
const int stride0, const int stride1,
const int stride2, const int stride3,
npy_%(dtype_ten4)s * global_ten4,
const int out_s0, const int out_s1,
npy_%(dtype_z)s * global_out
)
{
const int wrap_centered_idx_shift_x = c/2;
const int wrap_centered_idx_shift_y = d/2;
for(int tblock = blockIdx.x*blockDim.z+threadIdx.z;
tblock<nb_batch*nb_stack*grid_c*grid_d;
tblock+=gridDim.x*blockDim.z){
const int b = tblock%%grid_d;
int left = tblock/grid_d;
const int a = left%%grid_c;
left = left/grid_c;
const int s = left%%nb_stack;
left = left/nb_stack;
const int n = left;
if(n>nb_batch)continue;
if(s>nb_stack)continue;
if(a>grid_c)continue;
if(b>grid_d)continue;
int z_row = b + grid_d*(a + grid_c*
(s + nb_stack*n));
int i = threadIdx.y; // loop over c
{
int ten4_2 = i + a * step_x;
if("%(mode)s"=="wrap_centered"){
ten4_2 -= wrap_centered_idx_shift_x;
if ( ten4_2 < 0 )
ten4_2 += height;
else if (ten4_2 >= height)
ten4_2 -= height;
}
int j = threadIdx.x; // loop over d
{
int ten4_3 = j + b * step_y;
if("%(mode)s"=="wrap_centered"){
ten4_3 -= wrap_centered_idx_shift_y;
if ( ten4_3 < 0 )
ten4_3 += width;
else if (ten4_3 >= width)
ten4_3 -= width;
}
int ten4_idx = stride3*ten4_3 +
stride2*ten4_2 +
stride1*s + stride0*n;
int z_col = j + d * i;
int z_idx = z_col * out_s1 +
z_row * out_s0;
global_out[z_idx] = global_ten4[ten4_idx];
}
}
}
}
static __global__ void k_multi_warp_%(nodename)s(
const int nb_batch,
const int nb_stack,
const int height,
const int width,
const int c,
const int d,
const int step_x,
const int step_y,
const int grid_c,
const int grid_d,
const int stride0, const int stride1,
const int stride2, const int stride3,
npy_%(dtype_ten4)s * global_ten4,
const int out_s0, const int out_s1,
npy_%(dtype_z)s * global_out
)
{
const int wrap_centered_idx_shift_x = c/2;
const int wrap_centered_idx_shift_y = d/2;
for(int tblock = blockIdx.x*blockDim.z+threadIdx.z;
tblock<nb_batch*nb_stack*grid_c*grid_d;
tblock+=gridDim.x*blockDim.z){
const int b = tblock%%grid_d;
int left = tblock/grid_d;
const int a = left%%grid_c;
left = left/grid_c;
const int s = left%%nb_stack;
left = left/nb_stack;
const int n = left;
if(n>nb_batch)continue;
if(s>nb_stack)continue;
if(a>grid_c)continue;
if(b>grid_d)continue;
int z_row = b + grid_d*(a + grid_c*
(s + nb_stack*n));
// loop over c
for (int i = threadIdx.y; i < c; i+=blockDim.y)
{
int ten4_2 = i + a * step_x;
if("%(mode)s"=="wrap_centered"){
ten4_2 -= wrap_centered_idx_shift_x;
if ( ten4_2 < 0 )
ten4_2 += height;
else if (ten4_2 >= height)
ten4_2 -= height;
}
// loop over d
for (int j = threadIdx.x; j < d; j+=blockDim.x)
{
int ten4_3 = j + b * step_y;
if("%(mode)s"=="wrap_centered"){
ten4_3 -= wrap_centered_idx_shift_y;
if ( ten4_3 < 0 )
ten4_3 += width;
else if (ten4_3 >= width)
ten4_3 -= width;
}
int ten4_idx = stride3*ten4_3 +
stride2*ten4_2 +
stride1*s + stride0*n;
int z_col = j + d * i;
int z_idx = z_col * out_s1 +
z_row * out_s0;
global_out[z_idx] = global_ten4[ten4_idx];
}
}
}
}
""" % locals()
def c_code(self, node, name, inp, out, sub):
dtype_ten4 = node.inputs[0].dtype
dtype_neib_shape = node.inputs[1].dtype
dtype_neib_step = node.inputs[2].dtype
dtype_z = node.outputs[0].dtype
itemsize_ten4 = numpy.dtype(dtype_ten4).itemsize
itemsize_z = numpy.dtype(dtype_z).itemsize
typecode_z = pygpu.gpuarray.dtype_to_typecode(node.outputs[0].dtype)
ten4, neib_shape, neib_step = inp
z, = out
fail = sub['fail']
mode = self.mode
if config.gpuarray.sync:
cnda_thread_sync = "GpuArray_sync(&%(z)s->ga);" % dict(z=z)
else:
cnda_thread_sync = ""
return """
#ifndef CEIL_INTDIV
#define CEIL_INTDIV(a, b) ((a/b) + ((a %% b) ? 1: 0))
#endif
int grid_c = -1;
int grid_d = -1;
{
if (PyGpuArray_NDIM(%(ten4)s) != 4)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: pvals wrong rank");
%(fail)s;
}
if (PyArray_NDIM(%(neib_shape)s) != 1)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: unis wrong rank");
%(fail)s;
}
if (PyArray_DIMS(%(neib_shape)s)[0] != 2)
{
PyErr_Format(PyExc_ValueError,
"GpuImages2Neibs: neib_shape has to contain two"
" elements");
%(fail)s;
}
const int c = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 0);
const int d = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 1);
const npy_intp step_x = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 0);
const npy_intp step_y = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 1);
if ( "%(mode)s" == "wrap_centered") {
if (c%%2!=1 || d%%2!=1){
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: in mode wrap_centered need patch with odd shapes");
%(fail)s;
}
if ( PyGpuArray_DIMS(%(ten4)s)[2] < c ||
PyGpuArray_DIMS(%(ten4)s)[3] < d)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: in wrap_centered mode,"
" don't support image shapes smaller then"
" the patch shapes: neib_shape=(%%d,%%d),"
" ten4[2:]=[%%d,%%d]",
c, d, PyGpuArray_DIMS(%(ten4)s)[2],
PyGpuArray_DIMS(%(ten4)s)[3]);
%(fail)s;
}
grid_c = CEIL_INTDIV(((PyGpuArray_DIMS(%(ten4)s))[2]),
step_x);
grid_d = CEIL_INTDIV(((PyGpuArray_DIMS(%(ten4)s))[3]),
step_y);
}else if ( "%(mode)s" == "valid") {
if ( ((PyGpuArray_DIMS(%(ten4)s))[2] < c) ||
((((PyGpuArray_DIMS(%(ten4)s))[2]-c) %% step_x)!=0))
{
PyErr_Format(PyExc_TypeError, "GpuImages2Neibs:"
" neib_shape[0]=%%d, neib_step[0]=%%d and"
" ten4.shape[2]=%%d not consistent",
c, step_x,
PyGpuArray_DIMS(%(ten4)s)[2]);
%(fail)s;
}
if ( ((PyGpuArray_DIMS(%(ten4)s))[3] < d) ||
((((PyGpuArray_DIMS(%(ten4)s))[3]-d) %% step_y)!=0))
{
PyErr_Format(PyExc_TypeError, "GpuImages2Neibs:"
" neib_shape[1]=%%d, neib_step[1]=%%d and"
" ten4.shape[3]=%%d not consistent",
d, step_y,
PyGpuArray_DIMS(%(ten4)s)[3]);
%(fail)s;
}
//number of patch in height
grid_c = 1+(((PyGpuArray_DIMS(%(ten4)s))[2]-c)/step_x);
//number of patch in width
grid_d = 1+(((PyGpuArray_DIMS(%(ten4)s))[3]-d)/step_y);
}else if ( "%(mode)s" == "ignore_borders") {
//number of patch in height
grid_c = 1+(((PyGpuArray_DIMS(%(ten4)s))[2]-c)/step_x);
//number of patch in width
grid_d = 1+(((PyGpuArray_DIMS(%(ten4)s))[3]-d)/step_y);
}else{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs:: unknown mode '%(mode)s'");
%(fail)s;
}
// new dimensions for z
const int z_dim1 = c * d;
const int z_dim0 = grid_c
* grid_d
* PyGpuArray_DIMS(%(ten4)s)[1]
* PyGpuArray_DIMS(%(ten4)s)[0];
if ((NULL == %(z)s)
|| (PyGpuArray_DIMS(%(z)s)[0] != z_dim0)
|| (PyGpuArray_DIMS(%(z)s)[1] != z_dim1))
{
Py_XDECREF(%(z)s);
size_t dims[2];
dims[0] = z_dim0;
dims[1] = z_dim1;
%(z)s = pygpu_empty(2, dims, %(typecode_z)s,
GA_C_ORDER, pygpu_default_context(),
Py_None);
if (!%(z)s)
{
PyErr_SetString(PyExc_MemoryError, "GpuImages2Neibs:"
" failed to alloc z output");
%(fail)s;
}
}
}
{ // NESTED SCOPE
const int nb_batch = PyGpuArray_DIMS(%(ten4)s)[0];
const int nb_stack = PyGpuArray_DIMS(%(ten4)s)[1];
const int height = PyGpuArray_DIMS(%(ten4)s)[2];
const int width = PyGpuArray_DIMS(%(ten4)s)[3];
const int c = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 0);
const int d = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 1);
const npy_intp step_x = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 0);
const npy_intp step_y = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 1);
dim3 n_threads(d,c,1);
        //There is a max of 512 threads per block.
while(n_threads.x*n_threads.y>512 && n_threads.y>1)n_threads.y--;
while(n_threads.x*n_threads.y>512 && n_threads.x>1)n_threads.x--;
        //Make bigger blocks to get a better memory access pattern and
        //higher core utilisation for smaller patch sizes.
while(c*d*(n_threads.z+1) < 128 && n_threads.z<64 &&
n_threads.z<PyGpuArray_DIMS(%(z)s)[0]){
n_threads.z++;
}
int nb_block;
if (PyGpuArray_DIMS(%(z)s)[0] %% n_threads.z == 0)
nb_block = PyGpuArray_DIMS(%(z)s)[0] / n_threads.z;
else
nb_block = (PyGpuArray_DIMS(%(z)s)[0] / n_threads.z) + 1;
dim3 n_blocks(std::min(32*1024,nb_block));
int n_shared = 0;
void (*f)(int, int, int ,int,
int, int, int ,int,
int, int,
int, int, int, int,
npy_%(dtype_ten4)s*,
int, int,
npy_%(dtype_z)s*);
if(n_threads.x==d && n_threads.y==c){
f = k_multi_warp_less_%(name)s;
}else{
f = k_multi_warp_%(name)s;
}
f<<<n_blocks, n_threads, n_shared>>>(
nb_batch,
nb_stack,
height, width,
c, d, step_x, step_y,
grid_c, grid_d,
PyGpuArray_STRIDES(%(ten4)s)[0] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[1] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[2] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[3] / %(itemsize_ten4)s,
(npy_%(dtype_ten4)s*)(
((char *)cuda_get_ptr(%(ten4)s->ga.data)) +
%(ten4)s->ga.offset),
PyGpuArray_STRIDES(%(z)s)[0] / %(itemsize_z)s,
PyGpuArray_STRIDES(%(z)s)[1] / %(itemsize_z)s,
(npy_%(dtype_z)s*)(((char *)cuda_get_ptr(%(z)s->ga.data)) +
%(z)s->ga.offset)
);
%(cnda_thread_sync)s
cudaError_t sts = cudaGetLastError();
if (cudaSuccess != sts)
{
PyErr_Format(PyExc_RuntimeError, "GpuImages2Neibs:"
" Cuda error: %%s: %%s. (grid: %%i x %%i;"
" block: %%i x %%i x %%i; shared: %%i)\\n",
"k_multi_warp_%(name)s",
cudaGetErrorString(sts),
n_blocks.x,
n_blocks.y,
n_threads.x,
n_threads.y,
n_threads.z,
n_shared);
%(fail)s;
}
} // END NESTED SCOPE
""" % locals()
@op_lifter([Images2Neibs])
def use_gpu_images2neibs(node):
if node.op.mode in ['valid', 'ignore_borders', 'wrap_centered']:
return GpuImages2Neibs(node.op.mode)
register_gpu_opt()(use_gpu_images2neibs)
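# Illustrative note (added here, not in upstream Theano): user code never
# instantiates GpuImages2Neibs directly. With the lifter registered above,
# a graph built from the CPU op, e.g. images2neibs(ten4, (3, 3)), is
# rewritten to this GPU op when its inputs live on the GPU.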
|
[
"damian.polan@gmail.com"
] |
damian.polan@gmail.com
|
b0dcde257cf60b3ff95c8d677121bbedec3ea846
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/pkgs/cytoolz-0.7.5-py27_0/lib/python2.7/site-packages/cytoolz/tests/test_none_safe.py
|
62f6280f931530d908a7249f648b54df00f1d677
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 11,403
|
py
|
""" Test that functions are reasonably behaved with None as input.
Typed Cython objects (like dict) may also be None. Using functions from
Python's C API that expect a specific type but receive None instead can cause
problems such as throwing an uncatchable SystemError (and some systems may
segfault instead). We obviously don't want that to happen! As the tests
below discovered, this turned out to be a rare occurrence. The only changes
required were to use `d.copy()` instead of `PyDict_Copy(d)`, and to always
return Python objects from functions instead of int or bint (so exceptions
can propagate).
The vast majority of functions throw TypeError. The vast majority of
functions also behave the same in `toolz` and `cytoolz`. However, there
are a few minor exceptions. Since passing None to functions are edge cases
that don't have well-established behavior yet (other than raising TypeError),
the tests in this file serve to verify that the behavior is at least
reasonably well-behaved and don't cause SystemErrors.
"""
# XXX: This file could be back-ported to `toolz` once unified testing exists.
import cytoolz
from cytoolz import *
from cytoolz.utils import raises
from operator import add
class GenException(object):
def __init__(self, exc):
self.exc = exc
def __iter__(self):
return self
def __next__(self):
raise self.exc
def next(self):
raise self.exc
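# Illustrative sketch (an addition, not from the upstream suite): every test
# below leans on cytoolz.utils.raises, which returns True when calling the
# thunk raises the given exception type, so None inputs are asserted to fail
# with an ordinary, catchable TypeError instead of a SystemError or segfault.
def _demo_none_safety():
    assert raises(TypeError, lambda: first(None))  # loud, catchable failure
    assert isiterable(None) is False               # one of the few None-safe calls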
def test_dicttoolz():
tested = []
assert raises((TypeError, AttributeError), lambda: assoc(None, 1, 2))
tested.append('assoc')
assert raises((TypeError, AttributeError), lambda: dissoc(None, 1))
tested.append('dissoc')
# XXX
assert (raises(TypeError, lambda: get_in(None, {})) or
get_in(None, {}) is None)
assert raises(TypeError, lambda: get_in(None, {}, no_default=True))
assert get_in([0, 1], None) is None
assert raises(TypeError, lambda: get_in([0, 1], None, no_default=True))
tested.append('get_in')
assert raises(TypeError, lambda: keyfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: keyfilter(identity, None))
tested.append('keyfilter')
# XXX
assert (raises(TypeError, lambda: keymap(None, {1: 2})) or
keymap(None, {1: 2}) == {(1,): 2})
assert raises((AttributeError, TypeError), lambda: keymap(identity, None))
tested.append('keymap')
assert raises(TypeError, lambda: merge(None))
assert raises((TypeError, AttributeError), lambda: merge(None, None))
tested.append('merge')
assert raises(TypeError, lambda: merge_with(None, {1: 2}, {3: 4}))
assert raises(TypeError, lambda: merge_with(identity, None))
assert raises((TypeError, AttributeError),
lambda: merge_with(identity, None, None))
tested.append('merge_with')
assert raises(TypeError, lambda: update_in({1: {2: 3}}, [1, 2], None))
assert raises(TypeError, lambda: update_in({1: {2: 3}}, None, identity))
assert raises((TypeError, AttributeError),
lambda: update_in(None, [1, 2], identity))
tested.append('update_in')
assert raises(TypeError, lambda: valfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: valfilter(identity, None))
tested.append('valfilter')
# XXX
assert (raises(TypeError, lambda: valmap(None, {1: 2})) or
valmap(None, {1: 2}) == {1: (2,)})
assert raises((AttributeError, TypeError), lambda: valmap(identity, None))
tested.append('valmap')
assert (raises(TypeError, lambda: itemmap(None, {1: 2})) or
itemmap(None, {1: 2}) == {1: (2,)})
assert raises((AttributeError, TypeError), lambda: itemmap(identity, None))
tested.append('itemmap')
assert raises(TypeError, lambda: itemfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: itemfilter(identity, None))
tested.append('itemfilter')
s1 = set(tested)
s2 = set(cytoolz.dicttoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_functoolz():
tested = []
assert raises(TypeError, lambda: complement(None)())
tested.append('complement')
assert compose(None) is None
assert raises(TypeError, lambda: compose(None, None)())
tested.append('compose')
assert raises(TypeError, lambda: curry(None))
tested.append('curry')
assert raises(TypeError, lambda: do(None, 1))
tested.append('do')
assert identity(None) is None
tested.append('identity')
assert raises(TypeError, lambda: juxt(None))
assert raises(TypeError, lambda: list(juxt(None, None)()))
tested.append('juxt')
assert memoize(identity, key=None)(1) == 1
assert memoize(identity, cache=None)(1) == 1
tested.append('memoize')
assert raises(TypeError, lambda: pipe(1, None))
tested.append('pipe')
assert thread_first(1, None) is None
tested.append('thread_first')
assert thread_last(1, None) is None
tested.append('thread_last')
assert flip(lambda a, b: (a, b))(None)(None) == (None, None)
tested.append('flip')
s1 = set(tested)
s2 = set(cytoolz.functoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_itertoolz():
tested = []
assert raises(TypeError, lambda: list(accumulate(None, [1, 2])))
assert raises(TypeError, lambda: list(accumulate(identity, None)))
tested.append('accumulate')
assert raises(TypeError, lambda: concat(None))
assert raises(TypeError, lambda: list(concat([None])))
tested.append('concat')
assert raises(TypeError, lambda: list(concatv(None)))
tested.append('concatv')
assert raises(TypeError, lambda: list(cons(1, None)))
tested.append('cons')
assert raises(TypeError, lambda: count(None))
tested.append('count')
# XXX
assert (raises(TypeError, lambda: list(drop(None, [1, 2]))) or
list(drop(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(drop(1, None)))
tested.append('drop')
assert raises(TypeError, lambda: first(None))
tested.append('first')
assert raises(TypeError, lambda: frequencies(None))
tested.append('frequencies')
assert raises(TypeError, lambda: get(1, None))
assert raises(TypeError, lambda: get([1, 2], None))
tested.append('get')
assert raises(TypeError, lambda: groupby(None, [1, 2]))
assert raises(TypeError, lambda: groupby(identity, None))
tested.append('groupby')
assert raises(TypeError, lambda: list(interleave(None)))
assert raises(TypeError, lambda: list(interleave([None, None])))
assert raises(TypeError,
lambda: list(interleave([[1, 2], GenException(ValueError)],
pass_exceptions=None)))
tested.append('interleave')
assert raises(TypeError, lambda: list(interpose(1, None)))
tested.append('interpose')
assert raises(TypeError, lambda: isdistinct(None))
tested.append('isdistinct')
assert isiterable(None) is False
tested.append('isiterable')
assert raises(TypeError, lambda: list(iterate(None, 1)))
tested.append('iterate')
assert raises(TypeError, lambda: last(None))
tested.append('last')
# XXX
assert (raises(TypeError, lambda: list(mapcat(None, [[1], [2]]))) or
list(mapcat(None, [[1], [2]])) == [[1], [2]])
assert raises(TypeError, lambda: list(mapcat(identity, [None, [2]])))
assert raises(TypeError, lambda: list(mapcat(identity, None)))
tested.append('mapcat')
assert raises(TypeError, lambda: list(merge_sorted(None, [1, 2])))
tested.append('merge_sorted')
assert raises(TypeError, lambda: nth(None, [1, 2]))
assert raises(TypeError, lambda: nth(0, None))
tested.append('nth')
assert raises(TypeError, lambda: partition(None, [1, 2, 3]))
assert raises(TypeError, lambda: partition(1, None))
tested.append('partition')
assert raises(TypeError, lambda: list(partition_all(None, [1, 2, 3])))
assert raises(TypeError, lambda: list(partition_all(1, None)))
tested.append('partition_all')
assert raises(TypeError, lambda: list(pluck(None, [[1], [2]])))
assert raises(TypeError, lambda: list(pluck(0, [None, [2]])))
assert raises(TypeError, lambda: list(pluck(0, None)))
tested.append('pluck')
assert raises(TypeError, lambda: reduceby(None, add, [1, 2, 3], 0))
assert raises(TypeError, lambda: reduceby(identity, None, [1, 2, 3], 0))
assert raises(TypeError, lambda: reduceby(identity, add, None, 0))
tested.append('reduceby')
assert raises(TypeError, lambda: list(remove(None, [1, 2])))
assert raises(TypeError, lambda: list(remove(identity, None)))
tested.append('remove')
assert raises(TypeError, lambda: second(None))
tested.append('second')
# XXX
assert (raises(TypeError, lambda: list(sliding_window(None, [1, 2, 3]))) or
list(sliding_window(None, [1, 2, 3])) == [])
assert raises(TypeError, lambda: list(sliding_window(1, None)))
tested.append('sliding_window')
# XXX
assert (raises(TypeError, lambda: list(take(None, [1, 2])) == [1, 2]) or
list(take(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(take(1, None)))
tested.append('take')
# XXX
assert (raises(TypeError, lambda: list(tail(None, [1, 2])) == [1, 2]) or
list(tail(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(tail(1, None)))
tested.append('tail')
# XXX
assert (raises(TypeError, lambda: list(take_nth(None, [1, 2]))) or
list(take_nth(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(take_nth(1, None)))
tested.append('take_nth')
assert raises(TypeError, lambda: list(unique(None)))
assert raises(TypeError, lambda: list(unique([1, 1, 2], key=None)))
tested.append('unique')
assert raises(TypeError, lambda: join(first, None, second, (1, 2, 3)))
assert raises(TypeError, lambda: join(first, (1, 2, 3), second, None))
tested.append('join')
assert raises(TypeError, lambda: topk(None, [1, 2, 3]))
assert raises(TypeError, lambda: topk(3, None))
tested.append('topk')
assert raises(TypeError, lambda: list(diff(None, [1, 2, 3])))
assert raises(TypeError, lambda: list(diff(None)))
assert raises(TypeError, lambda: list(diff([None])))
assert raises(TypeError, lambda: list(diff([None, None])))
tested.append('diff')
assert raises(TypeError, lambda: peek(None))
tested.append('peek')
s1 = set(tested)
s2 = set(cytoolz.itertoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_recipes():
tested = []
# XXX
assert (raises(TypeError, lambda: countby(None, [1, 2])) or
countby(None, [1, 2]) == {(1,): 1, (2,): 1})
assert raises(TypeError, lambda: countby(identity, None))
tested.append('countby')
# XXX
assert (raises(TypeError, lambda: list(partitionby(None, [1, 2]))) or
list(partitionby(None, [1, 2])) == [(1,), (2,)])
assert raises(TypeError, lambda: list(partitionby(identity, None)))
tested.append('partitionby')
s1 = set(tested)
s2 = set(cytoolz.recipes.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
4201aaf82a13c985bc5ed36fc69b99f462bf3731
|
7edbf1eb8a991e91192ab8ecf28d801080b2e230
|
/english/models.py
|
9072c4bef8292fd3cc1343d173be5d2012aa759f
|
[
"MIT"
] |
permissive
|
johncadigan/myenglishcloud
|
5b63d0d079ded93fbb539127a011ee17c3cb17d9
|
c698243866ce3edf864ad0e0c9a126aee57a54c0
|
refs/heads/master
| 2021-01-10T09:55:30.506142
| 2015-12-21T00:06:22
| 2015-12-21T00:06:22
| 48,341,436
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52,603
|
py
|
# -*- coding: utf-8 -*-
import hashlib
import random
import string
import uuid  # needed by Picture.from_file / update_with_file below
import transaction
import json
import os
import datetime
import Image
import errno
from datetime import date
import re
from random import shuffle, randint
from cryptacular.bcrypt import BCRYPTPasswordManager
from slugify import slugify
import glob
from pyramid.threadlocal import get_current_request
from pyramid.util import DottedNameResolver
from pyramid.security import (Everyone,
Allow,
Deny
)
from sqlalchemy import (Column,
ForeignKey,
event,
Index,
Table,
types,
Unicode,
select,
func,
case)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import synonym
from sqlalchemy.sql.expression import func
from sqlalchemy_utils import URLType
#from velruse.store.sqlstore import SQLBase
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
##### Helper Functs
QUIZ_DIRECTORY = 'english/static/uploads/quizzes/'
PICTURE_DIRECTORY = 'english/static/uploads/pictures/'
PICTURE_SIZES = [(256, 256), (128, 128), (64,64)]
PICTURE_SUBDIRECTORIES = ["original"] + ["{0}x{1}".format(x[0], x[1]) for x in PICTURE_SIZES]
PICTURE_DIRECTORIES = [os.path.join(PICTURE_DIRECTORY, s) for s in PICTURE_SUBDIRECTORIES]
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
#for x in PICTURE_DIRECTORIES: make_sure_path_exists(x)
"""USER MODELS"""
auth_group_table = Table('auth_auth_groups', Base.metadata,
Column('auth_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('group_id', types.Integer(), \
ForeignKey('auth_groups.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (auth_id,group_id)
Index('auth_group', auth_group_table.c.auth_id, auth_group_table.c.group_id)
class AuthGroup(Base):
""" Table name: auth_groups
::
id = Column(types.Integer(), primary_key=True)
name = Column(Unicode(80), unique=True, nullable=False)
description = Column(Unicode(255), default=u'')
"""
__tablename__ = 'auth_groups'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(Unicode(80), unique=True, nullable=False)
description = Column(Unicode(255), default=u'')
users = relationship('AuthID', secondary=auth_group_table, \
backref='auth_groups')
def __repr__(self):
return u'%s' % self.name
def __unicode__(self):
return self.name
user_finished_content = Table('user_finished_content', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_finished_content', user_finished_content.c.user_id, user_finished_content.c.content_id)
user_added_content_vocab = Table('user_added_content_vocab', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_added_content_vocab', user_added_content_vocab.c.user_id, user_added_content_vocab.c.content_id)
user_voted_content_difficulty = Table('user_voted_content_difficulty', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_voted_content_difficulty', user_voted_content_difficulty.c.user_id, user_voted_content_difficulty.c.content_id)
user_voted_content_quality = Table('user_voted_content_quality', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_voted_content_quality', user_voted_content_quality.c.user_id, user_voted_content_quality.c.content_id)
class AuthID(Base):
""" Table name: auth_id
::
id = Column(types.Integer(), primary_key=True)
display_name = Column(Unicode(80), default=u'')
active = Column(types.Enum(u'Y',u'N',u'D', name=u"active"), default=u'Y')
created = Column(types.DateTime(), default=func.now())
"""
__tablename__ = 'auth_id'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
display_name = Column(Unicode(80), default=u'')
active = Column(types.Enum(u'Y',u'N',u'D', name=u"active"), default=u'Y')
created = Column(types.DateTime(), default=func.now())
groups = relationship('AuthGroup', secondary=auth_group_table, \
backref='auth_users')
users = relationship('AuthUser')
preferred_language = Column(types.Integer, ForeignKey('languages.id'))
added_vocab = relationship('Content', secondary=user_added_content_vocab, \
backref='vocab_adders')
finished_content = relationship('Content', secondary=user_finished_content, \
backref='finishers')
rated_content_difficulty = relationship('Content', secondary=user_voted_content_difficulty, \
backref='difficulty_raters')
rated_content_quality = relationship('Content', secondary=user_voted_content_quality, \
backref='quality_raters')
flashcards = relationship('Flashcard')
"""
Fix this to use association_proxy
groups = association_proxy('auth_group_table', 'authgroup')
"""
last_login = relationship('AuthUserLog', \
order_by='AuthUserLog.id.desc()')
login_log = relationship('AuthUserLog', \
order_by='AuthUserLog.id')
def in_group(self, group):
"""
Returns True or False if the user is or isn't in the group.
"""
return group in [g.name for g in self.groups]
def sorted_flashcards(self):
flashcards = {'all' : len(self.flashcards)}
flashcards.setdefault('toAdd', [])
flashcards.setdefault('toPractice', [])
flashcards.setdefault('overdue', [])
flashcards.setdefault('today', [])
flashcards.setdefault('due', [])
flashcards.setdefault('tomorrow', [])
flashcards.setdefault('next_week', [])
flashcards.setdefault('this_week', [])
for flashcard in self.flashcards:
if flashcard.due.toordinal()-datetime.datetime.now().toordinal() < 0:
flashcards['overdue'].append(flashcard)
flashcards['due'].append(flashcard)
if flashcard.level == 'Show':
flashcards['toAdd'].append(flashcard)
else:
flashcards['toPractice'].append(flashcard)
elif flashcard.due.toordinal()-datetime.datetime.now().toordinal() == 0:
flashcards['today'].append(flashcard)
flashcards['due'].append(flashcard)
if flashcard.level == 'Show':
flashcards['toAdd'].append(flashcard)
else:
flashcards['toPractice'].append(flashcard)
elif 0 < flashcard.due.toordinal()-datetime.datetime.now().toordinal() <= 1:
flashcards['tomorrow'].append(flashcard)
elif flashcard.due.toordinal()-datetime.datetime.now().toordinal() <= 6:
flashcards['this_week'].append(flashcard)
elif 6 < flashcard.due.toordinal()-datetime.datetime.now().toordinal() <= 13:
flashcards['next_week'].append(flashcard)
flashcards['due#'] = len(flashcards['due'])
flashcards['toAdd#'] = len(flashcards['toAdd'])
flashcards['toPractice#'] = len(flashcards['toPractice'])
return flashcards
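    # Worked example (added): a card due yesterday lands in both 'overdue'
    # and 'due', and in 'toAdd' or 'toPractice' depending on whether its
    # level is still 'Show'; a card due in 2-6 days lands in 'this_week',
    # and in 7-13 days in 'next_week'.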
@classmethod
def get_by_id(cls, id):
"""
Returns AuthID object or None by id
.. code-block:: python
from apex.models import AuthID
user = AuthID.get_by_id(1)
"""
return DBSession.query(cls).filter(cls.id==id).first()
@classmethod
def get_by_display_name(cls, display_name):
"""
        Returns AuthID object or None by display_name
        .. code-block:: python
            from apex.models import AuthID
            user = AuthID.get_by_display_name('display name')
"""
return DBSession.query(cls).filter(cls.display_name==display_name).first()
def get_profile(self, request=None):
"""
Returns AuthUser.profile object, creates record if it doesn't exist.
.. code-block:: python
from apex.models import AuthUser
user = AuthUser.get_by_id(1)
profile = user.get_profile(request)
in **development.ini**
.. code-block:: python
apex.auth_profile =
"""
if not request:
request = get_current_request()
auth_profile = request.registry.settings.get('apex.auth_profile')
if auth_profile:
resolver = DottedNameResolver(auth_profile.split('.')[0])
profile_cls = resolver.resolve(auth_profile)
return get_or_create(DBSession, profile_cls, auth_id=self.id)
@property
def group_list(self):
group_list = []
if self.groups:
for group in self.groups:
group_list.append(group.name)
return ','.join( map( str, group_list ) )
class AuthUser(Base):
""" Table name: auth_users
::
id = Column(types.Integer(), primary_key=True)
login = Column(Unicode(80), default=u'', index=True)
_password = Column('password', Unicode(80), default=u'')
email = Column(Unicode(80), default=u'', index=True)
active = Column(types.Enum(u'Y',u'N',u'D'), default=u'Y')
"""
__tablename__ = 'auth_users'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
auth_id = Column(types.Integer, ForeignKey(AuthID.id), index=True)
provider = Column(Unicode(80), default=u'local', index=True)
login = Column(Unicode(80), default=u'', index=True)
salt = Column(Unicode(24))
_password = Column('password', Unicode(80), default=u'')
email = Column(Unicode(80), default=u'', index=True)
created = Column(types.DateTime(), default=func.now())
active = Column(types.Enum(u'Y',u'N',u'D', name=u"active"), default=u'Y')
def _set_password(self, password):
self.salt = self.get_salt(24)
password = password + self.salt
self._password = BCRYPTPasswordManager().encode(password, rounds=12)
def _get_password(self):
return self._password
password = synonym('_password', descriptor=property(_get_password, \
_set_password))
def get_salt(self, length):
m = hashlib.sha256()
word = ''
for i in xrange(length):
word += random.choice(string.ascii_letters)
m.update(word)
return unicode(m.hexdigest()[:length])
@classmethod
def get_by_id(cls, id):
"""
Returns AuthUser object or None by id
.. code-block:: python
            from apex.models import AuthUser
            user = AuthUser.get_by_id(1)
"""
return DBSession.query(cls).filter(cls.id==id).first()
@classmethod
def get_by_login(cls, login):
"""
Returns AuthUser object or None by login
.. code-block:: python
from apex.models import AuthUser
user = AuthUser.get_by_login('login')
"""
return DBSession.query(cls).filter(cls.login==login).first()
@classmethod
def get_by_email(cls, email):
"""
Returns AuthUser object or None by email
.. code-block:: python
from apex.models import AuthUser
user = AuthUser.get_by_email('email@address.com')
"""
return DBSession.query(cls).filter(cls.email==email).first()
@classmethod
def check_password(cls, **kwargs):
if kwargs.has_key('id'):
user = cls.get_by_id(kwargs['id'])
if kwargs.has_key('login'):
user = cls.get_by_login(kwargs['login'])
if not user:
return False
try:
if BCRYPTPasswordManager().check(user.password,
'%s%s' % (kwargs['password'], user.salt)):
return True
except TypeError:
pass
request = get_current_request()
# fallback_auth = request.registry.settings.get('apex.fallback_auth')
# if fallback_auth:
# resolver = DottedNameResolver(fallback_auth.split('.', 1)[0])
#fallback = resolver.resolve(fallback_auth)
#return fallback().check(DBSession, request, user, \
#kwargs['password'])
return False
class AuthUserLog(Base):
"""
event:
L - Login
R - Register
P - Password
F - Forgot
"""
__tablename__ = 'auth_user_log'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
auth_id = Column(types.Integer, ForeignKey(AuthID.id), index=True)
user_id = Column(types.Integer, ForeignKey(AuthUser.id), index=True)
time = Column(types.DateTime(), default=func.now())
ip_addr = Column(Unicode(39), nullable=False)
event = Column(types.Enum(u'L',u'R',u'P',u'F', name=u"event"), default=u'L')
class Country(Base):
__tablename__= 'countries'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
name = Column(Unicode(50), nullable=False)
image = Column(Unicode(50), nullable=False)
language_profile_pairs = Table('language_profile_pairs', Base.metadata,
Column('language_id', types.Integer(), \
ForeignKey('languages.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('profile_id', types.Integer(), \
ForeignKey('profiles.id', onupdate='CASCADE', ondelete='CASCADE'))
)
Index('language_profile', language_profile_pairs.c.language_id, language_profile_pairs.c.profile_id)
class Profile(Base):
__tablename__= 'profiles'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
owner = Column(types.Integer, ForeignKey(AuthID.id), index=True)
picture_id = Column(types.Integer, ForeignKey('pictures.id'))
name = Column(Unicode(50))
date_of_birth = Column(types.Date())
country_id = Column(types.Integer, ForeignKey('countries.id'))
city = Column(Unicode(50))
about_me = Column(Unicode(1000))
languages = relationship('Language', secondary=language_profile_pairs, \
backref='languages')
class Language(Base):
__tablename__= 'languages'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
english_name = Column(Unicode(50), nullable=False)
native_name = Column(Unicode(50), nullable=False)
iso_lang = Column(Unicode(10))
goog_translate = Column(Unicode(10))
profiles = relationship('Profile', secondary=language_profile_pairs, \
backref='profiles')
class Card(Base):
__tablename__='cards'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
lemma_id = Column(ForeignKey('english_lemmas.id', onupdate='CASCADE', ondelete='CASCADE'))
language_id = Column(ForeignKey('languages.id', onupdate='CASCADE', ondelete='CASCADE'))
translations = relationship('Translation')
class Translation(Base):
__tablename__='translations'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
card_id = Column(ForeignKey('cards.id', onupdate='CASCADE', ondelete='CASCADE'))
foreign_lemma_id = Column(ForeignKey('foreign_lemmas.id', onupdate='CASCADE', ondelete='CASCADE'))
count = Column(types.Integer, default=1, index=True)
lemma_content_pairs = Table('lemma_content_pairs', Base.metadata,
Column('english_lemma_id', types.Integer(), \
ForeignKey('english_lemmas.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (auth_id,group_id)
Index('english_lemma_content', lemma_content_pairs.c.english_lemma_id, lemma_content_pairs.c.content_id)
class EnglishLemma(Base):
""" N=Noun, PR=Pronoun, ADJ=Adjective, ADV=Adverb, VB=Verb, PVB=Phrasal Verb, PP=Preposition, CNJ=Conjunction,
"""
__tablename__= 'english_lemmas'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
owner = Column(ForeignKey('auth_id.id'))
form_id = Column(ForeignKey('english_forms.id'))
form = relationship('EnglishForm')
example_sentence = Column(Unicode(100), nullable=False)
pos = Column(types.Enum(u'Noun',u'Pronoun',u'Adjective', u'Adverb', u'Verb', u'Phrasal Verb', u'Preposition', u'Conjunction', u'Collocation', u'Slang'), nullable=False)
picture_id = Column(types.Integer, ForeignKey('pictures.id'))
picture = relationship("Picture")
content_ids = relationship('Content', secondary=lemma_content_pairs, \
backref='content_ids')
class EnglishLemmaCategory(Base):
__tablename__= 'english_lemma_categories'
__mapper_args__ = {'batch': False # allows extension to fire for each
# instance before going to the next.
}
parent = None
id = Column(types.Integer(), primary_key=True)
name = Column(types.Unicode(100))
lemma_id = Column(ForeignKey('english_lemmas.id'))
level = Column("lvl", types.Integer, nullable=False)
left = Column("lft", types.Integer, nullable=False)
right = Column("rgt", types.Integer, nullable=False)
@event.listens_for(EnglishLemmaCategory, "before_insert")
def before_insert(mapper, connection, instance):
print 'making adjustments before insertion'
#If the new term has no parent, connect to root
    if instance.parent is None:
category = mapper.mapped_table
        # Read the root row by column name; the old positional values()
        # indexing silently picked the wrong columns once the primary-key
        # column is counted (values[0] is the id, not the name).
        row = connection.execute(select([category]).where(category.c.name == 'ALL')).first()
        parent = EnglishLemmaCategory()
        parent.name = row['name']
        parent.level = row['lvl']
        parent.left = row['lft']
        parent.right = row['rgt']
instance.parent = parent
category = mapper.mapped_table
#Find right most sibling's right value
right_most_sibling = connection.scalar(
select([category.c.rgt]).
where(category.c.name == instance.parent.name)
)
#Update all values greater than rightmost sibiling
connection.execute(
category.update(
category.c.rgt >= right_most_sibling).values(
#Update if left bound in greater than rightmost sibling
lft=case(
[(category.c.lft > right_most_sibling,
category.c.lft + 2)],
else_=category.c.lft
),
#Update if right bound is greater than right most sibling
rgt=case(
[(category.c.rgt >= right_most_sibling,
category.c.rgt + 2)],
else_=category.c.rgt
)
)
)
instance.left = right_most_sibling
instance.right = right_most_sibling + 1
instance.level = instance.parent.level + 1
@event.listens_for(EnglishLemmaCategory, "after_delete")
def after_delete(mapper, connection, target):
category = mapper.mapped_table
#Delete leaf
if target.right-target.left == 1:
print 'updating after deletion of leaf'
#Update all values greater than right side
connection.execute(
category.update(
category.c.rgt > target.left).values(
#Update if left bound in greater than right side
lft=case(
[(category.c.lft > target.left,
category.c.lft - 2)],
else_=category.c.lft
),
#Update if right bound is greater than right
rgt=case(
[(category.c.rgt >= target.left,
category.c.rgt - 2)],
else_=category.c.rgt
)
)
)
#Delete parent
else:
print 'updating after deletion of parent'
category = mapper.mapped_table
#Promote all children
connection.execute(
category.update(
category.c.lft.between(target.left, target.right)).values(
#Update if left bound in greater than right side
lft=case(
[(category.c.lft > target.left,
category.c.lft - 1)],
else_=category.c.lft
),
#Update if right bound is greater than right
rgt=case(
[(category.c.rgt > target.left,
category.c.rgt - 1)],
else_=category.c.rgt
),
lvl=case([(category.c.lvl > target.level,
category.c.lvl - 1)],
else_=category.c.lvl
)
)
)
#Update all values greater than right side
connection.execute(
category.update(
category.c.rgt > target.right).values(
#Update if left bound in greater than right side
lft=case(
[(category.c.lft > target.left,
category.c.lft - 2)],
else_=category.c.lft
),
#Update if right bound is greater than right
rgt=case(
[(category.c.rgt >= target.left,
category.c.rgt - 2)],
else_=category.c.rgt
)
)
)
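# Worked example (added for clarity, not in the original): with a root 'ALL'
# at (lft=1, rgt=4) holding a single child at (lft=2, rgt=3), inserting a
# second child under 'ALL' shifts every boundary >= 4 right by 2, so the
# root becomes (1, 6) and the new leaf lands at (lft=4, rgt=5); the delete
# listener above reverses that shift.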
class EnglishForm(Base):
"""
"""
__tablename__= 'english_forms'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
form = Column(Unicode(50), nullable=False)
class FormInfo(Base):
"""
"""
__tablename__= 'form_infos'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
form_id = Column(types.Integer, ForeignKey('english_forms.id'))
definitions = Column(Unicode(1000))
freq = Column(types.Integer)
class ForeignLemma(Base):
""" N=Noun, PR=Pronoun, ADJ=Adjective, ADV=Adverb, VB=Verb, PVB=Phrasal Verb, PP=Preposition, CNJ=Conjunction,
"""
__tablename__= 'foreign_lemmas'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
language_id = Column(types.Integer, ForeignKey('languages.id'))
form = Column(Unicode(50), nullable=False)
class Flashcard(Base):
__tablename__= 'flashcards'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
card_id = Column(ForeignKey('cards.id', onupdate='CASCADE', ondelete='CASCADE'))
owner = Column(ForeignKey('auth_id.id'))
level = Column(types.Enum('Show', '4Source','8Source', '4Target', '8Target', 'Flashcard1','Flashcard2','Flashcard3','Flashcard4','Flashcard5','Flashcard6','Flashcard7','Flashcard8'), default='Show')
due = Column(types.Date(), default=func.now())
interval = Column(types.Integer(), default=10)
ease = Column(types.Integer(), default=2500)
correct = Column(types.Integer(), default=0)
incorrect = Column(types.Integer(), default=0)
class FlashcardHistory(Base):
__tablename__= 'flashcardhistories'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
flashcard_id = Column(ForeignKey('flashcards.id'), index=True)
time = Column(types.DateTime(), default=func.now())
level = Column(types.Enum('Show', '4Source','8Source', '4Target', '8Target', 'Flashcard1','Flashcard2','Flashcard3','Flashcard4','Flashcard5','Flashcard6','Flashcard7','Flashcard8'))
response_time= Column(types.Integer())
response = Column(Unicode(50))
correct = Column(types.Boolean())
"""CONTENT MODELS"""
tag_content_pairs = Table('tag_content_pairs', Base.metadata,
Column('tag_id', types.Integer(), \
ForeignKey('tags.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (auth_id,group_id)
Index('tag_content', tag_content_pairs.c.tag_id, tag_content_pairs.c.content_id)
class Tag(Base):
__tablename__ = 'tags'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(Unicode(100), unique=True, nullable=False)
contents = relationship('Content', secondary=tag_content_pairs, \
backref='contents')
class Content(Base):
__tablename__= 'contents'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
type = Column(types.Enum(u'lesson', u'reading'))
released = Column(types.Date(), default=func.now())
title = Column(Unicode(80))
description = Column(Unicode(350))
picture_id = Column(types.Integer(), ForeignKey('pictures.id'))
picture = relationship("Picture")
url = Column(URLType)
views = Column(types.Integer(), default=0)
owner = Column(types.Integer, ForeignKey('auth_users.id'), index = True)
quiz_id = Column(types.Integer, ForeignKey("quizzes.id"), index = True)
quiz = relationship('Quiz', uselist=False)
finished_by = relationship('AuthID', secondary=user_finished_content)
difficulty_rated_by = relationship('AuthID', secondary=user_voted_content_difficulty)
quality_rated_by = relationship('AuthID', secondary=user_voted_content_quality)
vocab_added_by = relationship('AuthID', secondary=user_added_content_vocab)
tags = relationship('Tag', secondary=tag_content_pairs, \
backref='tags')
comments = relationship('Comment')
difficulty_votes = relationship('DifficultyVote')
quality_votes = relationship('QualityVote')
vocabulary = relationship('EnglishLemma', secondary=lemma_content_pairs, \
backref='vocabulary')
@classmethod
def get_by_title(cls, title):
""" Returns Content object or None by title content = Content.get_by_title('title')"""
return DBSession.query(cls).filter(cls.title==title).first()
@classmethod
def get_by_url(cls, url):
""" Returns Content object or None by title content = Content.get_by_url('url')"""
return DBSession.query(cls).filter(cls.url==url).first()
class DifficultyVote(Base):
__tablename__ = 'difficulty_votes'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
score = Column(types.Integer())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
content_id = Column(types.Integer, ForeignKey('contents.id'), default = None)
class QualityVote(Base):
__tablename__ = 'quality_votes'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
score = Column(types.Integer())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
content_id = Column(types.Integer, ForeignKey('contents.id'), default = None)
class Comment(Base):
__tablename__ = 'comments'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
comment_type = Column(types.Enum(u'C',u'Q'), default=u'C')
time = Column(types.DateTime(), default=func.now())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
content_id = Column(types.Integer, ForeignKey('contents.id'), default = None)
text = Column(Unicode(1000), nullable = False)
replies = relationship('CommentReply')
class CommentReply(Base):
__tablename__ = 'comment_replies'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
time = Column(types.DateTime(), default=func.now())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
parent_id = Column(types.Integer, ForeignKey('comments.id'), default = None)
text = Column(Unicode(1000), nullable = False)
class Lesson(Base):
""" Table name: lessons
video = Column(types.VARCHAR(200))
quiz_id = Column(types.Integer, ForeignKey('quizzes.id'), nullable= False)
"""
__tablename__ = 'lessons'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
content_id = Column(types.Integer(), ForeignKey('contents.id'))
content = relationship("Content")
video = Column(types.VARCHAR(200))
class Reading(Base):
""" Table name: readings
text = Column(types.UnicodeText())
quiz_id = Column(types.Integer, ForeignKey('quizzes.id'), nullable= False)
"""
__tablename__ = 'readings'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
content_id = Column(types.Integer(), ForeignKey('contents.id'))
text = Column(types.UnicodeText())
sources = relationship('Source')
class Source(Base):
__tablename__ = 'sources'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
reading_id = Column(types.Integer(), ForeignKey('readings.id'))
author = Column(types.Unicode(60))
title = Column(types.Unicode(100))
url = Column(types.Unicode(200))
source = Column(types.Unicode(60))
    date = Column(types.Date, default=func.now())
class Quiz(Base):
__tablename__ = 'quizzes'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
title = Column(Unicode(100), nullable=False, default=u"This quiz is coming soon!")
tagline = Column(Unicode(100), nullable=False, default=u'Test your Knowledge!')
content = relationship('Content')
questions = relationship('Question')
def to_json(self):
if len(self.questions) > 0:
questions = self.questions[0].to_json()
for question in self.questions[1:]:
questions += "," + question.to_json
quiz = """var quizJSON = {{"info": {{"name": "{title}","main": "<p>{tagline}</p>", "results": "<h5>Learn More!</h5><p>We have many more lessons for you.</p>", "level1": "You know this lesson very well!", "level2": "You know this lesson well.", "level3": "You might want to watch this lesson again.", "level4": "You should watch this lesson again.","level5":"You should definitely watch this lesson again" }}, "questions": [{questions}]}};""".format(**{'title' : self.title, 'tagline' : self.tagline, 'questions' : questions})
file_path = os.path.join(QUIZ_DIRECTORY, '{0}.js'.format(self.id))
temp_file_path = os.path.join('/tmp', '{0}.js'.format(self.id))
output_file = open(temp_file_path, 'wb')
output_file.write(quiz)
output_file.close()
os.rename(temp_file_path, file_path)
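    # Note (added): writing to /tmp first and then os.rename-ing into place
    # keeps readers from ever seeing a half-written quiz file; strictly, the
    # rename only works atomically when /tmp and the target directory share
    # a filesystem, which this code assumes.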
def json_id(self):
questions = DBSession.query(Question).filter(self.id==Question.quiz_id).all()
if len(questions) > 0:
return self.id
else:
return 0
@event.listens_for(Quiz, "after_insert")
def after_insert(mapper, connection, target):
target.to_json()
@event.listens_for(Quiz, "after_update")
def after_update(mapper, connection, target):
print "\n\n\nUPDATING QUIZ\n\n\n", str(target)
target.to_json()
class Question(Base):
__tablename__ = 'questions'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
quiz_id = Column(types.Integer, ForeignKey('quizzes.id'))
prompt = Column(Unicode(100), unique=True, nullable=False)
answers = relationship('Answer')
correct_message = Column(Unicode(100), nullable=False, default=u'That was correct!')
incorrect_message = Column(Unicode(100), nullable=False, default=u'That was incorrect...')
def to_json(self):
correct = randint(0, 9)
incorrect = randint(0, 3)
icheadline = ['Incorrect!', 'Too bad..', 'You were wrong...', 'Sorry...'][incorrect]
cheadline = ['Correct!', 'Good job!', 'Right on!', 'Way to go!', 'Keep it up!', 'Awesome!', 'Wonderful!', "You're right!", 'Yup', 'Good answer'][correct]
dic = {"prompt" : self.prompt,
"cexplanation" : self.correct_message,
"cheadline" : cheadline,
"icexplanation" : self.incorrect_message,
"icheadline":icheadline,
}
for i, answer in enumerate(self.answers):
dic["a{0}t".format(i+1)] = answer.response
if answer.correct:
dic["a{0}v".format(i+1)]='true'
else:
dic["a{0}v".format(i+1)]='false'
if len(self.answers) == 4:
question = """{{"q": "{prompt}", "a": [{{"option": "{a1t}", "correct": {a1v}}}, {{"option": "{a2t}", "correct": {a2v}}}, {{"option": "{a3t}", "correct": {a3v}}}, {{"option": "{a4t}", "correct": {a4v}}}], "correct": "<p><span>{cheadline}</span>{cexplanation}</p>", "incorrect": "<p><span>{icheadline}</span>{icexplanation}</p>"}}""".format(**dic)
elif len(self.answers) == 2:
question = """{{"q": "{prompt}", "a": [{{"option": "{a1t}", "correct": {a1v}}}, {{"option": "{a2t}", "correct": {a2v}}}], "correct": "<p><span>{cheadline}</span>{cexplanation}</p>", "incorrect": "<p><span>{icheadline}</span>{icexplanation}</p>"}}""".format(**dic)
        else:
            # Guard (added): with any other answer count, `question` was
            # previously unbound and the return raised a bare NameError.
            raise ValueError('Question.to_json supports 2 or 4 answers, got %d' % len(self.answers))
        return question
@event.listens_for(Question, "after_update")
def after_update(mapper, connection, target):
print "\n\n\nUPDATING QUIZ\n\n\n", str(target.quiz_id)
    # A Connection has no .query(); go through the scoped session instead,
    # as Quiz.json_id does above.
    quiz = DBSession.query(Quiz).filter(Quiz.id==target.quiz_id).first()
    if quiz is not None:
        quiz.to_json()
class Answer(Base):
__tablename__ = 'answers'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
question_id = Column(types.Integer, ForeignKey('questions.id'), default = None)
response = Column(Unicode(100), unique=True, nullable=False)
correct = Column(types.Boolean)
class Picture(Base):
"""Table which stores pictures"""
__tablename__ = 'pictures'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(URLType)
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
@classmethod
def add_file(cls, input_file, name):
pic = Image.open(input_file)
for i, size in enumerate([pic.size]+PICTURE_SIZES):
pic.thumbnail(size, Image.ANTIALIAS)
file_path = os.path.join(PICTURE_DIRECTORIES[i], '{0}.jpeg'.format(name))
pic.save(file_path, 'jpeg')
@classmethod
def from_file(cls, name, image):
        if name is None: name = str(uuid.uuid4())
same_name =len(glob.glob(os.path.join(PICTURE_DIRECTORIES[0], '{0}[0-9]*.jpeg'.format(name))))
name+=str(same_name)
input_file = image.file
cls.add_file(input_file, name)
return Picture(name=name)
@classmethod
def update_with_file(cls, pid, name, image):
        if name is None: name = str(uuid.uuid4())
same_name =len(glob.glob(os.path.join(PICTURE_DIRECTORIES[0], '{0}[0-9]*.jpeg'.format(name))))
name+=str(same_name)
session = DBSession()
session.query(cls).filter(cls.id==pid).update(values={'name' : name.strip()})
input_file = image.file
cls.add_file(input_file, name)
session.flush()
class PotentialPicture(Base):
"""Table which stores pictures"""
__tablename__ = 'potential_pictures'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(types.VARCHAR(75))
""" Usage Data"""
class UserPoint(Base):
__tablename__ = 'user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
source = Column(types.Unicode(255), default=u'')
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
time = Column(types.DateTime(), default=func.now())
class TotalUserPoint(Base):
__tablename__ = 'total_user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
class MonthlyUserPoint(Base):
__tablename__ = 'monthly_user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
month = Column(types.Integer())
class WeeklyUserPoint(Base):
__tablename__ = 'weekly_user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
week = Column(types.Integer())
def populate(settings):
## Add logistical data
session = DBSession()
default_groups = [(u'users',u'User Group'), (u'teachers',u'Teacher Group'), (u'admin',u'Admin Group')]
for name, description in default_groups:
group = AuthGroup(name=name, description=description)
session.add(group)
session.flush()
transaction.commit()
session.close()
session = DBSession()
languages={'ab':{'name':"Abkhaz",'nativename':"аҧсуа"},'aa':{'name':"Afar",'nativename':"Afaraf"},'af':{'name':"Afrikaans",'nativename':"Afrikaans"},'ak':{'name':"Akan",'nativename':"Akan"},'sq':{'name':"Albanian",'nativename':"Shqip"},'am':{'name':"Amharic",'nativename':"አማርኛ"},'ar':{'name':"Arabic",'nativename':"العربية"},'an':{'name':"Aragonese",'nativename':"Aragonés"},'hy':{'name':"Armenian",'nativename':"Հայերեն"},'as':{'name':"Assamese",'nativename':"অসমীয়া"},'av':{'name':"Avaric",'nativename':"авар мацӀ, магӀарул мацӀ"},'ae':{'name':"Avestan",'nativename':"avesta"},'ay':{'name':"Aymara",'nativename':"aymar aru"},'az':{'name':"Azerbaijani",'nativename':"azərbaycan dili"},'bm':{'name':"Bambara",'nativename':"bamanankan"},'ba':{'name':"Bashkir",'nativename':"башҡорт теле"},'eu':{'name':"Basque",'nativename':"euskara, euskera"},'be':{'name':"Belarusian",'nativename':"Беларуская"},'bn':{'name':"Bengali",'nativename':"বাংলা"},'bh':{'name':"Bihari",'nativename':"भोजपुरी"},'bi':{'name':"Bislama",'nativename':"Bislama"},'bs':{'name':"Bosnian",'nativename':"bosanski jezik"},'br':{'name':"Breton",'nativename':"brezhoneg"},'bg':{'name':"Bulgarian",'nativename':"български език"},'my':{'name':"Burmese",'nativename':"ဗမာစာ"},'ca':{'name':"Catalan; Valencian",'nativename':"Català"},'ch':{'name':"Chamorro",'nativename':"Chamoru"},'ce':{'name':"Chechen",'nativename':"нохчийн мотт"},'ny':{'name':"Chichewa; Chewa; Nyanja",'nativename':"chiCheŵa, chinyanja"},'zh':{'name':"Chinese",'nativename':"中文 (Zhōngwén), 汉语, 漢語"},'cv':{'name':"Chuvash",'nativename':"чӑваш чӗлхи"},'kw':{'name':"Cornish",'nativename':"Kernewek"},'co':{'name':"Corsican",'nativename':"corsu, lingua corsa"},'cr':{'name':"Cree",'nativename':"ᓀᐦᐃᔭᐍᐏᐣ"},'hr':{'name':"Croatian",'nativename':"hrvatski"},'cs':{'name':"Czech",'nativename':"česky, čeština"},'da':{'name':"Danish",'nativename':"dansk"},'dv':{'name':"Divehi; Dhivehi; Maldivian;",'nativename':"ދިވެހި"},'nl':{'name':"Dutch",'nativename':"Nederlands, Vlaams"},'en':{'name':"English",'nativename':"English"},'eo':{'name':"Esperanto",'nativename':"Esperanto"},'et':{'name':"Estonian",'nativename':"eesti, eesti keel"},'ee':{'name':"Ewe",'nativename':"Eʋegbe"},'fo':{'name':"Faroese",'nativename':"føroyskt"},'fj':{'name':"Fijian",'nativename':"vosa Vakaviti"},'fi':{'name':"Finnish",'nativename':"suomi, suomen kieli"},'fr':{'name':"French",'nativename':"français, langue française"},'ff':{'name':"Fula; Fulah; Pulaar; Pular",'nativename':"Fulfulde, Pulaar, Pular"},'gl':{'name':"Galician",'nativename':"Galego"},'ka':{'name':"Georgian",'nativename':"ქართული"},'de':{'name':"German",'nativename':"Deutsch"},'el':{'name':"Greek, Modern",'nativename':"Ελληνικά"},'gn':{'name':"Guaraní",'nativename':"Avañeẽ"},'gu':{'name':"Gujarati",'nativename':"ગુજરાતી"},'ht':{'name':"Haitian; Haitian Creole",'nativename':"Kreyòl ayisyen"},'ha':{'name':"Hausa",'nativename':"Hausa, هَوُسَ"},'he':{'name':"Hebrew (modern)",'nativename':"עברית"},'hz':{'name':"Herero",'nativename':"Otjiherero"},'hi':{'name':"Hindi",'nativename':"हिन्दी, हिंदी"},'ho':{'name':"Hiri Motu",'nativename':"Hiri Motu"},'hu':{'name':"Hungarian",'nativename':"Magyar"},'ia':{'name':"Interlingua",'nativename':"Interlingua"},'id':{'name':"Indonesian",'nativename':"Bahasa Indonesia"},'ie':{'name':"Interlingue",'nativename':"Originally called Occidental; then Interlingue after WWII"},'ga':{'name':"Irish",'nativename':"Gaeilge"},'ig':{'name':"Igbo",'nativename':"Asụsụ Igbo"},'ik':{'name':"Inupiaq",'nativename':"Iñupiaq, Iñupiatun"},'io':{'name':"Ido",'nativename':"Ido"},'is':{'name':"Icelandic",'nativename':"Íslenska"},'it':{'name':"Italian",'nativename':"Italiano"},'iu':{'name':"Inuktitut",'nativename':"ᐃᓄᒃᑎᑐᑦ"},'ja':{'name':"Japanese",'nativename':"日本語 (にほんご/にっぽんご)"},'jv':{'name':"Javanese",'nativename':"basa Jawa"},'kl':{'name':"Kalaallisut, Greenlandic",'nativename':"kalaallisut, kalaallit oqaasii"},'kn':{'name':"Kannada",'nativename':"ಕನ್ನಡ"},'kr':{'name':"Kanuri",'nativename':"Kanuri"},'ks':{'name':"Kashmiri",'nativename':"कश्मीरी, كشميري"},'kk':{'name':"Kazakh",'nativename':"Қазақ тілі"},'km':{'name':"Khmer",'nativename':"ភាសាខ្មែរ"},'ki':{'name':"Kikuyu, Gikuyu",'nativename':"Gĩkũyũ"},'rw':{'name':"Kinyarwanda",'nativename':"Ikinyarwanda"},'ky':{'name':"Kirghiz, Kyrgyz",'nativename':"кыргыз тили"},'kv':{'name':"Komi",'nativename':"коми кыв"},'kg':{'name':"Kongo",'nativename':"KiKongo"},'ko':{'name':"Korean",'nativename':"한국어 (韓國語), 조선말 (朝鮮語)"},'ku':{'name':"Kurdish",'nativename':"Kurdî, كوردی"},'kj':{'name':"Kwanyama, Kuanyama",'nativename':"Kuanyama"},'la':{'name':"Latin",'nativename':"latine, lingua latina"},'lb':{'name':"Luxembourgish, Letzeburgesch",'nativename':"Lëtzebuergesch"},'lg':{'name':"Luganda",'nativename':"Luganda"},'li':{'name':"Limburgish, Limburgan, Limburger",'nativename':"Limburgs"},'ln':{'name':"Lingala",'nativename':"Lingála"},'lo':{'name':"Lao",'nativename':"ພາສາລາວ"},'lt':{'name':"Lithuanian",'nativename':"lietuvių kalba"},'lu':{'name':"Luba-Katanga",'nativename':""},'lv':{'name':"Latvian",'nativename':"latviešu valoda"},'gv':{'name':"Manx",'nativename':"Gaelg, Gailck"},'mk':{'name':"Macedonian",'nativename':"македонски јазик"},'mg':{'name':"Malagasy",'nativename':"Malagasy fiteny"},'ms':{'name':"Malay",'nativename':"bahasa Melayu, بهاس ملايو"},'ml':{'name':"Malayalam",'nativename':"മലയാളം"},'mt':{'name':"Maltese",'nativename':"Malti"},'mi':{'name':"Māori",'nativename':"te reo Māori"},'mr':{'name':"Marathi (Marāṭhī)",'nativename':"मराठी"},'mh':{'name':"Marshallese",'nativename':"Kajin M̧ajeļ"},'mn':{'name':"Mongolian",'nativename':"монгол"},'na':{'name':"Nauru",'nativename':"Ekakairũ Naoero"},'nv':{'name':"Navajo, Navaho",'nativename':"Diné bizaad, Dinékʼehǰí"},'nb':{'name':"Norwegian Bokmål",'nativename':"Norsk bokmål"},'nd':{'name':"North Ndebele",'nativename':"isiNdebele"},'ne':{'name':"Nepali",'nativename':"नेपाली"},'ng':{'name':"Ndonga",'nativename':"Owambo"},'nn':{'name':"Norwegian Nynorsk",'nativename':"Norsk nynorsk"},'no':{'name':"Norwegian",'nativename':"Norsk"},'ii':{'name':"Nuosu",'nativename':"ꆈꌠ꒿ Nuosuhxop"},'nr':{'name':"South Ndebele",'nativename':"isiNdebele"},'oc':{'name':"Occitan",'nativename':"Occitan"},'oj':{'name':"Ojibwe, Ojibwa",'nativename':"ᐊᓂᔑᓈᐯᒧᐎᓐ"},'cu':{'name':"Old Church Slavonic, Church Slavic, Church Slavonic, Old Bulgarian, Old Slavonic",'nativename':"ѩзыкъ словѣньскъ"},'om':{'name':"Oromo",'nativename':"Afaan Oromoo"},'or':{'name':"Oriya",'nativename':"ଓଡ଼ିଆ"},'os':{'name':"Ossetian, Ossetic",'nativename':"ирон æвзаг"},'pa':{'name':"Panjabi, Punjabi",'nativename':"ਪੰਜਾਬੀ, پنجابی"},'pi':{'name':"Pāli",'nativename':"पाऴि"},'fa':{'name':"Persian",'nativename':"فارسی"},'pl':{'name':"Polish",'nativename':"polski"},'ps':{'name':"Pashto, Pushto",'nativename':"پښتو"},'pt':{'name':"Portuguese",'nativename':"Português"},'qu':{'name':"Quechua",'nativename':"Runa Simi, Kichwa"},'rm':{'name':"Romansh",'nativename':"rumantsch grischun"},'rn':{'name':"Kirundi",'nativename':"kiRundi"},'ro':{'name':"Romanian, Moldavian, Moldovan",'nativename':"română"},'ru':{'name':"Russian",'nativename':"русский язык"},'sa':{'name':"Sanskrit (Saṁskṛta)",'nativename':"संस्कृतम्"},'sc':{'name':"Sardinian",'nativename':"sardu"},'sd':{'name':"Sindhi",'nativename':"सिन्धी, سنڌي، سندھی"},'se':{'name':"Northern Sami",'nativename':"Davvisámegiella"},'sm':{'name':"Samoan",'nativename':"gagana faa Samoa"},'sg':{'name':"Sango",'nativename':"yângâ tî sängö"},'sr':{'name':"Serbian",'nativename':"српски језик"},'gd':{'name':"Scottish Gaelic; Gaelic",'nativename':"Gàidhlig"},'sn':{'name':"Shona",'nativename':"chiShona"},'si':{'name':"Sinhala, Sinhalese",'nativename':"සිංහල"},'sk':{'name':"Slovak",'nativename':"slovenčina"},'sl':{'name':"Slovene",'nativename':"slovenščina"},'so':{'name':"Somali",'nativename':"Soomaaliga, af Soomaali"},'st':{'name':"Southern Sotho",'nativename':"Sesotho"},'es':{'name':"Spanish; Castilian",'nativename':"español, castellano"},'su':{'name':"Sundanese",'nativename':"Basa Sunda"},'sw':{'name':"Swahili",'nativename':"Kiswahili"},'ss':{'name':"Swati",'nativename':"SiSwati"},'sv':{'name':"Swedish",'nativename':"svenska"},'ta':{'name':"Tamil",'nativename':"தமிழ்"},'te':{'name':"Telugu",'nativename':"తెలుగు"},'tg':{'name':"Tajik",'nativename':"тоҷикӣ, toğikī, تاجیکی"},'th':{'name':"Thai",'nativename':"ไทย"},'ti':{'name':"Tigrinya",'nativename':"ትግርኛ"},'bo':{'name':"Tibetan Standard, Tibetan, Central",'nativename':"བོད་ཡིག"},'tk':{'name':"Turkmen",'nativename':"Türkmen, Түркмен"},'tl':{'name':"Tagalog",'nativename':"Wikang Tagalog, ᜏᜒᜃᜅ᜔ ᜆᜄᜎᜓᜄ᜔"},'tn':{'name':"Tswana",'nativename':"Setswana"},'to':{'name':"Tonga (Tonga Islands)",'nativename':"faka Tonga"},'tr':{'name':"Turkish",'nativename':"Türkçe"},'ts':{'name':"Tsonga",'nativename':"Xitsonga"},'tt':{'name':"Tatar",'nativename':"татарча, tatarça, تاتارچا"},'tw':{'name':"Twi",'nativename':"Twi"},'ty':{'name':"Tahitian",'nativename':"Reo Tahiti"},'ug':{'name':"Uighur, Uyghur",'nativename':"Uyƣurqə, ئۇيغۇرچە"},'uk':{'name':"Ukrainian",'nativename':"українська"},'ur':{'name':"Urdu",'nativename':"اردو"},'uz':{'name':"Uzbek",'nativename':"zbek, Ўзбек, أۇزبېك"},'ve':{'name':"Venda",'nativename':"Tshivenḓa"},'vi':{'name':"Vietnamese",'nativename':"Tiếng Việt"},'vo':{'name':"Volapük",'nativename':"Volapük"},'wa':{'name':"Walloon",'nativename':"Walon"},'cy':{'name':"Welsh",'nativename':"Cymraeg"},'wo':{'name':"Wolof",'nativename':"Wollof"},'fy':{'name':"Western Frisian",'nativename':"Frysk"},'xh':{'name':"Xhosa",'nativename':"isiXhosa"},'yi':{'name':"Yiddish",'nativename':"ייִדיש"},'yo':{'name':"Yoruba",'nativename':"Yorùbá"},'za':{'name':"Zhuang, Chuang",'nativename':"Saɯ cueŋƅ, Saw cuengh"}}
#languages = [("Abkhaz","аҧсуа"),("Afar","Afaraf"),("Afrikaans","Afrikaans"),("Akan","Akan"),("Albanian","Shqip"),("Amharic","አማርኛ"),("Arabic","العربية"),("Aragonese","Aragonés"),("Armenian","Հայերեն"),("Assamese","অসমীয়া"),("Avaric","авар мацӀ"),("Avestan","avesta"),("Aymara","aymar aru"),("Azerbaijani","azərbaycan dili"),("Bambara","bamanankan"),("Bashkir","башҡорт теле"),("Basque","euskara"),("Belarusian","Беларуская"),("Bengali","বাংলা"),("Bihari","भोजपुरी"),("Bislama","Bislama"),("Bosnian","bosanski jezik"),("Breton","brezhoneg"),("Bulgarian","български език"),("Burmese","Burmese"),("Catalan","Català"),("Chamorro","Chamoru"),("Chechen","нохчийн мотт"),("Chichewa","chiCheŵa"),("Chinese","中文"),("Chuvash","чӑваш чӗлхи"),("Cornish","Kernewek"),("Corsican","corsu"),("Cree","ᓀᐦᐃᔭᐍᐏᐣ"),("Croatian","hrvatski"),("Czech","česky"),("Danish","dansk"),("Divehi","ދިވެހި"),("Dutch","Nederlands"),("Dzongkha","རྫོང་ཁ"),("English","English"),("Esperanto","Esperanto"),("Estonian","eesti"),("Ewe","Eʋegbe"),("Faroese","føroyskt"),("Fijian","vosa Vakaviti"),("Finnish","suomi"),("French","français"),("Fula","Fulfulde | Pulaar"),("Gaelic","Gàidhlig"),("Galician","Galego"),("Georgian","ქართული"),("German","Deutsch"),("Greek","Ελληνικά"),("Guaraní","Avañe'ẽ"),("Gujarati","ગુજરાતી"),("Haitian","Kreyòl ayisyen"),("Hausa","هَوُسَ"),("Hebrew","עברית"),("Herero","Otjiherero"),("Hindi","हिन्दी| हिंदी"),("Hiri Motu","Hiri Motu"),("Hungarian","Magyar"),("Icelandic","Íslenska"),("Ido","Ido"),("Igbo","Asụsụ Igbo"),("Indonesian","Bahasa Indonesia"),("Interlingua","Interlingua"),("Interlingue","Interlingue"),("Inuktitut","ᐃᓄᒃᑎᑐᑦ"),("Inupiaq","Iñupiaq"),("Irish","Gaeilge"),("Italian","Italiano"),("Japanese","日本語"),("Javanese","basa Jawa"),("Kalaallisut","kalaallisut"),("Kannada","ಕನ್ನಡ"),("Kanuri","Kanuri"),("Kashmiri","कश्मीरी"),("Kazakh","Қазақ тілі"),("Khmer","ភាសាខ្មែរ"),("Kikuyu","Gĩkũyũ"),("Kinyarwanda","Ikinyarwanda"),("Kirghiz","кыргыз тили"),("Kirundi","kiRundi"),("Komi","коми кыв"),("Kongo","KiKongo"),("Korean","한국어 (韓國語)"),("Kurdish","Kurdî"),("Kwanyama","Kuanyama"),("Lao","ພາສາລາວ"),("Latin","latine"),("Latvian","latviešu valoda"),("Lezgian","Лезги чlал"),("Limburgish","Limburgs"),("Lingala","Lingála"),("Lithuanian","lietuvių kalba"),("Luba-Katanga","Luba-Katanga"),("Luganda","Luganda"),("Luxembourgish","Lëtzebuergesch"),("Macedonian","македонски јазик"),("Malagasy","Malagasy fiteny"),("Malay","bahasa Melayu"),("Malayalam","മലയാളം"),("Maltese","Malti"),("Manx","Gaelg"),("Marathi","मराठी"),("Marshallese","Kajin M̧ajeļ"),("Mongolian","монгол"),("Māori","te reo Māori"),("Nauru","Ekakairũ Naoero"),("Navajo","Diné bizaad"),("Ndonga","Owambo"),("Nepali","नेपाली"),("North Ndebele","isiNdebele"),("Norwegian","Norsk"),("Nuosu","Nuosuhxop"),("Occitan","Occitan"),("Ojibwe","ᐊᓂᔑᓈᐯᒧᐎᓐ"),("Oriya","ଓଡ଼ିଆ"),("Oromo","Afaan Oromoo"),("Ossetian","ирон æвзаг"),("Panjabi","ਪੰਜਾਬੀ| پنجابی"),("Pashto","پښتو"),("Persian","فارسی"),("Polish","polski"),("Portuguese","Português"),("Pāli","पाऴि"),("Quechua","Kichwa"),("Romanian","română"),("Romansh","rumantsch grischun"),("Russian","русский язык"),("Sami (Northern)","Davvisámegiella"),("Samoan","gagana fa'a Samoa"),("Sango","yângâ tî sängö"),("Sanskrit","संस्कृतम्"),("Sardinian","sardu"),("Serbian","српски језик"),("Shona","chiShona"),("Sindhi","सिन्धी"),("Sinhala","සිංහල"),("Slavonic","ѩзыкъ словѣньскъ"),("Slovak","slovenčina"),("Slovene","slovenščina"),("Somali","Soomaaliga"),("South Ndebele","isiNdebele"),("Southern Sotho","Sesotho"),("Spanish","español | 
castellano"),("Sundanese","Basa Sunda"),("Swahili","Kiswahili"),("Swati","SiSwati"),("Swedish","svenska"),("Tagalog","Wikang Tagalog"),("Tahitian","Reo Tahiti"),("Tajik","тоҷикӣ"),("Tamil","தமிழ்"),("Tatar","татарча"),("Telugu","తెలుగు"),("Thai","ไทย"),("Tibetan","བོད་ཡིག"),("Tigrinya","ትግርኛ"),("Tonga","faka Tonga"),("Tsonga","Xitsonga"),("Tswana","Setswana"),("Turkish","Türkçe"),("Turkmen","Türkmen | Түркмен"),("Twi","Twi"),("Uighur","Uyƣurqə"),("Ukrainian","українська"),("Urdu","اردو"),("Uzbek","O'zbek"),("Venda","Tshivenḓa"),("Vietnamese","Tiếng Việt"),("Volapük","Volapük"),("Walloon","Walon"),("Welsh","Cymraeg"),("Western Frisian","Frysk"),("Wolof","Wollof"),("Xhosa","isiXhosa"),("Yiddish","ייִדיש"),("Yoruba","Yorùbá"),("Zhuang","Saɯ cueŋƅ"),("Zulu","isiZulu")]
goog = ['Afrikaans', 'Albanian', 'Arabic', 'Armenian', 'Azerbaijani', 'Basque', 'Belarusian', 'Bengali', 'Bosnian', 'Bulgarian', 'Catalan', 'Cebuano', 'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Esperanto', 'Estonian', 'Filipino', 'Finnish', 'French', 'Galician', 'Georgian', 'German', 'Greek', 'Gujarati', 'Haitian Creole', 'Hausa', 'Hebrew', 'Hindi', 'Hmong', 'Hungarian', 'Icelandic', 'Igbo', 'Indonesian', 'Irish', 'Italian', 'Japanese', 'Javanese', 'Kannada', 'Khmer', 'Korean', 'Lao', 'Latin', 'Latvian', 'Lithuanian', 'Macedonian', 'Malay', 'Maltese', 'Maori', 'Marathi', 'Mongolian', 'Nepali', 'Norwegian', 'Persian', 'Polish', 'Portuguese', 'Punjabi', 'Romanian', 'Russian', 'Serbian', 'Slovak', 'Slovenian', 'Somali', 'Spanish', 'Swahili', 'Swedish', 'Tamil', 'Telugu', 'Thai', 'Turkish', 'Ukrainian', 'Urdu', 'Vietnamese', 'Welsh', 'Yiddish', 'Yoruba', 'Zulu']
language_tuples = []
for key in languages:
language_tuples.append((languages[key]['name'], languages[key]['nativename'], key))
language_tuples = sorted(language_tuples, key=lambda a: a[0])
for l in language_tuples:
goog_trans = None
if goog.count(l[0]) > 0: goog_trans =l[2]
language = Language(english_name=l[0], native_name=l[1], iso_lang=l[2], goog_translate=goog_trans)
session.add(language)
transaction.commit()
session.close()
def initialize_sql(engine, settings):
DBSession.configure(bind=engine)
Base.metadata.bind = engine
Base.metadata.create_all(engine)
# if settings.has_key('apex.velruse_providers'):
# pass
#SQLBase.metadata.bind = engine
#SQLBase.metadata.create_all(engine)
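# populate() seeds reference data; an IntegrityError simply means the rows
# already exist, so the transaction is aborted instead of failing startup.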
try:
populate(settings)
except IntegrityError:
transaction.abort()
|
[
"johnpaulcadigan@gmail.com"
] |
johnpaulcadigan@gmail.com
|
b0e487b584903313154d9dd72e6c085f2b3b95d9
|
4664328482163fd927603d66f47209b28471cf0f
|
/venv/lib/python3.7/site-packages/datalad/metadata/extractors/tests/test_datacite_xml.py
|
30ed2525d0915a74e0f941dc65be94d72cbe0d4c
|
[
"MIT"
] |
permissive
|
emmetaobrien/dats-validator
|
08706ddab795d272391b3611cd3ba0de8c4a91a1
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
refs/heads/master
| 2020-12-19T05:03:17.179117
| 2020-01-22T17:28:38
| 2020-01-22T17:28:38
| 235,626,049
| 0
| 0
|
MIT
| 2020-01-22T17:24:56
| 2020-01-22T17:24:56
| null |
UTF-8
|
Python
| false
| false
| 2,982
|
py
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test datacite metadata extractor """
from simplejson import dumps
from datalad.metadata.extractors.datacite import MetadataExtractor
from datalad.metadata.metadata import _get_metadatarelevant_paths
from nose.tools import assert_equal
from datalad.tests.utils import with_tree
from datalad.api import create
xml_content = """\
<?xml version="1.0" encoding="UTF-8"?>
<resource xmlns="http://datacite.org/schema/kernel-2.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://datacite.org/schema/kernel-2.2 http://schema.datacite.org/meta/kernel-2.2/metadata.xsd">
<identifier identifierType="DOI">10.6080/K0QN64NG</identifier>
<creators>
<creator>
<creatorName>Last1, First1</creatorName>
</creator>
<creator>
<creatorName>Last2, First2</creatorName>
</creator>
</creators>
<titles>
<title>Main
title</title>
<title titleType="AlternativeTitle">CRCNS.org xxx-1</title>
</titles>
<publisher>CRCNS.org</publisher>
<publicationYear>2011</publicationYear>
<subjects>
<subject>Neuroscience</subject>
<subject>fMRI</subject>
</subjects>
<language>eng</language>
<resourceType resourceTypeGeneral="Dataset">Dataset/Neurophysiology</resourceType>
<sizes>
<size>10 GB</size>
</sizes>
<formats>
<format>application/matlab</format>
<format>NIFTY</format>
</formats>
<version>1.0</version>
<descriptions>
<description descriptionType="Other">
Some long
description.
</description>
</descriptions>
<relatedIdentifiers>
<relatedIdentifier relatedIdentifierType="DOI" relationType="IsDocumentedBy">10.1016/j.cub.2011.08.031</relatedIdentifier>
</relatedIdentifiers>
</resource>
"""
@with_tree(tree={'.datalad': {'meta.datacite.xml': xml_content}})
@with_tree(tree={'elsewhere': {'meta.datacite.xml': xml_content}})
def test_get_metadata(path1, path2):
for p in (path1, path2):
print('PATH')
ds = create(p, force=True)
ds.add('.')
meta = MetadataExtractor(
ds,
_get_metadatarelevant_paths(ds, []))._get_dataset_metadata()
assert_equal(
dumps(meta, sort_keys=True, indent=2),
"""\
{
"author": [
"Last1, First1",
"Last2, First2"
],
"citation": [
"10.1016/j.cub.2011.08.031"
],
"description": "Some long description.",
"formats": [
"application/matlab",
"NIFTY"
],
"name": "CRCNS.org xxx-1",
"sameas": "10.6080/K0QN64NG",
"shortdescription": "Main title",
"tag": [
"Neuroscience",
"fMRI"
],
"version": "1.0"
}""")
|
[
"giulia.ippoliti@mail.mcgill.ca"
] |
giulia.ippoliti@mail.mcgill.ca
|
075d28bc2668d9a7c0e17558b267dfb9c839c594
|
2fe37b71c486d4e2de6fb263d89993c9b99f0d37
|
/02Backstage/Python/00Test/Utils/AutoUtils/Scp.py
|
dfb2a211b4ea76d7726b73a04bd840003be28233
|
[] |
no_license
|
smart-town/MyNotes
|
87cb058753163ab7df41f73389af4e31f8288a52
|
87a0e689c1dcf9a79ef1059e6332c1a89a309ecc
|
refs/heads/master
| 2022-07-19T19:48:15.055165
| 2022-07-17T00:15:09
| 2022-07-17T00:15:09
| 160,528,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
""" copy source files to destination """
import os
import OsOrders
class CopyBasic(object):
def __init__(self, source, destination):
self.source = source
self.destination = destination
def dealRemote(self, data):
if type(data) == dict:
return "{}@{}".format(data["user"], data["host"])
else:
return data
def doCopy(self):
pass
class ScpCopy(CopyBasic):
def __init__(self, source, destination, remote=""):
super().__init__(source, destination)
self.remote = remote
def doCopy(self):
order = OsOrders.doScp(self.remote, self.source, self.destination)
print("do scp: %s" % (order))
class LocalCopy(CopyBasic):
def doCopy(self):
print("DO LOCAL COPY %s -> %s" % (self.source, self.destination))
def execute(self):
self.doCopy()
if __name__ == "__main__":
LocalCopy("C:", "D:").doCopy()
ScpCopy("C:/Users/luhha/Desktop", "C:/Users/luhha/Desktop/test", "root@127.0.0.1").doCopy()
|
[
"luhh18@outlook.com"
] |
luhh18@outlook.com
|
d91d8ef9fcacbf89506605d4bc882aabff544142
|
edc3a9f7bbaf119f44c4a7ff73fbcf26a2e05881
|
/table/views.py
|
fe050c5eee7315d825597befc4bb61d6ae5947ec
|
[] |
no_license
|
w39z/VM-Store
|
eb37a05a5bd2cf081980d5eba3d3d4cfbe4042c7
|
0c62f8999e9f83e0e96c5f8057a5337327709561
|
refs/heads/master
| 2023-09-04T21:37:13.976938
| 2021-10-22T16:35:06
| 2021-10-22T16:35:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
from django.shortcuts import render
from table.filters import *
def index(request):
items = Item.objects.all()
filter = ItemFilter(request.GET, queryset=items)
items = filter.qs
return render(request, 'index.html',
{'items': items,
'filter': filter})
|
[
"w39z@mail.ru"
] |
w39z@mail.ru
|
42d1cac53d347072386f233fdab7116d0e8200e9
|
939ca9c100b2b8d7d4c2825a9ef16dd4d7267455
|
/pageimages/templatetags/pageimage_tags.py
|
68f9c371b5917631968437e3f38c07a41abc7264
|
[
"Apache-2.0"
] |
permissive
|
ethoos/mezzanine-pageimages
|
523671cbe5fc9d0a04d697ad86ecaa1d859167f0
|
b529b4acc204aa26734e3c9f9ffb5c68fae58cf1
|
refs/heads/master
| 2021-01-12T04:46:47.578968
| 2017-01-02T10:48:46
| 2017-01-02T10:48:46
| 77,791,604
| 0
| 1
| null | 2017-01-02T10:48:47
| 2017-01-01T18:40:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
#
# Copyright 2013, 2014
# by Arnold Krille for bcs kommunikationsloesungen
# <a.krille@b-c-s.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django import template
from ..models import DefaultImage
from mezzanine.pages.models import Page
register = template.Library()
def get_default_image(type):
defaultimgs = DefaultImage.objects.filter(type=type)
if len(defaultimgs):
return defaultimgs[0].file.url
return u''
def get_image_for_page(page, type):
imgs = page.pageimage_set.filter(type=type)
if len(imgs):
return imgs[0].file.url
if page.parent:
return get_image_for_page(page.parent, type)
return get_default_image(type)
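# Lookup order: the page's own image of this type, else the nearest ancestor's,
# else the site-wide DefaultImage for the type (empty string if none is set).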
@register.simple_tag(takes_context=True)
def pageimage(context, type, page=None):
if isinstance(page, str):
page = Page.objects.get(titles=page)
if not page and 'page' in context:
page = context['page']
if page:
return get_image_for_page(page, type)
return get_default_image(type)
|
[
"a.krille@b-c-s.de"
] |
a.krille@b-c-s.de
|
3009068a1939ffcb3f8fec5364f73b3488b41100
|
55c1bcc5958b825a8a4208eca766729bba4b9722
|
/samples/secrets-manager/secretLambda.py
|
d6be92d6e870e45510c1afb4a7ff4103c7c68cb7
|
[
"MIT"
] |
permissive
|
Derek-Ashmore/AWSDevOpsUtilities
|
93683a49dc7c7481508c1246e975647c8f66346b
|
fd4cc98449a19b747335ca0dd874b4631439ee13
|
refs/heads/master
| 2020-03-10T21:42:16.648772
| 2019-01-19T20:10:18
| 2019-01-19T20:10:18
| 129,600,347
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
"""
secretLambda.py
This lambda is a simple example of retrieving secrets from AWS Secrets Manager
(which encrypts them with KMS at rest) to reduce exposure for needed items
like database passwords.
Environment Settings:
-- Secret_Name Secret name to use
Source Control: https://github.com/Derek-Ashmore/AWSDevOpsUtilities
"""
import sys
import json
import os
import boto3
import base64
def secretHandler(event, context):
try:
secretName = os.getenv('Secret_Name')
if secretName == None:
raise Exception('Secret_Name environment variable not set')
print( showSecret(secretName) )
except Exception as e:
e.args += (event,vars(context), secretName)
raise
return 0
def showSecret(secretName):
secretsMgrClient = boto3.client('secretsmanager')
get_secret_value_response = secretsMgrClient.get_secret_value(
SecretId=secretName
)
return get_secret_value_response['SecretString']
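# Usage sketch (secret name hypothetical): with Secret_Name=prod/db/password set
# on the Lambda, secretHandler prints the SecretString that Secrets Manager
# returns for that secret id via the standard boto3 get_secret_value call.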
|
[
"dashmore@force66.com"
] |
dashmore@force66.com
|
be26a348d616f0dddc10ae8c7ad0db166b68900d
|
3b1bb402b4d11dfd6e3a6430b1275e6ee814ecf8
|
/client/clustering/ClusterProceduralWeaponSingle.py
|
9bf44ac25365898028388d16b3decba73731393a
|
[] |
no_license
|
DanieleGravina/ProceduralWeapon
|
546c6e966381717b2a7c2c2bd4fcdd14d05abb76
|
d8b3289086f857f31349bac7edb12de313de4319
|
refs/heads/master
| 2021-01-10T20:54:38.864377
| 2015-04-03T15:33:12
| 2015-04-03T15:33:12
| 26,865,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,229
|
py
|
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from sklearn.manifold import MDS
from sklearn.cluster import DBSCAN
import numpy as np
from sklearn import metrics
import statistics
import matplotlib.pyplot as plt
from radar_chart_single import draw_radar
from math import *
from Costants import *
limits = [(ROF_MIN/100, ROF_MAX/100), (SPREAD_MIN/100, SPREAD_MAX/100), (AMMO_MIN, AMMO_MAX), (SHOT_COST_MIN, SHOT_COST_MAX), (RANGE_MIN/100, RANGE_MAX/100),
(SPEED_MIN, SPEED_MAX), (DMG_MIN, DMG_MAX), (DMG_RAD_MIN, DMG_RAD_MAX), (-GRAVITY_MIN, -GRAVITY_MAX),
(EXPLOSIVE_MIN, EXPLOSIVE_MAX)]
label =["ROF", "SPREAD", "AMMO", "SHOT_COST", "LIFE_SPAN", "SPEED", "DMG", "DMG_RAD", "GRAVITY", "EXPLOSIVE"]
def printWeapon(pop):
for ind in pop :
print("Weapon "+ " Rof:" + str(ind[0]) + " Spread:" + str(ind[1]) + " MaxAmmo:" + str(ind[2])
+ " ShotCost:" + str(ind[3]) + " Range:" + str(ind[4]) )
print("Projectile "+ " Speed:" + str(ind[5]) + " Damage:" + str(ind[6]) + " DamageRadius:" + str(ind[7])
+ " Gravity:" + str(ind[8]) + " Explosive:" + str(ind[9]))
print("*********************************************************" + "\n")
def writeWeapon(pop, pop_file):
i = 0
for ind in pop :
pop_file.write("(" + str(i) + ")" + "\n")
i += 1
pop_file.write("Weapon "+ " Rof:" + str(ind[0]) + " Spread:" + str(ind[1]) + " MaxAmmo:" + str(ind[2])
+ " ShotCost:" + str(ind[3]) + " Range:" + str(ind[4]) + "\n")
pop_file.write("Projectile "+ " Speed:" + str(ind[5]) + " Damage:" + str(ind[6]) + " DamageRadius:" + str(ind[7])
+ " Gravity:" + str(ind[8]) + " Explosive:" + str(ind[9]) +"\n")
def normalize(data):
for i in range(data.shape[0]):
for j in range(data.shape[1]):
data[i][j] = (data[i][j] - limits[j][0])/(limits[j][1] - limits[j][0])
return data
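# (min-max scaling of each feature against the parameter limits above, so that
# DBSCAN's single eps threshold is comparable across all ten weapon dimensions)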
def postProcess(data):
clone = list(data)
#fireinterval become rate of fire -> (1/fireinterval)
clone[0] = log(1/(ROF_MIN/100)) + log(1/clone[0])
#gravity is inverted
clone[8] = - clone[8]
return clone
class ClusterProceduralWeapon:
def __init__(self, data = None, fitness = None, file = None):
self.data = data
self.fits = fitness
self.file = file
def cluster(self):
try :
os.makedirs("cluster")
os.chdir("cluster")
except :
os.chdir("cluster")
self.file = open("cluster.txt", "w")
cluster_file = self.file
X = np.array(self.data, np.float32)
print(X.shape)
X = normalize(X)
db = DBSCAN(eps=0.05, min_samples=5).fit(X)
labels = db.labels_
labels_unique = np.unique( [labels[i] for i in range(len(labels)) if labels[i] != - 1] )
n_clusters_ = len(labels_unique)
print(labels)
index = []
fitness = []
entropy_mean = []
entropy_stdev = []
dist_mean = []
dist_stdev = []
fits_clustered = [[] for _ in range(n_clusters_)]
clusters = [[] for _ in range(n_clusters_)]
print("number of estimated clusters : %d" % n_clusters_ )
cluster_file.write("number of estimated clusters : %d" % n_clusters_ + "\n")
num_cluster = 0
for k in range(n_clusters_):
my_members = labels == k
for i in range(len(labels)):
if my_members[i]:
index += [i]
fitness += [self.fits[i]]
fits_clustered[k] += [self.fits[i]]
if fitness != []:
entropy_mean += [ statistics.mean( [fitness[i][0] for i in range(len(fitness))] ) ]
entropy_stdev += [ statistics.stdev( [fitness[i][0] for i in range(len(fitness))] ) ]
dist_mean += [ statistics.mean( [fitness[i][1] for i in range(len(fitness))] ) ]
dist_stdev += [ statistics.stdev( [fitness[i][1] for i in range(len(fitness))] ) ]
clusters[k] += [postProcess(self.data[i]) for i in range(len(labels)) if my_members[i]]
cluster_file.write("cluster: " + str(num_cluster) + "°" + "\n")
num_cluster += 1
cluster_file.write("index:"+ "\n")
cluster_file.write(str(index) + "\n")
cluster_file.write("fitness:"+ "\n")
cluster_file.write(str(fitness)+ "\n")
cluster_file.write("mean balance:"+ "\n")
cluster_file.write(str(entropy_mean)+ "\n")
cluster_file.write("std dev balance:"+ "\n")
cluster_file.write(str(entropy_stdev)+ "\n")
cluster_file.write("mean dist from target:"+ "\n")
cluster_file.write(str(dist_mean)+ "\n")
cluster_file.write("std dev dist from target:"+ "\n")
cluster_file.write(str(dist_stdev)+ "\n")
cluster_file.write("members:"+ "\n")
writeWeapon([self.data[i] for i in range(len(labels)) if my_members[i]], cluster_file)
cluster_file.write("==========================================================================="+ "\n")
print(index)
print("members:")
printWeapon([self.data[i] for i in range(len(labels)) if my_members[i]])
print("fitness:"+ "\n")
print(str(fitness)+ "\n")
print("mean entropy:"+ "\n")
print(str(entropy_mean)+ "\n")
print("std dev fitness:"+ "\n")
print(str(entropy_stdev)+ "\n")
print("mean dist:"+ "\n")
print(str(dist_mean)+ "\n")
print("std dev dist:"+ "\n")
print(str(dist_stdev)+ "\n")
print("mean of cluster")
print(np.mean([self.data[i] for i in range(len(labels)) if my_members[i]], axis=0))
print("std of cluster")
print(np.std([self.data[i] for i in range(len(labels)) if my_members[i]], axis=0))
index = []
fitness = []
entropy_mean = []
entropy_stdev = []
dist_mean = []
dist_stdev = []
colors = list('bgrcmykbgrcmykbgrcmykbgrcmyk')
'''
mds = MDS(n_components=2)
pos = mds.fit_transform(X.astype(np.float64))
colors = list('bgrcmykbgrcmykbgrcmykbgrcmyk')
plt.figure(figsize=(16,9))
for i in range(len(pos[:,0])):
if labels[i] != -1 :
plt.plot(pos[i, 0], pos[i, 1], 'o', markerfacecolor=colors[labels[i]], markeredgecolor='k')
else:
plt.plot(pos[i, 0], pos[i, 1], 'x', markerfacecolor=colors[labels[i]], markeredgecolor='k')
plt.savefig("mds.png", bbox_inches='tight')
plt.close()
'''
X_ordered = []
X = np.array(self.data)
colors_ordered = []
fits_ordered = []
colors_cluster = []
for i in range(n_clusters_):
for j in range(len(labels)):
if labels[j] == i and labels[j] != -1:
X_ordered.append(X[j][:])
fits_ordered.append(self.fits[j])
colors_ordered += [colors[labels[j]]]
colors_cluster += [colors_ordered[len(colors_ordered) - 1]]
labels_ = [labels[i] for i in range(len(labels)) if labels[i] != -1]
'''
width = 0.8
ind = np.arange(len(labels_))
fig = plt.figure(figsize=(9, 9))
fig.subplots_adjust(wspace=0.50, hspace=0.25)
k = [i for i in range(len(labels_))]
for j in range(10):
ax = fig.add_subplot(4, 3, j+1)
plt.ylabel(label[j])
plt.ylim(limits[j][0], limits[j][1])
ax.bar(k, [X_ordered[ind][j] for ind in range(len(labels_))], color=colors_ordered)
'''
#plt.show()
'''
plt.figure(9)
colors_cluster = [colors[labels[i]] for i in range(len(labels))]
width = 0.8
ind = np.arange(len(labels))
k = [i for i in range(len(labels))]
for j in range(9):
plt.subplot(330 + j)
plt.ylabel(label[j])
plt.ylim(limits[j][0], limits[j][1])
plt.bar(k, [X[i][j] for i in range(len(labels))], color=colors_cluster)
#plt.xticks(ind+width/2., list(str(i) for i in range(len(labels)) ) )
#plt.show()
'''
self.drawRadarChart(clusters, n_clusters_, colors_cluster, fits_clustered)
self.drawBarPlot(clusters, n_clusters_, colors_cluster, fits_clustered)
def drawRadarChart(self, clusters, n_clusters_, colors, fits):
weapons = []
num_samples = []
for cluster in clusters:
if(len(cluster) > 0):
weapons += [np.mean(cluster, axis=0)]
num_samples += [len(cluster)]
index = 0
while len(weapons) > 0 :
draw_radar(weapons[:1], colors[index], fits[index], num_samples[0])
weapons = weapons[1:]
num_samples = num_samples[1:]
index += 1
plt.savefig("radar"+ str(index) + ".png", bbox_inches='tight')
plt.close()
def drawBarPlot(self, clusters, n_clusters_, colors_cluster, fitness_cluster):
weapons = []
weapons_std = []
limits[0] = (0, log(1/(ROF_MIN/100))*2)
alphabet = list("ABCDEFGHILMNOPQRSTUVZ")
k = np.arange(n_clusters_)
width = 0.35
for cluster in clusters:
if(len(cluster) > 0):
weapons += [list(np.mean(cluster, axis=0))]
weapons_std += [list(np.std(cluster, axis=0))]
fig = plt.figure(figsize=(16, 9))
fig.subplots_adjust(wspace=0.80, hspace=0.25)
for j in range(10):
ax = fig.add_subplot(4, 3, j+1)
plt.ylabel(label[j])
plt.ylim(limits[j][0], limits[j][1])
ax.bar(k, [weapons[ind][j] for ind in range(n_clusters_)], width, color=colors_cluster)
ax.set_xticks(k + width/2)
ax.set_xticklabels( alphabet[:n_clusters_] )
ax = fig.add_subplot(4, 3, 11)
plt.ylabel("BALANCE")
ax.bar(k, [np.mean(fitness_cluster[i], axis = 0)[0] for i in range(n_clusters_)], width, color=colors_cluster,
yerr = [np.std(fitness_cluster[i], axis = 0)[0] for i in range(n_clusters_)])
ax.set_xticks(k + width/2)
ax.set_xticklabels( alphabet[:n_clusters_] )
ax = fig.add_subplot(4, 3, 12)
plt.ylabel("DISTANCE")
ax.bar(k, [np.mean(fitness_cluster[i], axis = 0)[1] for i in range(n_clusters_)], width, color=colors_cluster,
yerr = [np.std(fitness_cluster[i], axis = 0)[1] for i in range(n_clusters_)])
ax.set_xticks(k + width/2)
ax.set_xticklabels( alphabet[:n_clusters_] )
plt.savefig("cluster.png", bbox_inches='tight', dpi = 200)
plt.close()
def main():
data = []
pop_file = open("population_cluster.txt", "r")
content = pop_file.readlines()
temp = []
fitness = []
for string in content:
if "Weapon" in string or "Projectile" in string:
split_spaces = string.split(" ")
for splitted in split_spaces:
if ":" in splitted:
split_colon = splitted.split(":")
temp += [float(split_colon[1])]
if (len(temp) == 10):
data += [temp]
temp = []
if "fitness" in string :
split_spaces = string.split(" ")
temp_fit = []
for splitted in split_spaces:
if "(" in splitted:
splitted = splitted.replace("(", "")
splitted = splitted.replace(")", "")
splitted = splitted.replace(",", "")
splitted = splitted.replace("\n", "")
fit = float(splitted)
temp_fit += [fit]
if ")" in splitted:
splitted = splitted.replace("(", "")
splitted = splitted.replace(")", "")
splitted = splitted.replace(",", "")
splitted = splitted.replace("\n", "")
dist = float(splitted)
temp_fit += [dist]
fitness += [temp_fit]
fits = np.array( [fitness[i][0] for i in range(len(fitness))] )
dists = np.array( [fitness[i][1] for i in range(len(fitness))] )
#get third quartile
q3 = np.percentile(fits, 75)
print("third quartile " + str(q3))
data_filtered = []
dists_filtered = []
fits_filtered = []
#filter out ind with fit < q3
for i in range(len(fits)):
if fits[i] >= 0 :
data_filtered += [data[i]]
dists_filtered += [dists[i]]
fits_filtered += [fitness[i]]
d3 = np.percentile(dists_filtered, 50)
print("median dist" + str(d3))
c = ClusterProceduralWeapon(data_filtered, fits_filtered)
c.cluster()
main()
|
[
"daniele.gravina@mail.polimi.it"
] |
daniele.gravina@mail.polimi.it
|
7c3dcf56c5f50a0d2b92114c2556ecdb85840978
|
a67676617c4777a3a9c6226624c0fd86557cb277
|
/27_remove_element.py
|
35a4a832b623ba749c14e457b64cf12e360abee5
|
[] |
no_license
|
AdditionalPylons/leetcode
|
2599d3c67825d4a50eccc5ea2b56ae2e33e9fbf2
|
b1873d8bb023d1e7182276692cb8c953a8bab8f6
|
refs/heads/master
| 2022-12-04T15:09:48.461400
| 2020-08-18T19:44:25
| 2020-08-18T19:44:25
| 288,321,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
"""Given an array nums and a value val, remove all instances of that value in-place and return the new length.
Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
The order of elements can be changed. It doesn't matter what you leave beyond the new length."""
"""Runtime: 36 ms, faster than 57.44% of Python3 online submissions for Remove Element.
Memory Usage: 13.8 MB, less than 68.23% of Python3 online submissions for Remove Element."""
from typing import List
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
shift_vector = 0
final_length = len(nums)
for i in range(len(nums)):
if nums[i] == val:
shift_vector -=1
final_length -=1
elif shift_vector != 0:
nums[i+shift_vector] = nums[i]
return final_length
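# Worked example (hypothetical input, not from the submission): for
# nums = [3, 2, 2, 3] and val = 3, the loop keeps a running count of removed
# elements in shift_vector, moves each survivor left by that offset, and
# returns 2 with nums[:2] == [2, 2].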
|
[
"bill@popstack.io"
] |
bill@popstack.io
|
72d836455d93771000edfb04b38af3e3e1f679e1
|
adbf3c67b8ebe1e74d70a7fd20328d26a2be7400
|
/myparser.py
|
9786e530cca43bc5309295f7bf70841375293e71
|
[] |
no_license
|
mejitos/stack-overflow-overflow
|
3adc4eacfd92d406560243234a87687ea40bcdb1
|
1681864f51f4949fe2f63cbc682d43e63f013e94
|
refs/heads/master
| 2022-12-10T16:22:59.682860
| 2019-12-08T15:54:35
| 2019-12-08T15:54:35
| 226,351,427
| 0
| 0
| null | 2022-07-06T20:23:37
| 2019-12-06T14:53:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,659
|
py
|
from bs4 import BeautifulSoup
from config import Config
from result import Result
class Parser:
"""Class used for parsing wanted information from HTML"""
def parse_urls(self, resource):
"""Parses clean urls out of HTML resource
Args:
resource: HTML resource as a string
Returns:
List of clean urls as strings
"""
soup = BeautifulSoup(resource, Config.PARSER)
parsed_urls = []
urls = soup.find_all('div', class_='result-link')
for url in urls:
parsed_url = url.find('a')['href']
if parsed_url not in parsed_urls:
parsed_urls.append(parsed_url)
return parsed_urls
def parse_results(self, url, resource):
"""Parses results from single results thread
Args:
resource: HTML resource as a string
Returns
List of Result objects
"""
output = []
soup = BeautifulSoup(resource, Config.PARSER)
# TODO: Parse the question as own function
question = soup.find('div', id='question')
q_votes = question.find('div', class_='votecell').getText().replace('\n', ' ').strip().split(' ')[0]
q_text = question.find('div', class_='post-text').getText().replace('\n', ' ').strip()
q_info_container = question.find('div', class_='post-signature owner grid--cell')
q_date = q_info_container.find('span', class_='relativetime').getText().replace('\n', ' ').strip()
q_user = q_info_container.find('div', class_='user-details').find('a').getText().replace('\n', ' ').strip()
q = Result(url, [{'type': 'text', 'content': q_text}], q_user, q_votes, q_date)
output.append(q)
# TODO: Parse the answers as own function
answers = soup.find_all('div', class_='answer')
for answer in answers:
try:
a_votes = answer.find('div', class_='votecell').getText().replace('\n', ' ').strip().split(' ')[0]
except:
a_votes = 'N/A'
try:
a_info_container = answer.find_all('div', class_='post-signature grid--cell fl0')
if len(a_info_container) > 1:
a_info_container = a_info_container[1]
else:
a_info_container = a_info_container[0]
except:
pass
try:
a_user = a_info_container.find('div', class_='user-details').find('a').getText().replace('\n', ' ').strip()
except:
a_user = 'N/A'
try:
a_date = a_info_container.find('span', class_='relativetime').getText().replace('\n', ' ').strip()
except:
a_date = 'N/A'
try:
all_text = []
a_text = answer.find('div', class_='post-text')
for elem in a_text:
if elem.name == 'pre':
all_text.append({'type': 'code', 'content': elem.text.split('\n')})
elif elem.name == 'p':
all_text.append({'type': 'text', 'content': elem.text.replace('\n', ' ').strip()})
# elif elem.name == 'h1':
# all_text.append({'type': 'heading', 'content': elem.text})
except:
all_text = ['N/A']
a = Result(url, all_text, a_user, a_votes, a_date)
output.append(a)
return output
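# Usage sketch (HTML inputs hypothetical): Parser().parse_urls(search_html)
# returns the de-duplicated hrefs found under div.result-link, while
# parse_results(url, thread_html) returns Result objects for the question
# followed by its answers in page order.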
|
[
"timomehto@gmail.com"
] |
timomehto@gmail.com
|
f5dc838839a6a1297a8ed33656fed6d294e04a4c
|
b2755ce7a643ae5c55c4b0c8689d09ad51819e6b
|
/anuvaad-etl/anuvaad-extractor/aligner/etl-aligner/service/alignwflowservice.py
|
4621a73752a35a772c9831afdbb8a067d95dc515
|
[
"MIT"
] |
permissive
|
project-anuvaad/anuvaad
|
96df31170b27467d296cee43440b6dade7b1247c
|
2bfcf6b9779bf1abd41e1bc42c27007127ddbefb
|
refs/heads/master
| 2023-08-17T01:18:25.587918
| 2023-08-14T09:53:16
| 2023-08-14T09:53:16
| 265,545,286
| 41
| 39
|
MIT
| 2023-09-14T05:58:27
| 2020-05-20T11:34:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
#!/bin/python
import logging
from configs.alignerconfig import anu_dp_wf_aligner_out_topic
from utilities.alignmentutils import AlignmentUtils
from repository.alignmentrepository import AlignmentRepository
from validator.alignmentvalidator import AlignmentValidator
from kafkawrapper.alignmentproducer import Producer
log = logging.getLogger('file')
alignmentutils = AlignmentUtils()
repo = AlignmentRepository()
producer = Producer()
util = AlignmentUtils()
validator = AlignmentValidator()
class AlignWflowService:
def __init__(self):
pass
# Wrapper to build response compatible with the anuvaad etl wf manager.
def getwfresponse(self, result, object_in):
wfresponse = {"taskID": object_in["taskID"], "jobID": object_in["jobID"], "input": result["input"],
"output": result["output"], "workflowCode": object_in["workflowCode"],
"stepOrder": object_in["stepOrder"], "status": "SUCCESS", "state": "SENTENCES-ALIGNED",
"tool": object_in["tool"], "metadata": object_in["metadata"],
"taskStartTime": result["startTime"], "taskEndTime": result["endTime"]}
return wfresponse
def update_wflow_details(self, result, object_in):
wf_res = self.getwfresponse(result, object_in)
producer.push_to_queue(wf_res, anu_dp_wf_aligner_out_topic)
|
[
"vishalmahuli8@gmail.com"
] |
vishalmahuli8@gmail.com
|
a1900950b36a1a0eeada9e202f153c8985039b65
|
e342abb1306e4b083f235a2992ffb863c96c9a86
|
/examples/user/user_playlists.py
|
f71f755bceeeb2c38e3122cc3e6f50cb403624cb
|
[
"MIT"
] |
permissive
|
LorenzoCavatorta/spotify.py
|
102422e6588cb6c49cff026562e37f28cb0650eb
|
7f375f030fbac4ef3dbbd577a898b4d72f37b72b
|
refs/heads/master
| 2020-08-01T17:09:06.795264
| 2019-09-30T12:24:57
| 2019-09-30T12:24:57
| 211,055,943
| 0
| 0
|
MIT
| 2019-09-26T09:50:46
| 2019-09-26T09:50:46
| null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import asyncio
import spotify
client = spotify.Client('someid', 'somesecret')
async def main():
# You can use a user with a http presence
user = await client.user_from_token('sometoken')
# Or you can get a generic user
user = await client.get_user(user_id)  # user_id: a Spotify user id string (placeholder)
# returns a list of spotify.Playlist objects
playlists = await user.get_playlists()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
|
[
"m3nta1@yahoo.com"
] |
m3nta1@yahoo.com
|
32fb9188aec819ccd91d7b952306ceb971f26b87
|
2d308f49fd8326173f2a1cf6ba1ab25b0abff302
|
/rxbpn/testing/tobserver.py
|
1f00d63ddba26fd7f0f8138d5487905f7b6dfcc4
|
[
"BSD-3-Clause"
] |
permissive
|
JIAWea/rxbpn
|
6c407dd38524a2e6b2f800cf9b88ebd0287398c6
|
8760d086c802291398c25d7bd8e4e541962b191a
|
refs/heads/main
| 2023-03-16T18:49:05.563516
| 2021-01-29T09:58:20
| 2021-01-29T09:58:20
| 333,379,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
from rxbp.acknowledgement.acksubject import AckSubject
from rxbp.acknowledgement.continueack import continue_ack
from rxbp.observer import Observer as AckObserver
class TASubscribe(AckObserver):
def on_next(self, value):
# if value[0] == 11:
# return AckSubject()
print("Received: {}, type: {}".format(value, type(value)))
return continue_ack
def on_completed(self):
print('Done!')
def on_error(self, exc):
print('Exception: ', exc)
|
[
"1552937000@qq.com"
] |
1552937000@qq.com
|
7e61c3ab69667c4955fafc38691acf34ab01cb3a
|
1f3e98e3bb36765f869ca3177a47c53ce302ec70
|
/test/output/001.py
|
909bc70c708589058665a3782d4e0a85027d506b
|
[
"MIT"
] |
permissive
|
EliRibble/pyfmt
|
d73dec1061e93a28ad738139edf523e1678d0e19
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
refs/heads/master
| 2020-04-01T10:57:18.521463
| 2019-05-24T21:39:18
| 2019-05-24T21:39:18
| 153,139,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
def main():
print("hello world")
|
[
"eli@authentise.com"
] |
eli@authentise.com
|
97632d9e7d7bf489360aea53ea24698165011038
|
8a1d0238e997e7c953a21fc397f76a6c145b5e09
|
/configs/gb_SMALL_STIMS_LARGE_MATRIX.bcicfg.py
|
401a3c23afe5d9edc2c565238ca773aaf19936c9
|
[] |
no_license
|
luciopercable/eye_loc
|
8c39cb562bcbb46b6d9d01ac224ad77e91193559
|
a30f9690246b1f9f69ccbdb9c53519e9d4677e94
|
refs/heads/master
| 2020-04-19T11:58:37.406875
| 2018-12-08T17:41:55
| 2018-12-08T17:41:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import itertools
r,c = 6,6
step_vert = 110
step_horiz = 110
names = [a for a in u'abcdefghijklmonpqrstuvwxyz_1234567890!@#$%^&*()+=-~[]{};:\"\|?.,/<>½¾¿±®©§£¥¢÷µ¬']
aim_word = 'neuroscience_158'
rows = [list(a) for a in np.arange(r*c).reshape((c,r)).T]
columns = [list(a) for a in np.arange(r*c).reshape((c,r))]
posr = [55 - step_horiz* (len(rows)/2- a) for a in range(r)]
posc = [55- step_vert* (len(columns)/2- a) for a in range(len(columns))]
pos = [(r, c) for c in posc[::-1] for r in posr ]
config = {
'stimuli_dir':'.\\rescources\\stimuli\\letters_grey_black',
'background':'black',
'rows':rows,
'columns':columns,
'positions':pos,
'names':names,
'size':50,
'window_size':(1680, 1050),
'number_of_inputs':12,
'aims_learn': [0,5,35,30,21],#[29,35],#,9,13,17,21,25,30,34,38],
'aims_play': [names.index(a) for a in aim_word],#[0:2]
'shrink_matrix' : 1,
'textsize' : 0.07
}
# print config['aims_play']
#print rows + columns
print(pos)
|
[
"kriattiffer@gmail.com"
] |
kriattiffer@gmail.com
|
f96ecaafff5a7d64947e990639f494c299a6634b
|
2c96ab7bef672279c55a9cc6cd64707e0d0362dd
|
/PopulationSnowVsSunGraph/pythondraft.py
|
1e7bc3a484c9667b0bb401afd8b487fde00f656c
|
[] |
no_license
|
kahmed1996/Project-2
|
ac9185a6ba10a1c3966d75cd1d7f5202a4fe66b4
|
8f4265aa21b892f2fedcc2398fb637759afc6771
|
refs/heads/master
| 2022-09-25T15:44:57.988401
| 2019-09-23T11:35:28
| 2019-09-23T11:35:28
| 205,035,363
| 0
| 0
| null | 2022-08-23T17:52:30
| 2019-08-28T22:39:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
#Starting the data magic. This takes five boxes
engine = create_engine("sqlite:///citypop.db")
engine
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
#Getting all the stuff set so it databass properly
Base.classes.keys()
onlytable=Base.classes.onlytable2
session = Session(engine)
inspector=inspect(engine)
#Putting the DB into a dataframe
dataframe = pd.read_sql_query("SELECT * FROM onlytable2", engine)
#Putting the dataframe into a Panda
dataframe_PD=pd.DataFrame.from_records(dataframe,columns=['date', 'SNOW', 'NOSNOW'])
dataframe_PD.set_index(["date"])  # note: not in-place, so 'date' stays a column for plot.line below
#Getting the variable types right
dataframe_PD['SNOW']=pd.to_numeric(dataframe_PD['SNOW'])
dataframe_PD['NOSNOW']=pd.to_numeric(dataframe_PD['NOSNOW'])
#Plot! (matplotlib line charts)
lines=dataframe_PD.plot.line(x='date',y='SNOW')
lines=dataframe_PD.plot.line(x='date',y='NOSNOW')
plt.savefig('precipitationplot.png')
plt.show()
|
[
"NBLaptop@Nicks-MacBook-Air.local"
] |
NBLaptop@Nicks-MacBook-Air.local
|
351b4eddb3f58e872e3497a9bea27b19aa4d720f
|
4d89652acca24e0bc653e0b4cb5846ceb5b568e4
|
/google-cloud-sdk/lib/surface/run/domain_mappings/list.py
|
ab9c9af7d8e8d0e25820072bf29df8501224e959
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
ibssasimon/LyricLingo
|
410fcec94d2bd3ea75c975c55713f5b8fb913229
|
0dfc951b270912470b36ce0083afd9d4fe41b10a
|
refs/heads/master
| 2021-06-25T10:00:18.215900
| 2020-01-09T00:35:46
| 2020-01-09T00:35:46
| 222,135,399
| 2
| 1
| null | 2021-04-30T20:54:14
| 2019-11-16T17:32:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,061
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for listing all domain mappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.run import commands
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.command_lib.run import resource_args
from googlecloudsdk.command_lib.run import serverless_operations
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class List(commands.List):
"""Lists domain mappings."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To list all Cloud Run domain mappings, run:
$ {command}
""",
}
@classmethod
def CommonArgs(cls, parser):
# Flags specific to connecting to a cluster
cluster_group = flags.GetClusterArgGroup(parser)
namespace_presentation = presentation_specs.ResourcePresentationSpec(
'--namespace',
resource_args.GetNamespaceResourceSpec(),
'Namespace to list domain mappings in.',
required=True,
prefixes=False)
concept_parsers.ConceptParser(
[namespace_presentation]).AddToParser(cluster_group)
parser.display_info.AddFormat(
"""table(
{ready_column},
metadata.name:label=DOMAIN,
route_name:label=SERVICE,
region:label=REGION)""".format(ready_column=pretty_print.READY_COLUMN))
parser.display_info.AddUriFunc(cls._GetResourceUri)
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
def Run(self, args):
"""List available domain mappings."""
conn_context = connection_context.GetConnectionContext(
args, self.ReleaseTrack())
namespace_ref = args.CONCEPTS.namespace.Parse()
with serverless_operations.Connect(conn_context) as client:
self.SetCompleteApiEndpoint(conn_context.endpoint)
return commands.SortByName(client.ListDomainMappings(namespace_ref))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaList(List):
"""Lists domain mappings."""
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
AlphaList.__doc__ = List.__doc__
|
[
"ibssasimon@gmail.com"
] |
ibssasimon@gmail.com
|
48cbc1da28c514c7264777210b3ecea16d3f98c4
|
21ac48139cefea2bf9f4c49509d6c31b12061373
|
/ELK/python_ex1.py
|
ca49a1e122b8e45a8c61ff6de6fa24bb8159e356
|
[] |
no_license
|
dkyos/dev-samples
|
23e60a035c278a2d63c82d84182bfb642a97c5c2
|
61a6cb1d084c85df7c2127da47b6a2bca0cfb6e5
|
refs/heads/master
| 2020-05-21T23:42:36.002672
| 2019-12-04T14:11:36
| 2019-12-04T14:11:36
| 14,510,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
#!/usr/bin/env python
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch()
res = es.search(index="deraw", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
|
[
"dk77.yun@samsung.com"
] |
dk77.yun@samsung.com
|
59fbf899cb91638c4c208f659ae96a918d587461
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/nltk/cluster/__init__.py
|
38a9111e2204c7174d3bfbd82559e79570513835
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:59aceae689404a10cc3a170d5442209edea3f051e4f50c800fa557e86d234639
size 4271
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
d4434ec75f5e7002b51785f4507c97ae1014eb85
|
690ace6a6fe00db3dd0c799d9d7078d18f641daf
|
/graph1.py
|
3e45f25fb01d024c06bad00934ddfb10502e0025
|
[] |
no_license
|
tom523/server-sample
|
6abdcaa27e98954ac83c7835cf7fe55cd8475cff
|
f77228ac6da5ed45d562b3cc113af0e1cecb90c2
|
refs/heads/master
| 2022-08-02T05:28:45.228869
| 2020-05-22T08:14:32
| 2020-05-22T08:14:32
| 266,049,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self, u, v):
self.graph[u].append(v)
def topologicalSort(self):
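# Kahn's algorithm: compute in-degrees, start from the vertices with
# in-degree zero, and whenever a vertex is emitted decrement its
# neighbours' in-degrees, queueing any that drop to zero.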
indegree_dict = defaultdict(int)
for _, adjacency_list in self.graph.items():
for adjacency in adjacency_list:
indegree_dict[adjacency] += 1
zero_indegree_list = list(set(self.graph.keys()) - set(indegree_dict.keys()))
ret = []
while zero_indegree_list:
v = zero_indegree_list.pop()
ret.append(v)
for adj in self.graph[v]:
indegree_dict[adj] -= 1
if indegree_dict[adj] == 0:
zero_indegree_list.append(adj)
print(ret)
g = Graph(6)
g.addEdge(5, 2)
g.addEdge(5, 0)
g.addEdge(4, 0)
g.addEdge(4, 1)
g.addEdge(2, 3)
g.addEdge(3, 1)
g.topologicalSort()
|
[
"358777330@qq.com"
] |
358777330@qq.com
|
5bfc5f2cabecf0d946bad1504ba6985fda33a417
|
1b1144757634a9cab972ed5696199910ba762912
|
/mysite/app/user/migrations/0005_auto_20210302_1918.py
|
dd505597b2f7558b46067bafbb80a58d77b59243
|
[] |
no_license
|
bair2503/Python
|
67a44905c499c4cec1d29c090112fecd0e82e1c4
|
1ae168cbf269b781b8fd7d4b2fbcfa828362f3d4
|
refs/heads/main
| 2023-06-24T13:41:59.636586
| 2021-07-28T19:26:09
| 2021-07-28T19:26:09
| 390,099,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Generated by Django 3.1 on 2021-03-02 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0004_auto_20210302_1911'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='address',
field=models.CharField(max_length=200),
),
]
|
[
"bairgatapov93@mail.ru"
] |
bairgatapov93@mail.ru
|
b442efd2544e3d179838e135dad1cad579f97c26
|
ef9a176d58d6b6b5c3c135580bd2283dafe2047e
|
/product_catalogs/odbc_wrappers/MongoConnector.py
|
f178d61c62cc66d3079b6295c5591d7b49b48928
|
[] |
no_license
|
stevesette/DS_4300
|
fe60f15e33be023697266afceb11949116ceaa55
|
9971036a4f67782f76eac736ea8cd7122c1e0100
|
refs/heads/master
| 2020-12-05T19:03:06.571948
| 2020-04-11T17:03:23
| 2020-04-11T17:03:23
| 232,217,392
| 0
| 0
| null | 2020-03-21T03:07:57
| 2020-01-07T01:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 779
|
py
|
import pymongo
class MongoConnector:
"""
We define the database as 'hw3' manually in this assignment to make the assignment simpler, we could have taken
that in as a variable when we defined the connection but with no other odbc connector to compare to in this assignment
it seemed a bit pointless.
"""
def __init__(self):
self.connection = pymongo.MongoClient("mongodb://127.0.0.1:27017/")
self.db = self.connection["hw3"]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def insert_file(self, filename, filedata):
self.db[filename].insert_many(filedata)
def run_query(self, collection, query):
return [x for x in self.db[collection].find(query)]
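# Usage sketch (collection and query values hypothetical):
# with MongoConnector() as mc:
#     mc.insert_file('artists', rows)            # collection named after the file
#     print(mc.run_query('artists', {'genre': 'rock'}))
# Note that __exit__ is a no-op, so the underlying MongoClient is never closed.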
|
[
"setteducati.s@husky.neu.edu"
] |
setteducati.s@husky.neu.edu
|
1b5142f366dc75a64591a6b27ee82c0362541e40
|
23052f3c9dcfecb3cf50e5593960d47d257a5579
|
/praw_blog.py
|
1aa3eaad6f3a15e13d8581447d4fc99a0e2ccc28
|
[] |
no_license
|
sergewh20/PRAW-blog
|
77b4dcc121bbd0a4ce9e1f1fc956644874dd81e5
|
2567a3125a9317cdb20691cf78f4cf29b8eb67ca
|
refs/heads/master
| 2022-11-14T03:25:21.099797
| 2020-07-02T12:40:48
| 2020-07-02T12:40:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
import praw
import pandas as pd
reddit = praw.Reddit(client_id = 'CLIENT_ID',
client_secret = 'CLIENT_SECRET',
username = 'USERNAME',
password = 'PASSWORD',
user_agent = 'PRAW Blog')
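# (script-type OAuth flow: client_id/client_secret identify the registered app
# and username/password the account; the values above are placeholders)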
subreddit_list= ['india','worldnews','announcements','funny','AskReddit',
'gaming','pics','science','movies','todayilearned'
]
author_list = []
id_list = []
link_flair_text_list = []
num_comments_list = []
score_list = []
title_list = []
upvote_ratio_list = []
for subred in subreddit_list:
subreddit = reddit.subreddit(subred)
hot_post = subreddit.hot(limit = 10000)
for sub in hot_post:
author_list.append(sub.author)
id_list.append(sub.id)
link_flair_text_list.append(sub.link_flair_text)
num_comments_list.append(sub.num_comments)
score_list.append(sub.score)
title_list.append(sub.title)
upvote_ratio_list.append(sub.upvote_ratio)
print(subred, 'completed; ', end='')
print('total', len(author_list), 'posts has been scraped')
df = pd.DataFrame({'ID':id_list,
'Author':author_list,
'Title':title_list,
'Count_of_Comments':num_comments_list,
'Upvote_Count':score_list,
'Upvote_Ratio':upvote_ratio_list,
'Flair':link_flair_text_list
})
df.to_csv('reddit_dataset.csv', index = False)
|
[
"noreply@github.com"
] |
sergewh20.noreply@github.com
|
4b766dd20dadec39d3f2a2f88debe30a8f290e2a
|
4551ef7051f937af33908fdd0768bc7174caba97
|
/dwh/pipeline.py
|
5cca159ca5561ae71560c280bf61377f2d999c4b
|
[] |
no_license
|
thangaiya/SelfService
|
171163aba5d72053beaa55887b563fc7477b5292
|
7e6e2827b27fe81e29b06b546b880d9440912264
|
refs/heads/master
| 2021-04-09T14:51:30.211736
| 2018-03-18T12:27:08
| 2018-03-18T12:27:08
| 125,722,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,433
|
py
|
import re
from csv import DictReader
from collections import *
from itertools import chain
from xml.parsers.expat import ParserCreate
from unidecode import unidecode
from pathlib import Path
from typing import *
from typing.io import *
from typing.re import *
from .utils import dispatch_function, XMLStack, PersistentBuffer
from .configuration import Configurations
def normalize(field):
if isinstance(field, str):
field = unidecode(field)
return field
@dispatch_function
def parse(file: IO, type: Text, **kwargs) -> Hashable:
"""Dispatch `file` to the appropriate parser (specified by `type`)"""
return type.lower()
@parse.register('csv')
def _(file: IO, type: Text, **kwargs) -> Iterator[Dict]:
"""Parse a csv `file`"""
yield from DictReader(file, **kwargs)
@parse.register('txt')
def _(file: IO, type: Text, pattern: Pattern, flags: int = 0) -> Iterator[Dict]:
"""Parse a text `file`"""
yield from (item.groupdict() for item in re.finditer(pattern, file.read(), flags))
@parse.register('xml')
def _(file: IO, type: Text, buffer_size: int = 65536, buffer_text: bool = True) -> Iterator[Tuple]:
"""Parse an xml `file`"""
parser = ParserCreate()
parser.buffer_size = buffer_size
parser.buffer_text = buffer_text
stack = XMLStack()
parser.StartElementHandler = stack.start
parser.EndElementHandler = stack.end
parser.CharacterDataHandler = stack.character
for line in file:
parser.Parse(line, False)
yield from stack.items()
stack.clear()
def project(values: Dict, mappings: Dict) -> Dict:
"""Performs a projection of `values` (row) to `mappings` (schema)"""
return {(alias or field): normalize(values.get(field)) for (field, alias) in mappings.items()}
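# e.g. project({'Name': 'Ann', 'Age': '7'}, {'Name': 'name', 'Age': None})
# -> {'name': 'Ann', 'Age': '7'}: the alias is used when given, the original
# field name otherwise, and string values are run through unidecode.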
@dispatch_function
def dispatch(item: Union[Dict, Tuple], outputs: List[Dict]) -> Hashable:
"""Dispatch `item` to the appropriate dispatcher (based on its type)"""
return type(item)
@dispatch.register(OrderedDict)
@dispatch.register(dict)
def _(item: Dict, outputs: List[Dict]) -> Iterator[Tuple[Text, Dict]]:
"""Dispatch `item` (row) to multiple `outputs` (tables)"""
yield from ((output['name'], project(item, output['fields'])) for output in outputs)
@dispatch.register(tuple)
def _(item: Tuple, outputs: List[Dict]) -> Iterator[Tuple[Text, Dict]]:
"""Dispatch `item` (row) to multiple `outputs` (tables)"""
tag, values = item
yield from ((output['name'], project(values, output['fields'])) for output in outputs if tag == output['tag'])
def apply_pipeline(file: Path, config: Dict) -> Iterator[Tuple[Text, Dict]]:
"""Pass `file` through the pipeline (specified by `config`)"""
if re.match(config.get('pattern'), file.name):
with open(file, **config.get('source_args')) as f:
yield from chain.from_iterable(dispatch(item, config.get('outputs')) for item in parse(f, **config.get('parser_args')))
def get_rows(file: Path, configs: List[Dict]) -> Iterator[Tuple[Text, Dict]]:
"""Pass `file` through all the pipelines (specified by `configs`)"""
yield from chain.from_iterable((apply_pipeline(file, config) for config in configs))
def persist(pb: PersistentBuffer, config: Configurations, file):
try:
for table, row in get_rows(file, config.get()):
pb.add(table, row)
except Exception as e:
raise e
finally:
file.unlink()
|
[
"thangaiya@gmail.com"
] |
thangaiya@gmail.com
|
8b7e403a7ac3e0d9f15db32a34eb8da70fbb217a
|
0e33e481ce9122b0d43ec033dc1d0c162b67d4ee
|
/blog/migrations/0001_initial.py
|
49409277632f1fe3f1c46d98e394c58b70cb6477
|
[] |
no_license
|
lunnbag/my-first-blog
|
1f2a69f9c6407fc775b925c41a60da1dcfb40bb2
|
f3af52fa53f18793546847074066e559158c89ec
|
refs/heads/master
| 2020-03-20T18:18:33.571922
| 2018-06-16T15:41:50
| 2018-06-16T15:41:50
| 137,582,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-16 12:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"lauralunn@hotmail.co.uk"
] |
lauralunn@hotmail.co.uk
|
c8c37c72b598f4e577de6c74001660675ab2e307
|
a25aac80385265247c23c571463753b6b71051bf
|
/pre_traitement.py
|
9967bd322995d35bfa97f911a19153f154d8695e
|
[] |
no_license
|
XavierFarchetto/Hackathon
|
3a6ccfc436a2657d5b7ce3edc84353b7560b6db4
|
42649d01d894bd7118fe03eb7327124677d68883
|
refs/heads/master
| 2020-04-06T14:03:59.457239
| 2018-11-14T15:14:32
| 2018-11-14T15:14:32
| 157,526,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
import os
input_directory = "annotated_job_offers"
output_directory = "jo"
def verify_annotation(current_line):
words = current_line.split("\t")
if len(words) < 2:
word = words[0]
if "\n" in word:
word = word[:-1]
line = "\t".join([word, "O"]) + "\n"
else:
annotation = words[1]
if annotation == "\n" or annotation == "":
annotation = "O"
if "\n" in annotation:
annotation = annotation[:-1]
classification = annotation.split(" ")
if len(classification) > 1:
union = "_".join(classification)
line = "\t".join([words[0], union]) + "\n"
else:
line = "\t".join([words[0], annotation]) + "\n"
return line
def study_tag(line, list):
tag = line.split("\t")[1]
if not(tag in list):
list.append(tag)
return sorted(list)
else:
return list
def verify_file(input_directory, output_directory, file_name, tags_list):
input_file_name = os.path.join(input_directory, file_name)
output_file_name = os.path.join(output_directory, file_name)
tag_list = tags_list
with open(output_file_name, "w") as output:
with open(input_file_name, "r") as input:
line = input.readline()
while line:
line = verify_annotation(line)
output.write(line)
tag_list = study_tag(line[:-1], tag_list)
line = input.readline()
return tag_list
def write_tags_file(list):
with open("tags.txt", "w") as file :
for tag in list:
file.write(tag+"\n")
def verify_directory(input_directory, output_directory, first_file=1, last_file=5428):
file_list = sorted(os.listdir(input_directory))[first_file-1:last_file]
tag_list = []
for counter, file in enumerate(file_list):
tag_list = verify_file(input_directory, output_directory, file, tag_list)
print("Element {}\{} - File {} reviewed".format(str(counter+first_file), last_file, file))
write_tags_file(tag_list)
if __name__ == "__main__":
# 5428 file max
verify_directory(input_directory, output_directory)
|
[
"xavier.farchetto@telecomnancy.eu"
] |
xavier.farchetto@telecomnancy.eu
|
8aa4afb7f7c82069446a0267272eef69cf23eb38
|
4c78f66b6f852fa4ad0729eebadc3ee96a65ed57
|
/from_article_url.py
|
0e9ca6d389f946421d08ce31c2e7e20fd5e6dd84
|
[] |
no_license
|
proteeti13/first-page-news
|
04c933fc5a8c4dbfa70b7fa1e14a45369dab0bde
|
50ffca23371c205a56d7d8b6a499a3822a175452
|
refs/heads/master
| 2020-07-30T20:40:22.603338
| 2019-09-23T10:45:38
| 2019-09-23T10:45:38
| 210,353,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,904
|
py
|
import requests
from bs4 import BeautifulSoup
import articleDateExtractor
import dateparser
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
url_prothom_alo = "https://www.prothomalo.com/bangladesh/article/1614818/%E2%80%98%E0%A6%AA%E0%A6%9B%E0%A6%A8%E0%A7%8D%E0%A6%A6%E0%A7%87%E0%A6%B0%E2%80%99-%E0%A6%97%E0%A6%BE%E0%A7%9C%E0%A6%BF-%E0%A6%95%E0%A6%BF%E0%A6%A8%E0%A6%A4%E0%A7%87-%E0%A7%AA%E0%A7%AE-%E0%A6%B2%E0%A6%BE%E0%A6%96-%E0%A6%9F%E0%A6%BE%E0%A6%95%E0%A6%BE-%E0%A6%AC%E0%A6%BE%E0%A7%9C%E0%A6%A4%E0%A6%BF-%E0%A6%97%E0%A7%81%E0%A6%A8%E0%A6%9B%E0%A7%87-%E0%A6%AC%E0%A6%BF%E0%A6%AE%E0%A6%BE%E0%A6%A8"
url_cnn = "https://edition.cnn.com/interactive/2019/09/business/samsung-headquarters-south-korea/index.html"
url_bbc = "https://www.bbc.com/news/av/stories-49666419/life-saving-surgery-but-not-by-a-doctor"
url_daily_star = "https://www.thedailystar.net/frontpage/rohingyas-voter-list-election-commission-staffers-fraud-ring-behind-it-1801495"
url_bdnews = "https://bdnews24.com/world/2019/09/18/iran-s-rouhani-blames-us-saudi-for-conflict-in-region"
url_huffpost = "https://www.huffpost.com/entry/migrant-mothers-children-suing-trump-asylum-ban_n_5d819313e4b0957256ada9d6?guccounter=1&guce_referrer=aHR0cHM6Ly93d3cuaHVmZnBvc3QuY29tLw&guce_referrer_sig=AQAAANOwUFQmmgtG832C2zFu5uIzShOo3_RozywzkTKf85PBdUFTHQKAGjHyBDynkdwTJxHck2dYWcFBGL2IzcnmF5qdCPWruhCVMQGJ6w0r-1adq1h7JtIyl6ebGslvov3BUdBonintC93gn1dTVOJkdSpfmxkd4L0zipjURTlwZjhC"
url_nytimes = "https://www.nytimes.com/2019/09/17/climate/trump-california-emissions-waiver.html?action=click&module=Top%20Stories&pgtype=Homepage"
url_list = [
url_prothom_alo,
url_cnn,
url_bbc,
url_daily_star,
url_bdnews,
url_huffpost,
url_nytimes,
]
for url in url_list:
response = requests.get(url, headers=header)
soup = BeautifulSoup(response.text, "lxml")
site_name = soup.find("meta", property="og:site_name")
title = soup.find("meta", property="og:title")
content = soup.find("meta", property="og:description")
url = soup.find("meta", property="og:url")
image = soup.find("meta", property="og:image")
# date = dateparser.parse(soup)
print(
"site-name : ", site_name["content"] if site_name else "No site_name given here"
)
print("title : ", title["content"] if title else "No title given here")
print("content : ", content["content"] if content else "No description given here")
print("image : ", image["content"] if image else "No image given here")
print("url : ", url["content"] if url else "No url given here")
# print("date :", date)
# from newspaper import Article
# url = "https://bdnews24.com/world/2019/09/18/iran-s-rouhani-blames-us-saudi-for-conflict-in-region"
# article = Article(url)
# article.download()
# article.parse()
# print(article.publish_date)
|
[
"proteeti13@gmail.com"
] |
proteeti13@gmail.com
|
9c90388d3381674e0cb41add4b6132118e241883
|
444970b3dda58e0feb7adb6faf94d024d4672749
|
/Processor.py
|
fab2e317671631624bd55c2fbc719f1d57e4540a
|
[] |
no_license
|
kyrie2014/CI-Test
|
061eb494a115882a975afe79d3a17a52da3541e1
|
9244d3c8b904146baa4cd58c57a0c6ab5c87ed0a
|
refs/heads/master
| 2020-03-31T20:03:15.559493
| 2018-10-11T05:16:45
| 2018-10-11T05:16:45
| 152,523,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,819
|
py
|
# coding:utf-8
from xml.dom import minidom, Node
from os.path import *
import sys
import pandas as pd
import re
import os
import copy
class AutoProcessor(object):
xml_path = r'\\shnas01\publicshared\BM\BM_AutoTest\AutoTest_Case'
bug_path = r'\\shnas01\publicshared\BM\BM_AutoTest\AutoBugs\bm_bug.xlsx'
def __init__(self, log_path=None, case_dir=None, flag=None):
self.result_path = join(
join(
log_path,
AutoProcessor.latest_log_dirs(log_path, flag)[0]
),
r'testResult.xml'
) if log_path is not None else None
self.xml_path = join(self.xml_path, case_dir)
# print self.result_path
def filter_bug(self):
        if not exists(self.bug_path):  # nothing to filter when the bug workbook is missing
            return None
        # extract the test case name from the bug summary
def filter(summary):
result = re.findall(r'#([\S\s]*)#', summary)
if len(result) > 0:
return result[0]
return None
df = pd.DataFrame(pd.read_excel(self.bug_path))
new_df = df.apply(lambda summary: map(filter, df['Summary']), axis=0)
return new_df['Summary'].tolist()
def comment_node(self, xml_name, cases):
xml_file = join(self.xml_path, xml_name)
doc = minidom.parse(xml_file).documentElement
element = doc.getElementsByTagName('TestCase') + doc.getElementsByTagName('Test')
for node in element:
if node.getAttribute('name') in cases:
comment = node.ownerDocument.createComment(node.toxml())
node.parentNode.replaceChild(comment, node)
with open(xml_file, 'wb') as file:
file.write(doc.toxml())
def uncomment_node(self, xml_name, cases):
import re
xml_file = join(self.xml_path, xml_name)
content, flag = '', False
pattern = re.compile(r'<!--[\S\s]*{}[\S\s]*'.format('|'.join(cases)))
with open(xml_file, 'r') as file:
for line in file:
filter = pattern.findall(line)
if filter:
if '-->' not in filter[-1]:
flag = True
line = re.sub('(<!)?--[>]?', '', line)
if flag and '-->' in line:
line = line.replace('-->', '')
content += line
with open(xml_file, 'wb') as file:
file.write(content)
def uncomment_all_nodes(self):
import re
for _, _, files in os.walk(self.xml_path):
for file in files:
if '.xml' not in file or '_plan.xml' in file:
continue
print 'File: ' + file
xml_file = join(self.xml_path, file)
content, is_modified = '', False
with open(xml_file, 'r') as file:
for line in file:
if re.findall(r'(<!--\s?<)|(>\s?-->)', line):
print 'Modified: ' + line
line = re.sub('(<!)?--[>]?', '', line)
is_modified = True
content += line
if is_modified:
with open(xml_file, 'wb') as file:
file.write(content)
@staticmethod
def latest_log_dirs(path, flag):
return sorted(
[
(x, getctime(join(path, x)))
for x in os.listdir(path)
if (isdir(join(path, x)) and flag in x)
],
key=lambda i: i[1]
)[-1]
def comment_and_create_tpm_bug(self):
fail_cases = dict()
doc = minidom.parse(self.result_path).documentElement
# node = doc.getElementsByTagName('Summary')[-1]
# ratio = node.getAttribute('firstRunPassRate').strip('%')
# if int(ratio) <= 30:
# return fail_cases_list
node = doc.getElementsByTagName('TestBuildInfo')[-1]
node2 = doc.getElementsByTagName('BuildInfo')[-1]
        # collect the names of the test cases that failed in the result file
nodes = doc.getElementsByTagName('TestCase')
for n in nodes:
xml_file_name = ''
ch_nodes = [ch for ch in n.childNodes if ch.nodeType == n.ELEMENT_NODE]
instance = copy.copy(n)
for _ in range(10):
instance = instance.parentNode
if instance.tagName == 'TestPackage':
xml_file_name = instance.getAttribute('appPackageName')
break
if 'pyInitialize' in xml_file_name:
continue
if len(ch_nodes) > 1:
for cn in ch_nodes:
if 'fail' == cn.getAttribute('result'):
fail_cases[cn.getAttribute('name')] = xml_file_name
else:
cn = ch_nodes[-1]
if 'fail' == cn.getAttribute('result'):
fail_cases[n.getAttribute('name')] = xml_file_name
if fail_cases is None:
return
# self.comment_node(comment_cases)
        # initialise the bug information
from BugInfo import BugInfo, BugDescription
bug_info = BugInfo()
bug_desc = BugDescription(
device= node.getAttribute('deviceID'),
url = node.getAttribute('pack-url'),
hw = node.getAttribute('product-hardware'),
path = node.getAttribute('sharedPath'),
ver = node2.getAttribute('build_display')
)
from HttpHelper import HttpHelper
for case, xml_name in fail_cases.items():
            # comment out the failing case
            self.comment_node(xml_name + '.xml', [case])  # wrap in a list so the membership test matches exact names
bug_desc_instance = copy.copy(bug_desc)
bug_desc_instance.case_name = case + '@' + xml_name
            # file the TPM bug
# print bug_info.format_bug_info(bug_desc_instance)
HttpHelper().put(bug_info.format_bug_info(bug_desc_instance))
def init_option():
from optparse import OptionParser
parser = OptionParser(
usage='%prog -p [common|uncommon|reset] [case_directory] [sn]',
description='common or uncommon cases, and file bugs.'
)
parser.add_option(
'-p',
'--param',
dest='param',
nargs=3,
action='store',
help='common or uncommon cases, and file bugs',
metavar='PARAM'
)
(options, args) = parser.parse_args()
return options.param if options.param else sys.exit()
if '__main__' == __name__:
param, case_dir, sn = init_option()
# param, sn = 'common', 'SC77311E10181120412'
cases = dict()
try:
if param == 'common':
print 'common specified node'
path = os.getcwd().replace('testcases\ext', 'results')
ap = AutoProcessor(path, case_dir, sn)
ap.comment_and_create_tpm_bug()
elif param == 'uncommon':
print 'uncommon specified node'
ap = AutoProcessor(case_dir=case_dir)
closed_bugs = ap.filter_bug()
if closed_bugs is not None:
for bug in closed_bugs:
print 'Bug --> ' + bug
tmp = bug.split('@')
                    if tmp[1] not in cases:
cases[tmp[1]] = [tmp[0]]
else:
cases[tmp[1]].append(tmp[0])
                for file_name, case_list in cases.items():
                    ap.uncomment_node(file_name, case_list)
else:
print 'Not found bug!'
elif param == 'reset':
print 'reset all common node'
ap = AutoProcessor(case_dir=case_dir)
ap.uncomment_all_nodes()
except Exception, cause:
print cause
print 'Total test is pass'
|
[
"Kyrie.Liu@spreadtrum.com"
] |
Kyrie.Liu@spreadtrum.com
|
ad880090cfa86821407e0941820ac38bb2b6257a
|
e6dab5aa1754ff13755a1f74a28a201681ab7e1c
|
/.parts/lib/cherrypy/cherrypy/lib/static.py
|
730d86b5c8aca8450f7467f6e5d78d45615cc9e1
|
[] |
no_license
|
ronkagan/Euler_1
|
67679203a9510147320f7c6513eefd391630703e
|
022633cc298475c4f3fd0c6e2bde4f4728713995
|
refs/heads/master
| 2021-01-06T20:45:52.901025
| 2014-09-06T22:34:16
| 2014-09-06T22:34:16
| 23,744,842
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
/home/action/.parts/packages/googleappengine/1.9.4/lib/cherrypy/cherrypy/lib/static.py
|
[
"ron.y.kagan@gmail.com"
] |
ron.y.kagan@gmail.com
|
15e3d0c90005d336f978715f7fa6b9fbb8df55ad
|
1198238841bedc19e9cc16c2ba22b4c6861cad1f
|
/node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi
|
9fc34374b9ad035291f3ad04b7ab44b64eeb9340
|
[
"MIT"
] |
permissive
|
henryfradley/relax_description
|
1ba44bf25fe740c2c24c92215730faf4f01ab954
|
c7617d06b14a6cb666c69bc0b1530d244fad8ac7
|
refs/heads/master
| 2023-01-19T08:10:24.230424
| 2020-11-21T03:34:38
| 2020-11-21T03:34:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,813
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt67l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/henryfradley/Library/Caches/node-gyp/12.18.3",
"standalone_static_library": 1,
"save_dev": "true",
"dry_run": "",
"legacy_bundling": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/Users/henryfradley/.nvm/versions/node/v12.18.3/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/henryfradley/.nvm/versions/node/v12.18.3/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/henryfradley/.npm-init.js",
"userconfig": "/Users/henryfradley/.npmrc",
"cidr": "",
"node_version": "12.18.3",
"user": "501",
"auth_type": "legacy",
"editor": "vim",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/Users/henryfradley/.nvm/versions/node/v12.18.3/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/henryfradley/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.6 node/v12.18.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/_r/vn81lt5d45s78j324zmphdkm0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/Users/henryfradley/.nvm/versions/node/v12.18.3"
}
}
|
[
"fradleyhenry@gmail.com"
] |
fradleyhenry@gmail.com
|
e940cbbc122b704ae013d728430934c12dab1aa9
|
b4026496a66b0577c96e45c6e7b18faeb433f328
|
/scripts/ci/pre_commit/pre_commit_check_order_setup.py
|
e94109c0469bfa3bedba6ec8703410a30a125bd2
|
[
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] |
permissive
|
dferguson992/airflow
|
4beb2f970b77645d56546fb558953fe205d7355b
|
3d52b3ed8e6ed7cd4298edc731d88e9de0406df9
|
refs/heads/master
| 2021-08-19T01:32:45.597782
| 2020-11-16T19:54:29
| 2020-11-16T19:54:29
| 311,678,786
| 1
| 0
|
Apache-2.0
| 2020-11-16T19:54:30
| 2020-11-10T14:10:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,657
|
py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test the ordering of dependencies in setup.py
"""
import os
import re
import sys
from os.path import abspath, dirname
from typing import List
errors = []
MY_DIR_PATH = os.path.dirname(__file__)
SOURCE_DIR_PATH = os.path.abspath(os.path.join(MY_DIR_PATH, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, SOURCE_DIR_PATH)
def _check_list_sorted(the_list: List[str], message: str) -> None:
sorted_list = sorted(the_list)
if the_list == sorted_list:
print(f"{message} is ok")
return
i = 0
while sorted_list[i] == the_list[i]:
i += 1
print(f"{message} NOK")
errors.append(
f"ERROR in {message}. First wrongly sorted element" f" {the_list[i]}. Should be {sorted_list[i]}"
)
def setup() -> str:
setup_py_file_path = abspath(os.path.join(dirname(__file__), os.pardir, os.pardir, os.pardir, 'setup.py'))
with open(setup_py_file_path) as setup_file:
setup_context = setup_file.read()
return setup_context
def check_main_dependent_group(setup_context: str) -> None:
"""
    Test the ordering of dependency groups between the markers
    '# Start dependencies group' and '# End dependencies group' in setup.py
"""
pattern_main_dependent_group = re.compile(
'# Start dependencies group\n(.*)# End dependencies group', re.DOTALL
)
main_dependent_group = pattern_main_dependent_group.findall(setup_context)[0]
pattern_sub_dependent = re.compile(' = \\[.*?\\]\n', re.DOTALL)
main_dependent = pattern_sub_dependent.sub(',', main_dependent_group)
src = main_dependent.strip(',').split(',')
_check_list_sorted(src, "Order of dependencies")
def check_sub_dependent_group(setup_context: str) -> None:
r"""
    Test the ordering within each dependency group declared like
    `^dependent_group_name = [.*?]\n` in setup.py
"""
pattern_dependent_group_name = re.compile('^(\\w+) = \\[', re.MULTILINE)
dependent_group_names = pattern_dependent_group_name.findall(setup_context)
pattern_dependent_version = re.compile('[~|><=;].*')
for group_name in dependent_group_names:
pattern_sub_dependent = re.compile(f'{group_name} = \\[(.*?)\\]', re.DOTALL)
sub_dependent = pattern_sub_dependent.findall(setup_context)[0]
pattern_dependent = re.compile('\'(.*?)\'')
dependent = pattern_dependent.findall(sub_dependent)
src = [pattern_dependent_version.sub('', p) for p in dependent]
_check_list_sorted(src, f"Order of sub-dependencies group: {group_name}")
def check_alias_dependent_group(setup_context: str) -> None:
"""
    Test the ordering of each alias dependency group declared like
`alias_dependent_group = dependent_group_1 + ... + dependent_group_n` in setup.py
"""
pattern = re.compile('^\\w+ = (\\w+ \\+.*)', re.MULTILINE)
dependents = pattern.findall(setup_context)
for dependent in dependents:
src = dependent.split(' + ')
_check_list_sorted(src, f"Order of alias dependencies group: {dependent}")
def check_install_and_setup_requires(setup_context: str) -> None:
"""
    Test the ordering of dependencies in the do_setup section
install_requires and setup_requires in setup.py
"""
pattern_install_and_setup_requires = re.compile('(setup_requires) ?= ?\\[(.*?)\\]', re.DOTALL)
install_and_setup_requires = pattern_install_and_setup_requires.findall(setup_context)
for dependent_requires in install_and_setup_requires:
pattern_dependent = re.compile('\'(.*?)\'')
dependent = pattern_dependent.findall(dependent_requires[1])
pattern_dependent_version = re.compile('[~|><=;].*')
src = [pattern_dependent_version.sub('', p) for p in dependent]
_check_list_sorted(src, f"Order of dependencies in do_setup section: {dependent_requires[0]}")
def check_extras_require(setup_context: str) -> None:
"""
    Test the ordering of dependencies in the do_setup section
extras_require in setup.py
"""
pattern_extras_requires = re.compile(r'EXTRAS_REQUIREMENTS: Dict\[str, List\[str\]] = {(.*?)}', re.DOTALL)
extras_requires = pattern_extras_requires.findall(setup_context)[0]
pattern_dependent = re.compile('\'(.*?)\'')
src = pattern_dependent.findall(extras_requires)
_check_list_sorted(src, "Order of dependencies in: extras_require")
def check_provider_requirements(setup_context: str) -> None:
"""
    Test the ordering of dependencies in the do_setup section
providers_require in setup.py
"""
pattern_extras_providers_packages = re.compile(
r'PROVIDERS_REQUIREMENTS: Dict\[str, Iterable\[str\]\] = {(.*?)}', re.DOTALL
)
extras_requires = pattern_extras_providers_packages.findall(setup_context)[0]
pattern_dependent = re.compile('"(.*?)"')
src = pattern_dependent.findall(extras_requires)
_check_list_sorted(src, "Order of dependencies in: providers_require")
def check_extras_provider_packages(setup_context: str) -> None:
"""
    Test the ordering of dependencies in the do_setup section
    extras_provider_packages in setup.py
"""
pattern_extras_requires = re.compile(
r'EXTRAS_PROVIDERS_PACKAGES: Dict\[str, Iterable\[str\]\] = {(.*?)}', re.DOTALL
)
extras_requires = pattern_extras_requires.findall(setup_context)[0]
pattern_dependent = re.compile('"(.*?)":')
src = pattern_dependent.findall(extras_requires)
_check_list_sorted(src, "Order of dependencies in: extras_provider_packages")
def checks_extra_with_providers_exist() -> None:
from setup import EXTRAS_REQUIREMENTS, EXTRAS_PROVIDERS_PACKAGES # noqa # isort:skip
message = 'Check if all extras have providers defined in: EXTRAS_PROVIDERS_PACKAGES'
local_error = False
for key in EXTRAS_REQUIREMENTS.keys(): # noqa
if key not in EXTRAS_PROVIDERS_PACKAGES.keys(): # noqa
if not local_error:
local_error = True
print(f"Extra {key} NOK")
errors.append(
f"ERROR in {message}. The {key} extras is missing there."
" If you do not want to install any providers with this extra set it to []"
)
if not local_error:
print(f"{message} is ok")
if __name__ == '__main__':
setup_context_main = setup()
check_main_dependent_group(setup_context_main)
check_alias_dependent_group(setup_context_main)
check_sub_dependent_group(setup_context_main)
check_install_and_setup_requires(setup_context_main)
check_extras_require(setup_context_main)
check_provider_requirements(setup_context_main)
check_extras_provider_packages(setup_context_main)
checks_extra_with_providers_exist()
print()
print()
for error in errors:
print(error)
print()
if errors:
sys.exit(1)
|
[
"noreply@github.com"
] |
dferguson992.noreply@github.com
|
ac30e12fa3d62c3d9eca81ef631868ac0afd7eac
|
ccd1c59f380326eaa7a5ec069e8bf49ceae5589a
|
/07_farangeit_to_celsium.py
|
04c298e4caf8e3aa3f3259ddda4ee5d3fbded1e0
|
[] |
no_license
|
Froststorm/Codeabby_learn
|
bfc7c27797c4fe72b4657556c92e00535d1be7b9
|
9e00624a87db4990721897cb3b01675028d814e5
|
refs/heads/master
| 2021-01-19T19:02:30.937847
| 2017-11-30T19:05:22
| 2017-11-30T19:05:22
| 101,183,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
def toCelsium(a):
    return round(5 / 9 * (a - 32))

# read one line of space-separated integers; the first value is the count
listOfNums = [int(x) for x in input().split()]
print(listOfNums[1:], end="\n\n\n")
print(" ".join([str(toCelsium(i)) for i in listOfNums[1:]]))
|
[
"andrey.paladin@gmail.com"
] |
andrey.paladin@gmail.com
|
d45ddbfb167176a465fa730c943239468eb2aa4a
|
d95699c77bfe9e74e358b277f1a2a72dd471cc73
|
/train101.py
|
124dbb628500e464a0fe8ca85125c5319ed55add
|
[] |
no_license
|
ckfanzhe/HuaLuCup2020
|
61dffb29a7a0f41cf0607b6080c76a1a6cee2d26
|
307b3a741060db07df56d865ac8754663c089a69
|
refs/heads/main
| 2023-06-08T13:54:10.901945
| 2021-06-20T08:53:44
| 2021-06-20T08:53:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
# -*- coding: utf-8 -*-
import argparse
import random
import numpy as np
import torch.backends.cudnn as cudnn
import multiprocessing
import time
import torch
import torch.nn as nn
from torchsummary import summary
import os
from cfg import _metrics, _fit, _modelcheckpoint, _reducelr, _criterion
from data_gen_train import data_flow
from models.model import ResNet50, EfficientB7, ResNet101
def model_fn(args, mode):
model = ResNet101(weights=args.pretrained_weights, input_shape=(args.img_channel, args.input_size, args.input_size), num_classes=args.num_classes)
for param in model.parameters():
param.requires_grad = True
for name, value in model.named_parameters():
print(name, value.requires_grad)
model = nn.DataParallel(model)
model = model.cuda()
return model
def train_model(args, mode):
model = model_fn(args, mode)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
criterion = {'lossL' : nn.CrossEntropyLoss().cuda(), 'lossS' : _criterion.LabelSmoothSoftmaxCE().cuda()}
metrics = {"acc@1" : _metrics.top1_accuracy, "acc@3" : _metrics.topk_accuracy}
checkpoint1 = _modelcheckpoint.SingleModelCheckPoint(filepath=os.path.join('./models/', 'best_resnext101.pth'), monitor='val_acc@1', mode='max', verbose=1, save_best_only=True, save_weights_only=True)
checkpoint2 = _modelcheckpoint.SingleModelCheckPoint(filepath=os.path.join('./models/', 'ep{epoch:05d}-val_acc@1_{val_acc@1:.4f}-val_lossS_{val_lossS:.4f}-val_lossL_{val_lossL:.4f}.pth'), monitor='val_acc@1', mode='max', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = _reducelr.StepLR(optimizer, factor=0.2, patience=8, min_lr=1e-6)
_fit.Fit(
data_flow = data_flow,
model=model,
args=args,
batch_size = args.batch_size,
optimizer=optimizer,
criterion=criterion,
metrics=metrics,
reduce_lr = reduce_lr,
checkpoint = [checkpoint1, checkpoint2],
verbose=1,
workers=int(multiprocessing.cpu_count() * 0.8),
)
print('Training Done!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--data_local', default=r'/notebooks', type=str, help='')
parser.add_argument('--input_size', default=400, type=int, help='')
parser.add_argument('--img_channel', default=3, type=int, help='')
parser.add_argument('--num_classes', default=4, type=int, help='')
parser.add_argument('--batch_size', default=32, type=int, help='')
parser.add_argument('--learning_rate', default=1e-4, type=float, help='')
parser.add_argument('--max_epochs', default=40, type=int, help='')
parser.add_argument('--start_epoch', default=0, type=int, help='')
parser.add_argument('--pretrained_weights', default='./models/zoo/resnext101_32x8d-8ba56ff5.pth', type=str, help='')
parser.add_argument('--seed', default=None, type=int, help='')
args, unknown = parser.parse_known_args()
os.environ["CUDA_VISIBLE_DEVICES"] = '0, 1, 2, 3, 4, 5, 6, 7, 8'
print('CUDA device count : {}'.format(torch.cuda.device_count()))
if not os.path.exists(args.data_local):
raise Exception('FLAGS.data_local_path: %s is not exist' % args.data_local)
if args.seed != None:
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
cudnn.deterministic = True
print('You have chosen to seed training with seed {}.'.format(args.seed))
else:
print('You have chosen to random seed.')
train_model(args=args, mode='train')
|
[
"ieluoyiming@163.com"
] |
ieluoyiming@163.com
|
208846eb714574f4adfe61167d1f7792766e06ec
|
be1d8fdaf2820d910799180aaee4772cfbf2cfb7
|
/UndirectedUnweightedGraph_adjList.py
|
34b93d151250f18696e046fc09f711f5f33924ac
|
[] |
no_license
|
gerganzh/Python-Mini-Projects
|
04dd612ffb56b64606409b1bef23c27c60ab91f5
|
5c4da489896c608aa43647aedf4f7147921d6018
|
refs/heads/master
| 2020-05-25T12:54:55.604746
| 2019-05-21T09:48:29
| 2019-05-21T09:48:29
| 187,809,132
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,509
|
py
|
#Week 7, Task 1
'''
I implemented an unweighted, undirected graph data structure whose nodes are positive integers.
I decided to use the adjacency list approach.
'''
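# For the demo graph built in the test section below, the adjacency list comes out as
# (an illustrative trace of what this module produces):
#   1: ['2', '3', '5']
#   2: ['1', '3']
#   3: ['1', '2', '5', '4']
#   4: ['3']
#   5: ['1', '3']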
import sys
from collections import OrderedDict
class Node: #creating class node
def __init__(self, node):
self.name = node
self.neighbors = [] #the list will be appended by adjacent (connected) nodes
def add_neighbors(self, neighbors): #function to add adjacent nodes(neighbours) in the list
for neighbor in neighbors:
if isinstance(neighbor, Node): #if the object is an instance of Node
if neighbor.name not in self.neighbors: #makes sure that there are no duplicate nodes
self.neighbors.append(neighbor.name) #appending the neighbors list
neighbor.neighbors.append(self.name)
else:
return None
class Graph: #creating the Graph class
def __init__(self):
self.nodes = {} #dictionary that will contain the nodes
def add_nodes(self, nodes): #adding nodes to Graph
for node in nodes:
if isinstance(node, Node): #checks if the object is an instance of Node
self.nodes[node.name] = node.neighbors #
def adjacency_list(self): #used for printing the adjacency list
adj_list = [str(key) + ":" + str(self.nodes[key]) for key in self.nodes.keys()] #creating the list with dictionary inside
for a in adj_list: #to print it on a new row
print(a)
def adjacency_list_dict(self): #to print a dictionary, with the dictionary of neighbours inside (needed for BFS and isPath)
return({str(key): self.nodes[key] for key in self.nodes.keys()})
def isPath(graph, v, w, path=None):  # avoid a shared mutable default argument
    if path is None:
        path = []
    path += [v]
if v == w: #if/when the nodes are the same
print('There is path!')
return('The path is: ' + str(path))
elif v not in graph and w not in graph: #if both nodes are not in the graph
raise TypeError('Both nodes do not exist in this graph. ')
elif v not in graph:
raise TypeError ("The start node does not exist in this graph. ")
elif w not in graph:
raise TypeError("The end node does not exist in this graph. ")
for node in graph[v]: #for every node that is connected to the start node
if node not in path: #if it's not already in path
newpath = isPath(graph, node, w, path) #call the function again with the new node instead of start node
return newpath #and return the path
return ("Can't find path to this node!")
def print_list(graph): #to print the list
print('The Adjacency List for this unweighted, undirected graph is: ')
return graph.adjacency_list()
def print_dict(graph): #to print the dictionary
return(graph.adjacency_list_dict())
def bfs(graph1, start): #BFS Search
    queue = [start] #create the queue
    visited = [] #keep track of the visited nodes
    if start not in graph1:
        raise TypeError('Node not found.')
    while queue: #looping until queue is empty
        node = queue.pop(0) #remove element with index 0 from list and return it
        if node in visited: #skip already-expanded nodes, otherwise cycles loop forever
            continue
        visited.append(node) #mark node as visited
        neighbours = graph1[node] #get the adjacent nodes
        for neighbour in neighbours: #enqueue each neighbour not yet visited
            if neighbour not in visited:
                queue.append(neighbour)
    return bfs_write(visited) #write the traversal order (duplicates already avoided)
def bfs_write(lst): #output the traversed nodes to an external txt file
f = open('file.txt', 'w+')
f.write(str(list(OrderedDict.fromkeys(lst))))
f.close()
########################################## Testing The Code ###########################################################
if __name__ == "__main__":
A = Node('1') #create instances of class Node
B = Node('2')
C = Node('3')
D = Node('4')
E = Node('5')
A.add_neighbors([B, C, E]) #add neighbours (connected nodes)
B.add_neighbors([A, C])
C.add_neighbors([B, A, E, D])
D.add_neighbors([C])
E.add_neighbors([A, C])
graph = Graph() #create the graph itself
graph.add_nodes([A, B, C, D, E])
print_list(graph)#print the adjacency list
    adj_dict = print_dict(graph)  # avoid shadowing the built-in 'dict'
    bfs(adj_dict, '1')
    print(isPath(adj_dict, '1', '3'))
|
[
"noreply@github.com"
] |
gerganzh.noreply@github.com
|
ae003c75bee275054a1f41fbe04ff1af3abba836
|
24470bd278c86ce441015c4e1737d240d67f37a0
|
/models.py
|
44e3a92569245472f8563b8d6d85d42c9d2736b2
|
[] |
no_license
|
kevsersrca/graphql-flask
|
d8acaab43004aca9b763e811189c39bc7b6c3e17
|
88d11432dbff1fbcb93061f7093b31e8805bdfd2
|
refs/heads/master
| 2021-01-20T21:11:48.494715
| 2017-08-29T11:56:06
| 2017-08-29T11:56:06
| 101,755,776
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
from sqlalchemy import *
from sqlalchemy.orm import (scoped_session, sessionmaker, relationship,
backref)
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('mysql+pymysql://root:@localhost/tutorial', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
class Department(Base):
__tablename__ = 'department'
id = Column(Integer, primary_key=True)
name = Column(String(255), index=True)
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
    name = Column(String(255))  # MySQL needs an explicit VARCHAR length
hired_on = Column(DateTime, default=func.now())
department_id = Column(Integer, ForeignKey('department.id'))
department = relationship( Department, backref=backref('users', uselist=True, cascade='delete,all'))
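# A minimal usage sketch (assuming the 'tutorial' MySQL database above exists):
#   Base.metadata.create_all(bind=engine)
#   db_session.add(Department(name='Engineering'))
#   db_session.commit()
#   print(Department.query.count())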
|
[
"kev@Kev-MacBook-Pro.local"
] |
kev@Kev-MacBook-Pro.local
|
89bb687edf42e8d1b56379fb9bdefa2543b5cfa9
|
852d549b766134aa7d2d25fbcaceb5f1e9017fc9
|
/exam.py
|
642515798c839d156e41b43ade02fb5cfbf8d56e
|
[] |
no_license
|
python819/pythonfiles
|
3ae570873ebe17e5b70d5e989294fc49143e74d6
|
033a76e11adc25d88f034f9bf82520d8f2a79cca
|
refs/heads/master
| 2020-07-02T17:55:52.582633
| 2019-08-10T10:20:14
| 2019-08-10T10:20:14
| 201,612,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#module 1:--
#1
#for j in range(1,6):
# print('*'*j)
#2
'''
a = input('enter the password')
if len(a) >= 6 and len(a) <= 12:
    if a.isalpha():
        print('should contain at least one special symbol and one number')
    elif a.isalnum():
        print('should contain at least one special symbol')
    elif a.islower():
        print('should contain at least one uppercase')
    elif a.isupper():
        print('should contain at least one lowercase')
    else:
        print('strong password')
else:
    print('password should be min of 6 and max of 12 characters')
'''
#3
a = []
for j in range(0, 5):
    a.append(input('enter the elements'))  # a[j] = ... on an empty list raises IndexError
a.sort()  # list.sort() sorts in place and returns None, so sort first, then print
print(a)
|
[
"indraneilsai2@gmail.com"
] |
indraneilsai2@gmail.com
|
0a5b7be0c07c9be2e0443f4569ef24288a0b59c0
|
a01a43157460788b9156e09a7985267afcb2438b
|
/Province/Beijing.py
|
a16aa27771a6af43b8392cc39a9e4f7939b6114b
|
[] |
no_license
|
Ginchung/2020Wuhan
|
4f4db7556712363e1173394ee5e1c273b68e3f37
|
b9e9bdc8a326dfcc24a518229ac5c62490308188
|
refs/heads/master
| 2020-12-26T19:53:40.615462
| 2020-02-20T11:01:28
| 2020-02-20T11:01:28
| 237,622,642
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,513
|
py
|
### Project: 2020Wuhan@Github/Ginchung
## File: beijing.py
## Run with 'python beijing.py'
# object 'sdct'
# Function: stores the info from official message
# Key: YYYY-MM-DD-HH
# Web source: http://wjw.beijing.gov.cn/wjwh/ztzl/xxgzbd/gzbdyqtb/
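# e.g. sdct['2020-01-21-18'] holds the district case breakdown announced at 18:00 on 2020-01-21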
province='beijing'
sdct={}
sdct['2020-01-21-18']='西城区1例、海淀区2例、丰台区1例、通州区1例、大兴区2例、昌平区2例,武汉来京人员1例'
sdct['2020-01-22-18']='西城区2例、朝阳区1例、海淀区2例、丰台区1例、石景山区1例、通州区1例、大兴区2例、昌平区2例,武汉来京人员2例'
sdct['2020-01-24-00']='西城区3例、朝阳区3例、海淀区3例、丰台区2例、石景山区1例、通州区2例、顺义区1例、大兴区2例、昌平区2例,外地来京人员7例'
sdct['2020-01-24-17']='西城区4例、朝阳区5例、海淀区4例、丰台区2例、石景山区1例、通州区2例、顺义区1例、大兴区2例、昌平区3例,外地来京人员10例'
sdct['2020-01-24-20']='西城区4例、朝阳区5例、海淀区6例、丰台区2例、石景山区1例、通州区2例、顺义区1例、大兴区2例、昌平区3例,外地来京人员10例'
sdct['2020-01-25-17']='西城区4例、朝阳区6例、海淀区8例、丰台区2例、石景山区1例、通州区3例、顺义区1例、大兴区2例、昌平区3例,外地来京人员11例'
sdct['2020-01-25-23']='东城区1例、西城区5例、朝阳区8例、海淀区10例、丰台区3例、石景山区1例、通州区4例、顺义区1例、大兴区2例、昌平区5例,外地来京人员11例'
sdct['2020-01-26-08']='东城区1例、西城区5例、朝阳区8例、海淀区13例、丰台区3例、石景山区1例、通州区4例、顺义区1例、大兴区2例、昌平区5例,外地来京人员11例'
sdct['2020-01-26-21']='东城区1例、西城区5例、朝阳区11例、海淀区16例、丰台区4例、石景山区2例、通州区6例、顺义区1例、大兴区3例、昌平区7例,外地来京人员12例'
sdct['2020-01-27-09']='东城区1例、西城区7例、朝阳区11例、海淀区17例、丰台区4例、石景山区2例、通州区6例、顺义区1例、大兴区5例、昌平区7例,外地来京人员11例'
sdct['2020-01-27-20']='东城区2例、西城区7例、朝阳区17例、海淀区17例、丰台区4例、石景山区2例、通州区7例、顺义区1例、大兴区5例、昌平区7例,外地来京人员11例'
sdct['2020-01-28-12']='东城区2例、西城区8例、朝阳区17例、海淀区21例、丰台区7例、石景山区2例、通州区7例、顺义区2例、大兴区7例、昌平区7例,外地来京人员11例'
sdct['2020-01-29-12']='东城区2例、西城区9例、朝阳区19例、海淀区23例、丰台区8例、石景山区2例、门头沟1例、通州区7例、顺义区2例、大兴区10例、昌平区8例,外地来京人员11例'
sdct['2020-01-29-20']='东城区2例、西城区12例、朝阳区21例、海淀区24例、丰台区10例、石景山区2例、门头沟区1例、通州区8例、顺义区2例、大兴区10例、昌平区8例,外地来京人员11例'
sdct['2020-01-30-08']='东城区2例、西城区12例、朝阳区21例、海淀区24例、丰台区11例、石景山区2例、门头沟区1例、通州区8例、顺义区2例、大兴区10例、昌平区9例、怀柔区1例,外地来京人员11例'
sdct['2020-01-30-20']='东城区2例、西城区12例、朝阳区21例、海淀区24例、丰台区11例、石景山区2例、门头沟区1例、通州区10例、顺义区2例、大兴区15例、昌平区9例、怀柔区1例,外地来京人员11例'
sdct['2020-01-31-00']='东城区3例、西城区13例、朝阳区22例、海淀区26例、丰台区11例、石景山区2例、门头沟区1例、通州区12例、顺义区2例、大兴区16例、昌平区12例、怀柔区1例,外地来京人员11例'
sdct['2020-01-31-14']='东城区3例、西城区16例、朝阳区24例、海淀区27例、丰台区11例、石景山区2例、门头沟区1例、通州区12例、顺义区2例、大兴区17例、昌平区12例、怀柔区1例,外地来京人员11例'
sdct['2020-02-01-00']='东城区3例、西城区17例、朝阳区27例、海淀区35例、丰台区12例、石景山区3例、门头沟区1例、通州区13例、顺义区2例、大兴区19例、昌平区12例、怀柔区1例,外地来京人员11例'
sdct['2020-02-01-12']='东城区3例、西城区17例、朝阳区28例、海淀区39例、丰台区16例、石景山区3例、门头沟区1例、房山区2例、通州区13例、顺义区2例、昌平区12例、大兴区20例、怀柔区1例、外地来京人员11例'
sdct['2020-02-02-00']='东城区3例、西城区17例、朝阳区35例、海淀区41例、丰台区16例、石景山区4例、门头沟区1例、房山区2例、通州区13例、顺义区6例、昌平区12例、大兴区21例、怀柔区1例、外地来京人员11例'
sdct['2020-02-03-00']='东城区2例、西城区26例、朝阳区31例、海淀区42例、丰台区17例、石景山区5例、门头沟区3例、房山区4例、通州区13例、顺义区5例、昌平区13例、大兴区28例、怀柔区3例、延庆区1例,外地来京人员19例'
sdct['2020-02-04-00']='东城区2例、西城区28例、朝阳区36例、海淀区45例、丰台区18例、石景山区6例、门头沟区3例、房山区5例、通州区13例、顺义区5例、昌平区14例、大兴区29例、怀柔区3例、延庆区1例,外地来京人员20例'
sdct['2020-02-05-00']='东城区6例、西城区29例、朝阳区43例、海淀区45例、丰台区20例、石景山区7例、门头沟区3例、房山区11例、通州区13例、顺义区6例、昌平区15例、大兴区29例、怀柔区4例、延庆区1例,外地来京人员21例'
sdct['2020-02-06-00']=''
city=[]
latest='2020-02-05-00'
for i in sdct[latest].replace(',','、').split('、'):
city.append(i[:2])
print('city of %s: '%province,city,'\n')
print('number of infected cities now: ',len(city))
Table={}
for k,v in zip(sdct.keys(),sdct.values()):
if len(v)<5:
continue
s=['0']*len(city)
v=v.replace(',','、').replace('武汉','外地')
for i in v.split('、'):
tmp=''
for t in i:
if t.isdigit():
tmp+=t
#tmp=int(tmp)
s[city.index(i[:2])]=tmp
Table[k]=s
print(s)
### Output
print(province,',',','.join(city))
for date,out in zip(Table.keys(),Table.values()):
print(date,',',','.join(out))
|
[
"ljcone@qq.com"
] |
ljcone@qq.com
|
8bfb6e8d486ecbfa00ac77fffd5fed9b085007f4
|
af7dbe519166b969d8af2a56e0ad2231aae80b44
|
/generate/lib/generate_hash.py
|
d8f5af74f50e99ab55caae92a9a674d18ee697bd
|
[] |
no_license
|
tevix/browser-extensions
|
3e73d8fd5b39e0fd22e65ddb7d8cf59d726d34e1
|
dd09f301096ef95ed8caaa6a939f3e12aaadec34
|
refs/heads/master
| 2021-01-22T03:49:01.688265
| 2019-12-07T17:12:10
| 2019-12-07T17:12:10
| 81,462,065
| 0
| 0
| null | 2017-02-09T15:05:20
| 2017-02-09T15:05:20
| null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
import os
import hashlib
import json
os.chdir(os.path.dirname(os.path.abspath(__file__)))
hashes = {}
for root, dirs, files in os.walk('.'):
for file in files:
path = os.path.join(root, file)
with open(path, 'rb') as cur_file:
hash = hashlib.md5(cur_file.read()).hexdigest()
hashes[path.replace('\\', '/')[2:]] = hash
with open('hash.json', 'w') as hash_file:
json.dump(hashes, hash_file, indent=2)
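# hash.json ends up mapping relative paths to MD5 digests, e.g. (a hypothetical entry):
#   {"lib/foo.py": "d41d8cd98f00b204e9800998ecf8427e"}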
|
[
"james.colin.brady@gmail.com"
] |
james.colin.brady@gmail.com
|
3fc9a5f3265731caf8a21fd55c7b32f7899d4b42
|
035a3eebe2091897b942796781a192f67680b336
|
/objects/queries/dboraashwevchoice.py
|
3f919be71e6bb812c4a2453c10240c2196c7f0dc
|
[] |
no_license
|
gduvalsc/kairos
|
763eb8fa5daeefc5fd46ea066a47400d5be7b7f5
|
2bf863ba3f73fc16ef75842ad390eb55fb1906f1
|
refs/heads/master
| 2021-12-23T18:33:34.382989
| 2021-10-19T16:44:00
| 2021-10-19T16:44:00
| 77,072,517
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
null=None
true=True
false=False
class UserObject(dict):
def __init__(self):
object = {
"type": "query",
"id": "DBORAASHWEVCHOICE",
"collections": ["ORAHAS"],
"request": "select distinct event as label from ORAHAS where session_state = 'WAITING' order by label"
}
super(UserObject, self).__init__(**object)
|
[
"gduvalsc@gmail.com"
] |
gduvalsc@gmail.com
|
b9913ec6af02ea14d2cc7fb4552be713477a0d1d
|
5f55f05a2b115407e0703d1848c6f4681a16546d
|
/make_test_data.py
|
a2f390840aabdeda6cb958ed575cdbde1ebcd96c
|
[] |
no_license
|
yanjj199609017239230/D-python-gy-api-1908A
|
d20f359dbe39a0f213da273ce762c19ee912aa0f
|
0d188710438cd51f5c0afea5818fba115ed0b8af
|
refs/heads/master
| 2020-07-30T06:03:36.655044
| 2019-09-22T08:53:49
| 2019-09-22T08:53:49
| 210,112,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__title__ = ''
#__author__ = 'xuepl'
#__mtime__ = '2019/9/11'
import json
import os
import yaml
from config.conf import FILE_PATH
POST = 1
GET = 0
mode_name = "charge"
test_case = "扣款异常流1金额为空"
method = POST
url = "/acc/charge" #接口地址
data = None
params = None
status_code = 200
headers = {}
expect = "2000"
json_data = '''{
"accountName": "stdg2623",
"changeMoney": 25
}'''  # note: the payload must be a dict or a JSON string
if(isinstance(json_data,str)):
json_data = json.loads(json_data)
d = [{
"test_case":test_case,
"method":method,
"url":url,
"data":data,
"params":params,
"json":json_data,
"status_code":status_code,
"expect":expect,
"headers":headers
}]
with open(os.path.join(FILE_PATH,"test_{}.yaml".format(mode_name)),'a',encoding='utf-8') as f:
yaml.safe_dump(d,f,encoding='utf-8',default_flow_style=False,allow_unicode=True)
f.write("\n")
|
[
"1726550139@qq.com"
] |
1726550139@qq.com
|
214e442be29616883451dca6b73800ab366555e6
|
8bccd1376213a9fe56ab7bd69815a309339e8ceb
|
/quickdrop/quickdrop.py
|
be95d836b7decbb1458b091af81941e2e584a249
|
[] |
no_license
|
zevaverbach/quickdrop
|
0d2906fb3deb2fdb24b0f932347238676fab33b1
|
7017c1518f831515f331c715dcd5f5d24a1877f9
|
refs/heads/master
| 2020-04-23T12:44:27.704581
| 2019-02-17T22:20:15
| 2019-02-17T22:20:15
| 171,179,181
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
import os
from pathlib import Path
import sys
import click
import dropbox
import pyperclip
DROPBOX_ACCESS_TOKEN = os.getenv('DROPBOX_ACCESS_TOKEN')
DROPBOX_ROOT_PATH = os.getenv('DROPBOX_ROOT_PATH')
LB = '\n'
@click.command()
@click.argument('filepath', type=click.Path(exists=True))
def cli(filepath):
check_for_env_vars()
dropbox_relative_path = get_relative_path(filepath)
url = share_file(dropbox_relative_path)
copy_to_clipboard(url)
print(f'Okay, {filepath} is now shared, accessible via {LB}{url}.')
print('This url was also copied to your clipboard for your convenience.')
def share_file(filepath):
try:
shared_link = get_client().sharing_create_shared_link(filepath)
except dropbox.exceptions.ApiError as e:
raise click.ClickException('There was a problem with the path.')
else:
return shared_link.url
def get_relative_path(filepath):
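    # Intended mapping (a sketch, assuming DROPBOX_ROOT_PATH='~/Dropbox'):
    #   '~/Dropbox/docs/file.txt' -> '/docs/file.txt'
    #   'file.txt'                -> '/file.txt'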
DROPBOX_ROOT = Path(DROPBOX_ROOT_PATH).expanduser()
if '/' not in filepath:
filepath = f'/{filepath}'
elif not filepath.startswith('/') and not filepath.startswith('~'):
*path_parts, filename = filepath.split('/')
relevant_path_parts = []
for path_part in path_parts:
if path_part not in DROPBOX_ROOT_PATH:
relevant_path_parts.append(path_part)
        filepath = '/' + os.path.join(*relevant_path_parts, filename)  # joining an absolute component would discard the earlier parts
filepath_expanded_user = Path(filepath).expanduser()
path = Path(str(filepath_expanded_user).replace(str(DROPBOX_ROOT), ''))
return str(path)
def check_for_valid_access_token():
if not DROPBOX_ACCESS_TOKEN:
raise click.ClickException(
'Please get an access token here and store it in an environment '
'variable called "DROPBOX_ACCESS_TOKEN": '
' https://www.dropbox.com/developers/apps')
try:
dbx = get_client()
dbx.users_get_current_account()
except dropbox.exceptions.AuthError as e:
raise click.ClickException(str(e))
def check_for_env_vars():
check_for_valid_access_token()
check_for_dropbox_root_path()
def check_for_dropbox_root_path():
if not DROPBOX_ROOT_PATH:
raise click.ClickException(
'Please create an environment variable called "DROPBOX_ROOT_PATH" '
'with the path to your computer\'s root Dropbox folder.')
    if not Path(DROPBOX_ROOT_PATH).exists():  # .exists is a method; without parentheses the check always passes
raise click.ClickException(f'{DROPBOX_ROOT_PATH} doesn\'t exist!')
def get_client():
return dropbox.Dropbox(DROPBOX_ACCESS_TOKEN)
def copy_to_clipboard(url):
pyperclip.copy(url)
|
[
"zev@averba.ch"
] |
zev@averba.ch
|
08436a0b3e5a7e257274990561662c86d8e96311
|
53d6fc1222eaba9f2c4d9883ab2093612ba6fa87
|
/dicta.py
|
cc20977fbc23d9c8bcc83112b8e0c75312b7ab5b
|
[] |
no_license
|
vinay-iyengar/Bootcamp-Projects
|
0c1fcd266669f117f0710d8229c866d9649d1a43
|
5a414903c117947b0f47568ea0e4d448658c448b
|
refs/heads/master
| 2021-10-12T00:56:27.342506
| 2019-01-31T09:24:09
| 2019-01-31T09:24:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
d={1:"Speckbit", 2:"World", 3:"Quiet"}
for key,val in d.items():
print(d)
print(key, "=>", val)
|
[
"vinay2397@gmail.com"
] |
vinay2397@gmail.com
|
7276c06a92637c166751509d796fa9da93aaa076
|
78d035d98059909fa8546b65040432880d629e22
|
/gunicorn.conf.py
|
3702d784b774884ff291c145142b734a94780675
|
[] |
no_license
|
traffic-signal-control/TSCC-flask
|
1c1e49043afd35e81ca5535ff1455f588fd91b4b
|
afcc4e5105a9966f1840e0605ce16315b50e80cf
|
refs/heads/master
| 2020-05-04T07:55:48.889744
| 2019-04-17T09:59:14
| 2019-04-17T09:59:14
| 179,037,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
__author__ = 'Wingslet'
debug = True
workers = 5  # number of worker processes handling requests; tune to match site traffic
worker_class = "gevent"  # use the gevent worker for asynchronous request handling and higher throughput
bind = "0.0.0.0:8000"  # listen on all interfaces to allow container-to-container and container-to-host traffic
|
[
"wingsweihua@gmail.com"
] |
wingsweihua@gmail.com
|
b9e393ac52c6010d7045b8feb8aa0c3c7d0a91bd
|
e97ed793c12124203338ba26976e25059903daa8
|
/statements/mkpdf_helper.py
|
b0558d5d1e14c544a95e2f5bbc822beb3123e9da
|
[
"MIT"
] |
permissive
|
alex65536/contest-template
|
26af256b5ed4b82b0101a0227726c03fdd31bde7
|
0a36269f792340a9a73b727159fd9ad220d96025
|
refs/heads/master
| 2021-06-21T06:18:01.482886
| 2021-01-05T01:08:03
| 2021-01-05T01:08:03
| 136,298,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
#!/usr/bin/env python3
import sys
import json
from os import path
if len(sys.argv) < 2:
sys.stderr.write("Usage: {} PROBLEM\n".format(sys.argv[0]))
sys.exit(1)
problem = sys.argv[1]
obj = None
tl = "??? секунд"
ml = "??? мегабайт"
infile = "???.in"
outfile = "???.out"
try:
obj = json.loads(open(path.join("..", "problems",
problem, "problem.json"), 'r').read())
except FileNotFoundError:
pass
if obj:
tl_sec = obj["problem"]["timeLimit"]
tl_sec_int = round(tl_sec)
if abs(tl_sec_int - tl_sec) < 1e-12:
word = "секунд"
if 10 <= tl_sec_int % 100 <= 19:
word = "секунд"
elif tl_sec_int % 10 == 1:
word = "секунда"
elif tl_sec_int % 10 in {2, 3, 4}:
word = "секунды"
tl = format("{} {}".format(tl_sec_int, word))
else:
tl = format("{:.3g} секунды".format(tl_sec))
ml_mb = int(obj["problem"]["memoryLimit"])
word = "мегабайта"
if 10 <= ml_mb % 100 <= 19 or ml_mb % 10 in {0, 5, 6, 7, 8, 9}:
word = "мегабайт"
elif ml_mb % 10 == 1:
word = "мегабайт"
ml = str(ml_mb) + ' ' + word
infile = obj["problem"]["input"]
if not infile:
infile = "стандартный ввод"
outfile = obj["problem"]["output"]
if not outfile:
outfile = "стандартный вывод"
print("\\def\\ProblemTimeLimit{{{}}}".format(tl))
print("\\def\\ProblemMemoryLimit{{{}}}".format(ml))
print("\\def\\ProblemInputFile{{{}}}".format(infile))
print("\\def\\ProblemOutputFile{{{}}}".format(outfile))
print("\\input{{{}.tex}}".format(problem))
print()
|
[
"sh200105@mail.ru"
] |
sh200105@mail.ru
|
981f3b685443c1e8fabdc340684e1a4a52e41de2
|
e15fb687990589783066669784912ea8ac5bacaf
|
/genome_designer/test_data/full_vcf_test_set/generate_full_vcf_test_set.py
|
9dac81496c35a6bb2eaa6bc20477bb1f155f8606
|
[
"MIT"
] |
permissive
|
RubensZimbres/millstone
|
74d32105fa54104d0597b6789fb2871cb4fbd854
|
898936072a716a799462c113286056690a7723d1
|
refs/heads/master
| 2020-03-16T18:57:55.174716
| 2018-03-07T16:40:14
| 2018-03-07T16:40:14
| 132,894,394
| 1
| 2
| null | 2018-05-10T12:01:34
| 2018-05-10T12:01:33
| null |
UTF-8
|
Python
| false
| false
| 5,259
|
py
|
"""
Script for generating the test set.
This document describes how this test test was generated.
1) Select a region of the MG1655 genome to excise.
"""
import copy
import random
from Bio import SeqIO
import vcf
import simNGS_util
# Portion of MG1655 Genbank of size ~5.5 kB
EXCISED_GENBANK = 'mg1655_tolC_through_zupT.gb'
TEMPLATE_VCF = 'template.vcf'
VCF_TEMPLATE_READER = vcf.Reader(filename=TEMPLATE_VCF)  # PyVCF's first positional arg is a file object, so pass the path via filename=
SAMPLE_FASTA_ROOT = 'sample'
DESIGNED_SNP_VCF = 'designed_snps.vcf'
# If we do a SNP every 100 bases, that's 50 SNPs.
# We'll then do 20 designed SNPs and 20 SNPs per sample so we should get
# fairly interesting overlaps.
TOTAL_SNPS = 50
NUM_IN_CDS = 45
NUM_OTHER = TOTAL_SNPS - NUM_IN_CDS
# We'll create this many genomes.
NUM_SAMPLES = 6
def is_position_in_coding_feature(position, cds_features):
"""Checks whether the given position lies inside of a coding feature
in the given genome record.
"""
for feature in cds_features:
if (feature.location.start <= position and
position < feature.location.end):
return True
return False
BASE_OPTIONS = ['A', 'T', 'G', 'C']
def choose_alt(ref):
"""Returns a random base that is not ref.
"""
alt = ref
while alt == ref:
alt = random.choice(BASE_OPTIONS)
return alt
def get_subset_of_snps(all_snps, subset_size):
all_snp_positions = all_snps.keys()
subset = {}
while len(subset) < subset_size:
pos = random.choice(all_snp_positions)
if pos in subset:
continue
subset[pos] = all_snps[pos]
return subset
def create_vcf_for_subset(subset, out_path):
with open(out_path, 'w') as designed_fh:
writer = vcf.Writer(designed_fh, VCF_TEMPLATE_READER,
lineterminator='\n')
for pos, value_dict in subset.iteritems():
writer.write_record(vcf.model._Record(
'Chromosome', # CHROM
pos, # POS
None, # ID
value_dict['ref'], # REF
value_dict['alt'], # ALT
None, # QUAL
None, # FILTER
None, # INFO
None, # FORMAT
None, # sample_indexes
samples=None))
def main():
seq_record = SeqIO.read(EXCISED_GENBANK, 'genbank')
cds_features = [f for f in seq_record.features if f.type == 'CDS']
# Generate all possible SNPs to sample from. Store them in a dictionary
# keyed by position so we can easily deal with lookups and avoiding
# duplicates as needed below.
all_snps = {}
len_seq_record = len(seq_record)
# Select random positions for SNPs, respecting the distribution
# set above by the NUM_IN_CDS vs TOTAL_SNPS constants.
# NOTE: These SNP positions are pythonic. We have to update them when
# writing them out in vcf format below.
num_in_cds = 0
num_other = 0
while num_in_cds < NUM_IN_CDS or num_other < NUM_OTHER:
position = random.randint(0, len_seq_record - 1)
if position in all_snps:
continue
in_cds_feature = is_position_in_coding_feature(position, cds_features)
do_add_position = False
if in_cds_feature and num_in_cds < NUM_IN_CDS:
do_add_position = True
num_in_cds += 1
elif not in_cds_feature and num_other < NUM_OTHER:
do_add_position = True
num_other += 1
if do_add_position:
ref = seq_record.seq[position]
alt = choose_alt(ref)
all_snps[position] = {
'ref': ref,
'alt': [alt]
}
assert len(all_snps) == TOTAL_SNPS, "Didn't get all the SNPs we expected."
# Now select a subset of these SNPS to serve as designed.
designed_snps = get_subset_of_snps(all_snps, 20)
create_vcf_for_subset(designed_snps, DESIGNED_SNP_VCF)
# Now create the samples.
for sample_num in range(NUM_SAMPLES):
sample_name = SAMPLE_FASTA_ROOT + str(sample_num)
sample_record = copy.deepcopy(seq_record)
sample_record.id = sample_name
# Grab a subset of SNPs.
sample_snps = get_subset_of_snps(all_snps, 20)
# Introduce the mutations.
for position, value_dict in sample_snps.iteritems():
sample_record.seq = (
sample_record.seq[:position] +
value_dict['alt'][0] +
sample_record.seq[position + 1:])
assert len(sample_record) == len(seq_record), (
"For now we are only doing mutations.")
# Write out the sample fasta.
sample_output = sample_name + '.fa'
with open(sample_output, 'w') as out_fh:
SeqIO.write(sample_record, out_fh, 'fasta')
# Generate fake reads using simNGS.
simLibrary_fasta = sample_name + '.simLibrary.fa'
print sample_output, simLibrary_fasta
simNGS_util.run_simLibrary(sample_output, simLibrary_fasta)
# Generate reads using simNGS.
output_fq = sample_name + '.simLibrary.fq'
simNGS_util.run_paired_simNGS(simLibrary_fasta, output_fq)
if __name__ == '__main__':
main()
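# --- Hedged check (added): a minimal sketch for verifying the designed
# --- VCF against the excised genbank record. It assumes PyVCF's 1-based
# --- POS convention together with the pos + 1 conversion applied above.
def verify_designed_vcf(vcf_path=DESIGNED_SNP_VCF,
                        genbank_path=EXCISED_GENBANK):
    genome = SeqIO.read(genbank_path, 'genbank')
    for record in vcf.Reader(filename=vcf_path):
        # Convert the 1-based VCF POS back to a 0-based sequence index.
        assert str(genome.seq[record.POS - 1]) == str(record.REF)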
|
[
"gleb.kuznetsov@gmail.com"
] |
gleb.kuznetsov@gmail.com
|
fa09d7b28df8eaa91203691c54a1efca37c983c9
|
71804d207ca012e5398117128f5a65eb50c69699
|
/project4/task1.py
|
5a06e063eb7f7589bb88346d783ecffbe41c877e
|
[] |
no_license
|
rupadevan94/web-information-management
|
134cc1ba2119ebeed60b75754a8ca7d964395ace
|
963aafa48747ae35925a7a9cfc00bd3d73969f40
|
refs/heads/master
| 2020-06-28T06:34:14.633542
| 2019-08-02T04:37:36
| 2019-08-02T04:37:36
| 200,165,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
#!/usr/bin/env python3
import sys
import os
import numpy
import numpy.linalg
import scipy.misc
def getOutputPngName(path, rank):
filename, ext = os.path.splitext(path)
return filename + '.' + str(rank) + '.png'
def getOutputNpyName(path, rank):
filename, ext = os.path.splitext(path)
return filename + '.' + str(rank) + '.npy'
if len(sys.argv) < 3:
sys.exit('usage: task1.py <PNG inputFile> <rank>')
inputfile = sys.argv[1]
rank = int(sys.argv[2])
outputpng = getOutputPngName(inputfile, rank)
outputnpy = getOutputNpyName(inputfile, rank)
#
# TODO: The current code just prints out what it is supposed to do.
# Replace the print statement with your code.
#
print("This program should read %s file, perform rank %d approximation, and save the results in %s and %s files." % (inputfile, rank, outputpng, outputnpy))
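# A hedged sketch of the rank-k approximation the TODO above asks for.
# The helper is an assumption, not the course's reference solution: it
# assumes a grayscale PNG and uses imageio for I/O, since
# scipy.misc.imread/imsave were removed in SciPy >= 1.2.
def rank_k_approximation(png_path, k, out_png, out_npy):
    import imageio
    img = imageio.imread(png_path).astype(numpy.float64)
    # Keep only the k largest singular triplets.
    U, s, Vt = numpy.linalg.svd(img, full_matrices=False)
    approx = U[:, :k] @ numpy.diag(s[:k]) @ Vt[:k, :]
    imageio.imwrite(out_png, numpy.clip(approx, 0, 255).astype(numpy.uint8))
    numpy.save(out_npy, approx)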
|
[
"rupadevan94@gmail.com"
] |
rupadevan94@gmail.com
|
657337bf90a24e453740657f6c0d434ef21313c9
|
cf62f7a7f9e13205fe83957fb7bfcf1b097bf481
|
/src/index.py
|
a2ae504efaedb021f53a79f53ead655fd59982c9
|
[
"Apache-2.0"
] |
permissive
|
biothings/mygene.info
|
09bf19f481c066789a4ad02a0d2880f31dae28f6
|
fe1bbdd81bc29b412ca4288d3af38e47c0602ab7
|
refs/heads/master
| 2023-08-22T21:34:43.540840
| 2023-08-08T23:25:15
| 2023-08-08T23:25:18
| 54,933,630
| 89
| 20
|
NOASSERTION
| 2023-07-18T23:53:49
| 2016-03-29T00:36:49
|
Python
|
UTF-8
|
Python
| false
| false
| 757
|
py
|
"""
Mygene Web Server Entry Point
Examples:
>>> python index.py
>>> python index.py --debug
>>> python index.py --port=8000
"""
import os.path
import config
from biothings.web.launcher import main
ADDON_HANDLERS = [
(r"/demo/?(.*)", "tornado.web.StaticFileHandler",
{"path": "docs/demo", "default_filename": "index.html"}),
]
if config.INCLUDE_DOCS:
if not os.path.exists(config.DOCS_STATIC_PATH):
raise IOError('Run "make html" to generate sphinx docs first.')
ADDON_HANDLERS += [
(r"/widget/(.*)", "tornado.web.RedirectHandler", {"url": "/static/widget/{0}"}),
(r"/?(.*)", "tornado.web.StaticFileHandler", {'path': config.DOCS_STATIC_PATH}),
]
if __name__ == '__main__':
main(ADDON_HANDLERS)
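# --- Illustration (added, not part of the original entry point): how
# --- handler tuples like ADDON_HANDLERS map onto a plain Tornado app.
# --- The biothings launcher resolves the dotted class-path strings
# --- above; in bare Tornado you pass the handler classes directly.
def _demo_tornado_app():
    import tornado.web
    return tornado.web.Application([
        (r"/demo/?(.*)", tornado.web.StaticFileHandler,
         {"path": "docs/demo", "default_filename": "index.html"}),
    ])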
|
[
"xzhou@scripps.edu"
] |
xzhou@scripps.edu
|
2a456d30c85a8c4ec540c3dfdb5ecdb022605603
|
51d54eecaef308fa2b1bfe6b5b0f15c9921c0e02
|
/Fadi/svmRun/stopwordsfeature.py
|
9696ce1a1efdb5e7c7649c9dec1859f3141a0299
|
[] |
no_license
|
chaitanyamalaviya/11761-Project
|
8e9adb0c4ef8a908023a25274c2f4a032a428797
|
eda750e5296786919ad6882a7db8d4bc2366f05f
|
refs/heads/master
| 2021-05-01T00:31:25.292387
| 2016-12-03T18:07:07
| 2016-12-03T18:07:07
| 73,503,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,328
|
py
|
from __future__ import division
import pickle
import os
import nltk
import nltk.tokenize
from nltk.corpus import stopwords
import logging
import numpy as np
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
STOPWORDS = set(stopwords.words('english'))
def saveObj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def loadObj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def importArticles(corpusFileName):
articles = []
path = os.getcwd()
with open(path + '/' + corpusFileName, "r") as f:
lines = f.readlines()
article = []
for line in lines:
line = line.rstrip()
if line == "~~~~~":
if article:
articles.append(article)
article = []
else:
# Removes the start stop tags for the sentence
line = line[4:]
line = line[:-4]
line = line.rstrip()
article.append(line)
articles.append(article)
return articles
def getFakeGood(labelsFileName):
path = os.getcwd()
with open(path + '/' + labelsFileName, "r") as f:
lines = f.readlines()
labels = []
for line in lines:
line = line.rstrip()
labels.append(int(line))
return labels
def getNumberOfStopwords(article):
    """Returns the length-weighted average stopword count per sentence."""
    sumStop = 0
    sumLength = 0
    for sentence in article:
        tokenizedSentence = nltk.word_tokenize(sentence.lower())
        numStopwords = len([i for i in tokenizedSentence if i in STOPWORDS])
        length = len(tokenizedSentence)
        sumStop += numStopwords * length
        sumLength += length
    return float(sumStop) / sumLength
def getFeature(devFileName):
articles = importArticles(devFileName)
featureLength = len(articles)
featureArray = np.zeros([featureLength,1], dtype=float)
i = 0
for article in articles:
stopWords = getNumberOfStopwords(article)
featureArray[i] = stopWords
i += 1
return featureArray
def main():
articlesPickle = []
goodArticles = []
badArticles = []
articles = importArticles('trainingSet.dat')
labels = getFakeGood('trainingSetLabels.dat')
getFeature('trainingSet.dat')
i = 0
for label in labels:
if label == 1:
article = articles[i]
score = getNumberOfStopwords(article)
logging.debug("Average number of stopwords in good article: %s" % score)
goodArticles.append(score)
articlesPickle.append(score)
if label == 0:
article = articles[i]
score = getNumberOfStopwords(article)
logging.debug("Average number of stopwords in bad article: %s" % score)
badArticles.append(score)
articlesPickle.append(score)
i = i + 1
logging.debug("Average number of stopwords in good articles: %f" % (sum(goodArticles)/len(goodArticles)))
logging.debug("Average number of stopwords in bad articles: %f" % (sum(badArticles)/len(badArticles)))
saveObj(articlesPickle, 'feature_stopwords')
if __name__ == "__main__": main()
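# --- Hedged illustration (added): one way the pickled feature array
# --- could feed an SVM, given that this script lives under svmRun/.
# --- scikit-learn is an assumption; it is not used elsewhere in this
# --- file. Rows of X align with the labels by construction of main().
def train_svm_on_feature(feature_name='feature_stopwords',
                         labels_file='trainingSetLabels.dat'):
    from sklearn.svm import SVC
    X = np.array(loadObj(feature_name)).reshape(-1, 1)
    y = np.array(getFakeGood(labels_file))
    clf = SVC(kernel='rbf')
    clf.fit(X, y)
    return clf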
|
[
"fadibotros@Fadis-MBP.wv.cc.cmu.edu"
] |
fadibotros@Fadis-MBP.wv.cc.cmu.edu
|
db27264593236ca9c6e9701e619205b975ec714f
|
7db2ff6c164bda9b4179f6314592a85020fb372d
|
/ex31.py
|
be53dc0e4a34c596848ee99f65437a49a7bd3a1b
|
[] |
no_license
|
Takashiidobe/learnPythonTheHardWayZedShaw
|
5e124bf9004a0d7d00f6b2cfd2d5df1f231da662
|
cb73e9079c2b93221ed1857801ad6be6d0ee2615
|
refs/heads/master
| 2020-03-17T15:44:47.964103
| 2018-05-16T21:01:55
| 2018-05-16T21:01:55
| 133,722,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
print("""You enter a dark room with two doors.
Do you go through door #1 or door #2?""")
door = input(">")
if door == "1":
print("There's a giant bear here eating a cheesecake.")
print("What do you do?")
print("1. Take the cake.")
print("2. Scream at the bear.")
bear = input("> ")
if bear == "1":
print("The bear eats your face off. Good Job!")
elif bear == "2":
print("The bear eats your legs off. Good Job!")
else:
print(f"Well, doing {bear} is probably better.")
print("Bear runs away.")
elif door == "2":
    print('You stare into the endless abyss at Cthulhu\'s lair')
print("1. Blueberries.")
print("2. Yellow jacket clothespins.")
print("3. Understanding revolvers yelling melodies")
insanity = input("> ")
if insanity == "1" or insanity == "2":
print("Your body survives powered by a mind of jello.")
print("Good job!")
else:
print("The insanity rots your eyes into a pool of jello")
print("Good job!")
else:
    print("You stumble around and fall on a knife and die. Good job!")
|
[
"idobetakashi@gmail.com"
] |
idobetakashi@gmail.com
|
d013fddbef0d5f30733064fb694bf1132b7eb341
|
78a3ba49a3aaea55431a41e72ff5297b069037fc
|
/neurokernel/LPU/InputProcessors/GaussianNoiseInputProcessor.py
|
f0c313149a6d3e50b379b499a7bf9c791a0f415b
|
[
"BSD-3-Clause"
] |
permissive
|
mkturkcan/neurodriver
|
edb9f0d79f8eabb0f3bd06c35e2277c0be70e255
|
dc5a10212e32ba1dee97af2cbc1b025917361b32
|
refs/heads/master
| 2021-04-27T00:27:41.903107
| 2018-03-04T19:08:36
| 2018-03-04T19:08:36
| 123,819,113
| 0
| 0
|
BSD-3-Clause
| 2018-03-04T19:00:03
| 2018-03-04T19:00:03
| null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
import numpy as np
from BaseInputProcessor import BaseInputProcessor
class GaussianNoiseInputProcessor(BaseInputProcessor):
def __init__(self, variable, uids, mean, std, start = -1, stop = -1):
super(GaussianNoiseInputProcessor, self).__init__([(variable,uids)],
mode=0)
self.mean = mean
self.std = std
self.start = start
self.stop = stop
self.var = variable
self.num = len(uids)
def update_input(self):
self.variables[self.var]['input'] = self.std*\
np.array(np.random.randn(self.num), dtype = self.dtypes[self.var]) + self.mean
def is_input_available(self):
if self.start>-1. and self.stop>self.start:
return (self.LPU_obj.time >= self.start and
self.LPU_obj.time < self.stop)
else:
return False
def post_run(self):
pass
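# --- Hedged usage sketch (added): constructing the processor for a
# --- hypothetical pair of neuron uids. The LPU machinery this class
# --- relies on (LPU_obj, dtypes) is wired up by Neurodriver at run
# --- time, so the instance below is illustrative only.
#
# noise = GaussianNoiseInputProcessor('I', ['neuron_0', 'neuron_1'],
#                                     mean=0.0, std=0.5,
#                                     start=0.2, stop=0.8)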
|
[
"mkt2126@columbia.edu"
] |
mkt2126@columbia.edu
|
cb9d222ef240028c7c2bb5e92e4ffa33f9a97b42
|
1ef667feb6d4653dab8bd2d0474bbce37a568900
|
/perm/ops/user_perm.py
|
ad610982420a34e7be387fd8652d7079f1d1b884
|
[
"MIT"
] |
permissive
|
ni-ning/pauli
|
02f05859b0285914f26a50b9b68344bfa72e7666
|
5fdcba9c0aa3bb3f960546ee078f417a0f772a84
|
refs/heads/master
| 2020-07-30T13:27:00.562980
| 2019-09-10T09:48:08
| 2019-09-10T09:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,502
|
py
|
# coding:utf-8
import six
from ...auth.models import User
from ..models import UserPerm, RoleDesc
from . import perm_base
def get_or_create_user_perm(user_id):
user_perm = UserPerm.objects(user_id=user_id, soft_del=False).first()
if not user_perm:
user = User.objects(id=user_id, soft_del=False).first()
if not user:
return None
user_perm = UserPerm(user_id=user_id)
user_perm.save()
return user_perm
def add_perm(perm, user_id=None, role_id=None):
if isinstance(perm, six.string_types):
perm = perm_base.get_perm_desc_from_string(perm)
if not perm_base.is_valid_perm_desc(perm):
        return False, "Invalid permission description"
if user_id:
user_perm = get_or_create_user_perm(user_id)
        if not user_perm:
            return False, "Target user does not exist"
        if any(map(lambda x: perm_base.equ(perm, x), user_perm.perms)):
            return False, "Duplicate permission description"
user_perm.perms.append(perm)
user_perm.save()
return True, user_perm
elif role_id:
role = RoleDesc.objects(id=role_id).first()
        if not role:
            return False, "Target role does not exist"
        if any(map(lambda x: perm_base.equ(perm, x), role.perms)):
            return False, "Duplicate permission description"
role.perms.append(perm)
role.save()
return True, role
else:
        return False, "Neither a user id nor a role id was provided"
def update_perms(perms, user_id=None, role_id=None):
parsed_perms = []
for perm in perms:
if isinstance(perm, six.string_types):
perm = perm_base.get_perm_desc_from_string(perm)
if not perm_base.is_valid_perm_desc(perm):
            return False, "Invalid permission description %s" % perm
parsed_perms.append(perm)
if user_id:
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
            return False, "Target user does not exist"
user_perm.perms = parsed_perms
user_perm.save()
return True, user_perm
elif role_id:
role = RoleDesc.objects(id=role_id).first()
        if not role:
            return False, "Target role does not exist"
        role.perms = parsed_perms
role.save()
return True, role
else:
        return False, "Neither a user id nor a role id was provided"
def remove_perm(perm, user_id=None, role_id=None):
if isinstance(perm, six.string_types):
perm = perm_base.get_perm_desc_from_string(perm)
if not perm_base.is_valid_perm_desc(perm):
        return False, "Invalid permission description"
if user_id:
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
            return False, "Target user does not exist"
user_perm.perms = list(filter(lambda x: not perm_base.equ(perm, x),
user_perm.perms))
user_perm.save()
return True, user_perm
elif role_id:
role = RoleDesc.objects(id=role_id, soft_del=False).first()
if not role:
            return False, "Target role does not exist"
role.perms = list(filter(lambda x: not perm_base.equ(perm, x),
role.perms))
role.save()
return True, role
else:
        raise Exception("Neither user_id nor role_id is available.")
def get_user_perm_list(user_id, exclude_role=False):
'''
    Get the user's permission list.
    user_id: str, the user's id
    exclude_role: boolean, whether to exclude permissions granted through the user's roles
'''
user_perm = UserPerm.objects(user_id=user_id, soft_del=False).first()
if not user_perm:
return []
else:
perms = []
perms.extend(user_perm.perms)
if (not exclude_role) and user_perm.roles:
roles = RoleDesc.objects(id__in=user_perm.roles,
soft_del=False)
for role in roles:
perms.extend(role.perms)
return perms
def has_perm(user_id, target_perm_desc,
is_upstream=False, is_owner=False, perm_list=None):
'''
    Check whether the user holds a given permission.
    perm_list: list, optional; if provided, the user's permission list is
    not re-fetched by user_id on every call.
'''
if isinstance(target_perm_desc, six.string_types):
target_perm_desc = perm_base\
.get_perm_desc_from_string(target_perm_desc)
perm_list = perm_list or get_user_perm_list(user_id)
if perm_base.is_perm_allowed(perm_list, target_perm_desc,
is_upstream=is_upstream, is_owner=is_owner):
return True
#for i in perm_list:
# if perm_base.is_perm_matched(i, target_perm_desc,
# is_upstream=is_upstream, is_owner=is_owner):
# return True
return False
def get_user_roles(user_id):
'''
Return the role objects.
'''
user_perm = UserPerm.objects(user_id=user_id, soft_del=False).first()
if user_perm and user_perm.roles:
roles = RoleDesc.objects(id__in=user_perm.roles,
soft_del=False)
return roles
return []
def refresh_user_info_roles(user_id, roles):
user = User.objects(id=user_id).first()
if user:
role_names = []
if roles:
role_names = [role_desc.name for role_desc in\
RoleDesc.objects(id__in=roles,
soft_del=False)]
user.info['role_names'] = role_names
user.save()
return True
else:
return False
def update_user_roles(user_id, role_ids):
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
        return False, "User does not exist or has been deleted"
role_ids = list(set(role_ids))
role_count = RoleDesc.objects(id__in=role_ids,
soft_del=False).count()
if len(role_ids) != role_count:
        return False, "Some of the given role ids are invalid"
user_perm.roles = role_ids
user_perm.save()
refresh_user_info_roles(user_perm.user_id, user_perm.roles)
return True, user_perm
def add_role_to_user(role_id, user_id):
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
        return False, "User does not exist or has been deleted"
role = RoleDesc.objects(id=role_id, soft_del=False).first()
if not role:
        return False, "Role does not exist or has been deleted"
if not role_id in user_perm.roles:
user_perm.roles.append(role_id)
user_perm.save()
refresh_user_info_roles(user_perm.user_id, user_perm.roles)
return True, user_perm
def remove_role_from_user(role_id, user_id):
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
        return False, "User does not exist or has been deleted"
user_perm.roles = list(filter(lambda x: x != role_id,
user_perm.roles))
user_perm.save()
refresh_user_info_roles(user_perm.user_id, user_perm.roles)
return True, user_perm
def get_all_roles(soft_del=False):
roles = RoleDesc.objects(soft_del=soft_del)
return list(roles)
def create_role(name=None):
if not name:
        return False, "Role name not provided"
role_desc = RoleDesc.objects(name=name, soft_del=False).first()
if role_desc:
        return False, "Role name already exists"
role_desc = RoleDesc(name=name)
role_desc.save()
return get_role_info(role_desc)
def remove_role(role_id):
if not role_id:
        return False, "Role id not provided"
role = RoleDesc.objects(id=role_id).first()
if not role:
        return False, "Role does not exist"
role.soft_del = True
role.save()
return get_role_info(role)
def update_role(role_id, name=None):
if not role_id:
        return False, "Role id not provided"
if not name:
        return False, "Name not provided"
duplicated_role = RoleDesc.objects(soft_del=False,
name=name,
id__ne=role_id).first()
if duplicated_role:
        return False, "A role with the same name already exists"
role = RoleDesc.objects(id=role_id, soft_del=False).first()
if not role:
        return False, "Role does not exist"
role.name = name
role.save()
return get_role_info(role)
def get_role_info(role=None, role_id=None, role_name=None):
if not role:
if role_id:
role = RoleDesc.objects(id=role_id).first()
elif role_name:
role = RoleDesc.objects(name=role_name, soft_del=False).first()
if not role:
        return False, "Role does not exist"
ret = {'id': str(role.id),
'name': str(role.name),
'perms': role.perms,
'granted_positions': role.granted_positions,
'created': str(role.created),
'soft_del': role.soft_del,
'lut': str(role.lut)}
return True, ret
def get_perm_triples(actions, user_id):
ret = {}
perm_list = get_user_perm_list(user_id)
for action in actions:
ret.setdefault(action, {})
ret[action]['*'] = has_perm(user_id, action, perm_list=perm_list)
ret[action]['+'] = has_perm(user_id, {'action': action, 'effect': 'allow', 'resource': '+'},
is_upstream=True, perm_list=perm_list)
ret[action]['-'] = has_perm(user_id, {'action': action, 'effect': 'allow', 'resource': '-'},
is_owner=True, perm_list=perm_list)
return True, ret
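# --- Hedged usage sketch (added): the shape get_perm_triples returns,
# --- per the loop above. For each action it reports the global ('*'),
# --- upstream ('+') and owner ('-') grants, e.g.:
#
# ok, triples = get_perm_triples(['read', 'write'], user_id)
# # triples == {'read': {'*': True, '+': False, '-': True}, ...}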
|
[
"socrateslee@users.noreply.github.com"
] |
socrateslee@users.noreply.github.com
|
8cdd0bd9d537ad94f769df4f3a1faf52e3fb8895
|
5760ff9bca037a2e85dde8ad4d583139ab8e128a
|
/migrations/versions/20150624090637_3606d4a47663_update_answercomment_model.py
|
c4dcdcc74edfefac69c1499b71d92697c7e86322
|
[] |
no_license
|
dianchang/dianchang
|
5b58cbfcf6dfcd9c2c9d55c0612a9327086b8b54
|
3414cd5af0a66facd6ec4eb787e7646d04d8c96c
|
refs/heads/master
| 2016-08-11T11:24:49.322330
| 2015-07-30T05:18:09
| 2015-07-30T05:18:09
| 36,111,229
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
"""Update AnswerComment model.
Revision ID: 3606d4a47663
Revises: 2040a458fc8a
Create Date: 2015-06-24 09:06:37.957787
"""
# revision identifiers, used by Alembic.
revision = '3606d4a47663'
down_revision = '2040a458fc8a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('answer_comment', sa.Column('likes_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('answer_comment', 'likes_count')
### end Alembic commands ###
|
[
"hustlzp@qq.com"
] |
hustlzp@qq.com
|
005aa7160b9dcb3eb53b5920602371a013ae5a0c
|
807d460fbb00db68c1eb1a1cb490ae74a7806df9
|
/mysite/topic/admin.py
|
b1213bb354395c4a8a166f3cda781cba67c5ab67
|
[] |
no_license
|
modanhan/cpsc471
|
67fa6a68581efae5ebefac96e0e523b63c8e1edf
|
d1eabcc418d1d2c8d71b408a48394edabdeb80d0
|
refs/heads/master
| 2020-03-09T00:18:35.152083
| 2018-04-18T03:15:11
| 2018-04-18T03:15:11
| 128,484,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
from django.contrib import admin
from .models import Topic
from .models import ChallengeTopic, TopicRating
# Register your models here.
admin.site.register(Topic)
admin.site.register(ChallengeTopic)
admin.site.register(TopicRating)
|
[
"modanhan@live.com"
] |
modanhan@live.com
|
1f05d5c3403f2296215637c6ca97504a6c5fa394
|
0b6413fb9fda0bdb599dc79ec55259e461e18080
|
/env/Scripts/django-admin.py
|
beaf0d2211e0cf7732b3bf8c34f5abc7d5170a71
|
[] |
no_license
|
VictorTherache/P10-Softdesk-API
|
bfd9e7f430fa2dbab35c0f9a93f36b2dcf982225
|
57acb53b5bdd172185b2c70ff040d33e5235e32b
|
refs/heads/main
| 2023-09-01T22:25:41.136504
| 2021-10-29T13:06:34
| 2021-10-29T13:06:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
#!v:\code\medium_api_drf\env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"codingvictor22@gmail.com"
] |
codingvictor22@gmail.com
|
899c5f0098afd90b2bbd71e177e514e42fe973d5
|
36d4c9a57b53f5e14acb512759b49fe44d9990d8
|
/hackerrank/30-days-of-code/day-8.py
|
d6527ddafbd6b3abc73b984d4cbb1c5fe239558e
|
[] |
no_license
|
yosef8234/test
|
4a280fa2b27563c055b54f2ed3dfbc7743dd9289
|
8bb58d12b2837c9f8c7b1877206a365ab9004758
|
refs/heads/master
| 2021-05-07T22:46:06.598921
| 2017-10-16T18:11:26
| 2017-10-16T18:11:26
| 107,286,907
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
# -*- coding: utf-8 -*-
# Objective
# Today, we're learning about Key-Value pair mappings using a Map or Dictionary data structure. Check out the Tutorial tab for learning materials and an instructional video!
# Task
# Given N names and phone numbers, assemble a phone book that maps friends' names to their respective phone numbers. You will then be given an unknown number of names to query your phone book for; for each name queried, print the associated entry from your phone book (in the form name=phoneNumber) or Not found if there is no entry for name.
# Note: Your phone book should be a Dictionary/Map/HashMap data structure.
# Input Format
# The first line contains an integer, N, denoting the number of entries in the phone book.
# Each of the N subsequent lines describes an entry in the form of 2 space-separated values on a single line. The first value is a friend's name, and the second value is an 8-digit phone number.
# After the N lines of phone book entries, there are an unknown number of lines of queries. Each line (query) contains a name to look up, and you must continue reading lines until there is no more input.
# Note: Names consist of lowercase English letters and are first names only.
# Constraints
# 1 <= N <= 10^5
# 1 <= queries <= 10^5
# Output Format
# On a new line for each query, print Not found if the name has no corresponding entry in the phone book; otherwise, print the full name and phoneNumber in the format name=phoneNumber.
# Sample Input
# 3
# sam 99912222
# tom 11122222
# harry 12299933
# sam
# edward
# harry
# Sample Output
# sam=99912222
# Not found
# harry=12299933
# Explanation
# N = 3
# We add the N subsequent (Key, Value) pairs to our map so it looks like this:
# phoneBook = {(sam, 99912222), (tom, 11122222), (harry, 12299933)}
# We then process each query and print Key=Value if the queried Key is found in the map, or Not found otherwise.
# Query 0: sam
# Sam is one of the keys in our dictionary, so we print sam=99912222.
# Query 1: edward
# Edward is not one of the keys in our dictionary, so we print Not found.
# Query 2: harry
# Harry is one of the keys in our dictionary, so we print harry=12299933.
import sys

n = int(input())
phonebook = dict(input().split() for _ in range(n))
# The number of queries is unknown, so read until EOF instead of
# assuming there are exactly n query lines.
for line in sys.stdin:
    name = line.strip()
    if name in phonebook:
        print(name + "=" + phonebook[name])
    else:
        print("Not found")
|
[
"ekoz@protonmail.com"
] |
ekoz@protonmail.com
|
b7af57cfe3b70002b84576ef64c5255279fa4d72
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/TankmanOperationDialogMeta.py
|
e1d2fcccb7f4552ec5aef843bb1b493e8473c8d1
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/TankmanOperationDialogMeta.py
from gui.Scaleform.daapi.view.dialogs.SimpleDialog import SimpleDialog
class TankmanOperationDialogMeta(SimpleDialog):
def as_setDataS(self, data):
return self.flashObject.as_setData(data) if self._isDAAPIInited() else None
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
cf386bf9e4f886259a355ae0d1237f0389fbdb0b
|
19cb4e993c6d482e02ae7cf3fa521302483754bd
|
/setup.py
|
849ff91c4f2292e4d01ef2aaf5b0a848f2f59186
|
[
"MIT"
] |
permissive
|
brimcfadden/stormed-amqp
|
b98414cbc1be5ae2fa25651cbd7ca21e08fa33e8
|
59e81bfa4632366dc3f20b3dff25df3331480798
|
refs/heads/master
| 2020-12-24T11:06:28.460936
| 2011-07-15T15:25:21
| 2011-07-15T15:25:21
| 2,053,652
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
import distutils.core
try:
# to enable "python setup.py develop"
import setuptools
except ImportError:
pass
distutils.core.setup(
name="stormed-amqp",
version='0.1',
packages = ["stormed", "stormed.method", "stormed.method.codegen"],
author="Paolo Losi",
author_email="paolo.losi@gmail.com",
download_url="http://github.com/downloads/paolo-losi/stormed-amqp/stormed-amqp-0.1.tar.gz",
license="http://www.opensource.org/licenses/mit-license.html",
description="native tornadoweb amqp 0-9-1 client implementation",
)
|
[
"paolo.losi@gmail.com"
] |
paolo.losi@gmail.com
|
79673a33e5eeef00b9b046e1a7b02efaaf6695b1
|
929f5bbde3c215c86649cbd22f8b29a74fe3f3bf
|
/server/LabManager/calendar/routes.py
|
462b3f1c3f31c8c5769302f8efea9c687f95818b
|
[] |
no_license
|
Fayhen/Laborator.io
|
52f1a47f42a1bfa5cdfde2c55d25eacf20e76058
|
e486b2f152e0291a4132ad6fcd5b157c812f9798
|
refs/heads/master
| 2021-06-13T17:35:43.701277
| 2020-03-29T19:56:50
| 2020-03-29T19:56:50
| 179,155,182
| 0
| 0
| null | 2021-06-02T01:18:06
| 2019-04-02T20:40:07
|
Python
|
UTF-8
|
Python
| false
| false
| 81
|
py
|
from flask import Blueprint
calendar = Blueprint("calendar", __name__)
# WIP
|
[
"diego00alfa@gmail.com"
] |
diego00alfa@gmail.com
|
94a4a250cda1258c1bd6f317825a0d895ccc4900
|
55b0e3b5c59a4b929ca0b12dca6a7c88abc99b1c
|
/scripts/figures/angularMomentum_conservation.py
|
3fd84d5447cf74472ae41518839e1845cd9e9d47
|
[] |
no_license
|
andrewhalle/M31-dynamics
|
d007c387c59b87c7f76b5109011821e73b66317d
|
bf730eb66dc297ab7f35596e2b7b7fd6440a397f
|
refs/heads/master
| 2021-01-20T03:14:08.664161
| 2017-06-19T18:32:33
| 2017-06-19T18:32:33
| 60,396,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
# Generates figure which shows change #
# in total angular momentum of the #
# simulation versus time #
import rebound
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import sys
import os
sys.path.append("../include")
from universal_logs import *
def calculate_angular_momentum(sim):
    # Total angular momentum about the centre of mass. Note: this sums
    # r x v without mass weighting, so it is only proportional to the
    # true angular momentum when all particles share the same mass.
    com = sim.calculate_com()
    j = np.zeros(3)
    for p in sim.particles:
        r = np.array([p.x - com.x, p.y - com.y, p.z - com.z])
        v = np.array([p.vx - com.vx, p.vy - com.vy, p.vz - com.vz])
        j = j + np.cross(r, v)
    return np.linalg.norm(j)
sim_number = sys.argv[1].zfill(3)
initial = restore("../../logs/suite_u/" + sim_number + "/000000000.logu")
initial_mom = calculate_angular_momentum(initial)
sims = os.listdir("../../logs/suite_u/" + sim_number)
sims.sort()
sims.pop()
data = []
i = 1
while i < len(sims):
sim = restore("../../logs/suite_u/" + sim_number + "/" + sims[i])
data.append([i, (calculate_angular_momentum(sim) - initial_mom) / initial_mom])
i += 1
x = [a[0] for a in data]
y = [a[1] for a in data]
plt.plot(x, y, 'k')
plt.xlabel("Time")
plt.ylabel("Relative change in angular momentum")
plt.savefig("../../images/conservation/angular_momentum/" + sim_number + ".png")
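# --- Hedged addition: calculate_angular_momentum() above sums r x v
# --- without mass weighting, which is only proportional to the true
# --- angular momentum for equal-mass particles. rebound exposes the
# --- particle mass as p.m, so a mass-aware variant would be:
def calculate_angular_momentum_weighted(sim):
    com = sim.calculate_com()
    j = np.zeros(3)
    for p in sim.particles:
        r = np.array([p.x - com.x, p.y - com.y, p.z - com.z])
        v = np.array([p.vx - com.vx, p.vy - com.vy, p.vz - com.vz])
        j += p.m * np.cross(r, v)
    return np.linalg.norm(j)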
|
[
"ahalle@berkeley.edu"
] |
ahalle@berkeley.edu
|
2fad265d11b5850de7947324b15cf3811b053d58
|
1b25efab9fd81f1c1b9cd484a13d530759809838
|
/backend/dating/api/v1/serializers.py
|
94acc95fb234b127aaf19304903f55ffff0256f5
|
[] |
no_license
|
crowdbotics-apps/test-31906
|
1728e7947b6cbd52dc123310647ec523914aa1aa
|
2f6841d3ac3e4d335712fd11b3ee81166eec2f47
|
refs/heads/master
| 2023-08-30T11:31:54.409975
| 2021-11-10T07:26:53
| 2021-11-10T07:26:53
| 426,524,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
from rest_framework import serializers
from dating.models import Setting, Like, UserPhoto, Match, Dislike, Inbox, Profile
class InboxSerializer(serializers.ModelSerializer):
class Meta:
model = Inbox
fields = "__all__"
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = Like
fields = "__all__"
class DislikeSerializer(serializers.ModelSerializer):
class Meta:
model = Dislike
fields = "__all__"
class UserPhotoSerializer(serializers.ModelSerializer):
class Meta:
model = UserPhoto
fields = "__all__"
class SettingSerializer(serializers.ModelSerializer):
class Meta:
model = Setting
fields = "__all__"
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = "__all__"
class MatchSerializer(serializers.ModelSerializer):
class Meta:
model = Match
fields = "__all__"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
355c311d207458bb0ef7f666c6b733a12241be3a
|
2bc12c36eb39bc271641f391bb7067ef1bfddfc9
|
/2022/9/solution.py
|
0b862ff0c73affc2fd6b221dfc3e04128e6d7a07
|
[
"MIT"
] |
permissive
|
iangregson/advent-of-code
|
0243077b8415a1e95e9dcd413c76aea3b9466ce5
|
cd20404940ac5ed18b8ac61a836186a2f2580003
|
refs/heads/master
| 2023-01-05T18:59:10.931750
| 2023-01-03T11:53:46
| 2023-01-03T11:53:46
| 252,134,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,902
|
py
|
from pathlib import Path
file = Path(__file__).parent / 'input.txt'
# file = Path(__file__).parent / 'test_input.txt'
# file = Path(__file__).parent / 'test_input2.txt'
text = file.read_text().splitlines()
class Grid():
C = { 'R': (1,0), 'U': (0,-1), 'L': (-1,0), 'D': (0,1) }
D = { 'UR': (1,-1), 'UL': (-1,-1), 'DL': (-1,1), 'DR': (1,1) }
def __init__(self) -> None:
pass
@staticmethod
def neighbors_all(pos):
d = list(Grid.D.values()) + list(Grid.C.values())
return Grid.neighbors(pos, d)
@staticmethod
def neighbors_diagonal(pos):
return Grid.neighbors(pos, Grid.D.values())
@staticmethod
def neighbors_cardinal(pos):
return Grid.neighbors(pos, Grid.C.values())
@staticmethod
def neighbors(pos, directions):
x, y = pos
neighbors = set()
for (dx, dy) in directions:
neighbors.add((dx + x, dy + y))
return neighbors
@staticmethod
def plot(grid_size, knots, start_point=(0,0)):
top_left, bottom_right = grid_size
rows = []
for y in range(top_left[1], bottom_right[1]+1):
row = ""
for x in range(top_left[0], bottom_right[0]+1):
cell = '.'
if (x,y) == start_point:
cell = 's'
for knot in knots:
if (x,y) == knot.pos:
cell = f"{knot}"
row += cell
rows.append(row)
print("\n".join(rows))
class Rope():
def __init__(self, knots) -> None:
self.head = Knot(knots.pop(0))
ptr = self.head
while knots:
k = knots.pop(0)
ptr.next = Knot(k)
ptr = ptr.next
@property
def tail(self):
tail = self.head
while tail.next:
tail = tail.next
return tail
def __iter__(self):
self.__ptr = self.head
return self
def __next__(self):
n = None
if self.__ptr:
n = self.__ptr
self.__ptr = n.next
else:
raise StopIteration
return n
def __repr__(self) -> str:
return f"{list(self)}"
def __str__(self) -> str:
return f"{list(self)}"
class Knot():
def __init__(self, id, pos = (0,0)) -> None:
self.id = id
self.next = None
self.pos = None
self.visited = set()
self.move(pos)
def __str__(self) -> str:
return f"{self.id}"
def __repr__(self) -> str:
return f"{self.id}"
def move(self, pos):
self.pos = pos
self.visited.add(self.pos)
if not self.next:
return
if self.next.pos == self.pos:
return
if self.next.pos in Grid.neighbors_all(self.pos):
return
# Prefer a cardinal position
for pos in Grid.neighbors_all(self.next.pos):
if pos in Grid.neighbors_cardinal(self.pos):
self.next.move(pos)
break
# But fall back to a diagonal
if self.next.pos not in Grid.neighbors_all(self.pos):
for pos in Grid.neighbors_all(self.next.pos):
if pos in Grid.neighbors_all(self.pos):
self.next.move(pos)
break
class Sim():
def __init__(self, instructions, rope, grid_size=[(0,-4),(5,0)]) -> None:
self.instructions = instructions
self.rope = rope
self.grid_size = grid_size
def visualize_step(self):
knots = list(self.rope)
knots.reverse()
Grid.plot(self.grid_size, knots)
print()
def next(self, visualize=False):
instruction = self.instructions.pop(0)
direction, steps = instruction.split()
for _ in range(int(steps)):
x, y = self.rope.head.pos
dx, dy = Grid.C[direction]
self.rope.head.move((x + dx, y + dy))
if visualize:
self.visualize_step()
r = Rope(['H','T'])
simulator = Sim(text[:], r)
while simulator.instructions:
simulator.next()
print("Part 1:", len(simulator.rope.tail.visited))
r = Rope(['H',1,2,3,4,5,6,7,8,9])
simulator = Sim(text[:], r, grid_size=[(-11,-15),(14,5)])
while simulator.instructions:
simulator.next()
print("Part 2:", len(simulator.rope.tail.visited))
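# --- Hedged sanity check (added): the cardinal neighbor helper should
# --- return exactly the four orthogonally adjacent cells.
assert Grid.neighbors_cardinal((0, 0)) == {(1, 0), (0, -1), (-1, 0), (0, 1)}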
|
[
"ian.gregson@bigtincan.com"
] |
ian.gregson@bigtincan.com
|