Dataset schema (one row per source file):

| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 6 to 116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k to 668M (nullable) |
| star_events_count | int64 | 0 to 102k |
| fork_events_count | int64 | 0 to 38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.02M |
| extension | string | 78 classes |
| content | string | length 2 to 6.02M |
| authors | list | length 1 |
| author | string | length 0 to 175 |
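As a quick illustration of how a dataset with this schema could be consumed, here is a minimal sketch using the Hugging Face `datasets` library; the dataset path `some-org/some-code-dataset` is a placeholder rather than the real dataset name, and the filter thresholds are arbitrary choices for the example.

```python
# Minimal sketch (placeholder dataset path): stream rows that follow the schema
# above and keep small, non-vendored, non-generated Python files.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

def keep(row):
    # Column names are taken from the schema table above.
    return (
        row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 100_000
    )

for row in ds.filter(keep).take(5):
    print(row["repo_name"], row["path"], row["length_bytes"])
```

Streaming avoids downloading the full `content` column up front; the sample rows below show the kind of records such a filter would return.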

Sample rows (one record per file):

repo_name: rudolphos/NewsGrabber | path: /services/web__morningstaronline_co_uk.py | branch_name: refs/heads/master
blob_id: c55812681bffcd67f705310e9d3133f402e043f6 | directory_id: 9d8a3a2c0a15dbf1f90d801e6d705d1212cf09af
content_id: ac5fc53d8d59be5e0045ca7297f649f07c83b74c
snapshot_id: f9bddc9a9b3a9e02f716133fd746f48cee635b36 | revision_id: 86354fb769b2710ac7cdd5bd8795e43158b70ad2
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-12T12:07:55.335079 | revision_date: 2016-10-09T22:39:17 | committer_date: 2016-10-09T22:39:17
github_id: 72,316,773 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2016-10-30T00:35:08 | gha_created_at: 2016-10-30T00:35:08 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 417 | extension: py
content:
refresh = 5
version = 20160312.01
urls = ['https://www.morningstaronline.co.uk/britain',
'https://www.morningstaronline.co.uk/world',
'https://www.morningstaronline.co.uk/editorial',
'https://www.morningstaronline.co.uk/features',
'https://www.morningstaronline.co.uk/sport',
'https://www.morningstaronline.co.uk/arts']
regex = [r'^https?:\/\/[^\/]*morningstaronline\.co\.uk']
videoregex = []
liveregex = []
authors: ["Arkiver@hotmail.com"]
author: Arkiver@hotmail.com

repo_name: welkerCode/MachineLearning-SVM | path: /Attributes.py | branch_name: refs/heads/master
blob_id: 1ddcd5557d75e12aacac8b96c81f84d3742dcb9c | directory_id: 7a0b5b4315f7059fab4272c54d8c31b0fe956dbd
content_id: 3502604e17d2e7b22ba62b8464ccce4a26026ed2
snapshot_id: 94400d2a540d38e14f95ab0d14985e234b88b4cc | revision_id: 3d3ef3e71e0186816903754bd676ad9c1535bb47
detected_licenses: [] | license_type: no_license
visit_date: 2020-03-11T16:51:51.124393 | revision_date: 2018-04-23T23:08:20 | committer_date: 2018-04-23T23:08:20
github_id: 130,130,382 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 342 | extension: py
content:
# I learned to parse workbooks with https://www.sitepoint.com/using-python-parse-spreadsheet-data/
class Attributes:
def __init__(self):
self.values = []
def addValue(self, newValue):
if self.values.count(newValue) == 0:
self.values.append(newValue)
def getValues(self):
return self.values
authors: ["taylormaxwelker@gmail.com"]
author: taylormaxwelker@gmail.com

repo_name: GIGA-Consciousness/structurefunction | path: /coma/interfaces/mrtrix3.py | branch_name: refs/heads/master
blob_id: 529e1d1dcbc6ea47584217f70df3e62ca984ad37 | directory_id: 7f3802c03f27fe61d9cdf9140385bee158a7f436
content_id: 64464b2ce1b8aab6dc5213308fd641e63bab9a83
snapshot_id: 0157e88f12577f7dc02b40e251cc40ee6f61d190 | revision_id: 5c5583bb26d6092fa3b7a630192d8e79199f8df0
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2023-07-11T20:01:58.844183 | revision_date: 2015-01-05T09:06:48 | committer_date: 2015-01-05T09:06:48
github_id: 14,110,584 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 10,826 | extension: py
content:
def inclusion_filtering_mrtrix3(track_file, roi_file, fa_file, md_file, roi_names=None, registration_image_file=None, registration_matrix_file=None, prefix=None, tdi_threshold=10):
import os
import os.path as op
import numpy as np
import glob
from coma.workflows.dmn import get_rois, save_heatmap
from coma.interfaces.dti import write_trackvis_scene
import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.mrtrix as mrtrix
import nipype.interfaces.diffusion_toolkit as dtk
from nipype.utils.filemanip import split_filename
import subprocess
import shutil
rois = get_rois(roi_file)
fa_out_matrix = op.abspath("%s_FA.csv" % prefix)
md_out_matrix = op.abspath("%s_MD.csv" % prefix)
invLen_invVol_out_matrix = op.abspath("%s_invLen_invVol.csv" % prefix)
subprocess.call(["tck2connectome", "-assignment_voxel_lookup",
"-zero_diagonal",
"-metric", "mean_scalar", "-image", fa_file,
track_file, roi_file, fa_out_matrix])
subprocess.call(["tck2connectome", "-assignment_voxel_lookup",
"-zero_diagonal",
"-metric", "mean_scalar", "-image", md_file,
track_file, roi_file, md_out_matrix])
subprocess.call(["tck2connectome", "-assignment_voxel_lookup",
"-zero_diagonal",
"-metric", "invlength_invnodevolume",
track_file, roi_file, invLen_invVol_out_matrix])
subprocess.call(["tcknodeextract", "-assignment_voxel_lookup",
track_file, roi_file, prefix + "_"])
fa_matrix_thr = np.zeros((len(rois), len(rois)))
md_matrix_thr = np.zeros((len(rois), len(rois)))
tdi_matrix = np.zeros((len(rois), len(rois)))
track_volume_matrix = np.zeros((len(rois), len(rois)))
out_files = []
track_files = []
for idx_i, roi_i in enumerate(rois):
for idx_j, roi_j in enumerate(rois):
if idx_j >= idx_i:
filtered_tracks = glob.glob(op.abspath(prefix + "_%s-%s.tck" % (roi_i, roi_j)))[0]
print(filtered_tracks)
if roi_names is None:
roi_i = str(int(roi_i))
roi_j = str(int(roi_j))
idpair = "%s_%s" % (roi_i, roi_j)
idpair = idpair.replace(".","-")
else:
roi_name_i = roi_names[idx_i]
roi_name_j = roi_names[idx_j]
idpair = "%s_%s" % (roi_name_i, roi_name_j)
tracks2tdi = pe.Node(interface=mrtrix.Tracks2Prob(), name='tdi_%s' % idpair)
tracks2tdi.inputs.template_file = fa_file
tracks2tdi.inputs.in_file = filtered_tracks
out_tdi_name = op.abspath("%s_TDI_%s.nii.gz" % (prefix, idpair))
tracks2tdi.inputs.out_filename = out_tdi_name
tracks2tdi.inputs.output_datatype = "Int16"
binarize_tdi = pe.Node(interface=fsl.ImageMaths(), name='binarize_tdi_%s' % idpair)
binarize_tdi.inputs.op_string = "-thr %d -bin" % tdi_threshold
out_tdi_vol_name = op.abspath("%s_TDI_bin_%d_%s.nii.gz" % (prefix, tdi_threshold, idpair))
binarize_tdi.inputs.out_file = out_tdi_vol_name
mask_fa = pe.Node(interface=fsl.MultiImageMaths(), name='mask_fa_%s' % idpair)
mask_fa.inputs.op_string = "-mul %s"
mask_fa.inputs.operand_files = [fa_file]
out_fa_name = op.abspath("%s_FA_%s.nii.gz" % (prefix, idpair))
mask_fa.inputs.out_file = out_fa_name
mask_md = mask_fa.clone(name='mask_md_%s' % idpair)
mask_md.inputs.operand_files = [md_file]
out_md_name = op.abspath("%s_MD_%s.nii.gz" % (prefix, idpair))
mask_md.inputs.out_file = out_md_name
mean_fa = pe.Node(interface=fsl.ImageStats(op_string = '-M'), name = 'mean_fa_%s' % idpair)
mean_md = pe.Node(interface=fsl.ImageStats(op_string = '-M'), name = 'mean_md_%s' % idpair)
mean_tdi = pe.Node(interface=fsl.ImageStats(op_string = '-l %d -M' % tdi_threshold), name = 'mean_tdi_%s' % idpair)
track_volume = pe.Node(interface=fsl.ImageStats(op_string = '-l %d -V' % tdi_threshold), name = 'track_volume_%s' % idpair)
tck2trk = mrtrix.MRTrix2TrackVis()
tck2trk.inputs.image_file = fa_file
tck2trk.inputs.in_file = filtered_tracks
trk_file = op.abspath("%s_%s.trk" % (prefix, idpair))
tck2trk.inputs.out_filename = trk_file
tck2trk.base_dir = op.abspath(".")
if registration_image_file is not None and registration_matrix_file is not None:
tck2trk.inputs.registration_image_file = registration_image_file
tck2trk.inputs.matrix_file = registration_matrix_file
workflow = pe.Workflow(name=idpair)
workflow.base_dir = op.abspath(idpair)
workflow.connect(
[(tracks2tdi, binarize_tdi, [("tract_image", "in_file")])])
workflow.connect(
[(binarize_tdi, mask_fa, [("out_file", "in_file")])])
workflow.connect(
[(binarize_tdi, mask_md, [("out_file", "in_file")])])
workflow.connect(
[(mask_fa, mean_fa, [("out_file", "in_file")])])
workflow.connect(
[(mask_md, mean_md, [("out_file", "in_file")])])
workflow.connect(
[(tracks2tdi, mean_tdi, [("tract_image", "in_file")])])
workflow.connect(
[(tracks2tdi, track_volume, [("tract_image", "in_file")])])
workflow.config['execution'] = {'remove_unnecessary_outputs': 'false',
'hash_method': 'timestamp'}
result = workflow.run()
tck2trk.run()
fa_masked = glob.glob(out_fa_name)[0]
md_masked = glob.glob(out_md_name)[0]
if roi_names is not None:
tracks = op.abspath(prefix + "_%s-%s.tck" % (roi_name_i, roi_name_j))
shutil.move(filtered_tracks, tracks)
else:
tracks = filtered_tracks
tdi = glob.glob(out_tdi_vol_name)[0]
nodes = result.nodes()
node_names = [s.name for s in nodes]
mean_fa_node = [nodes[idx] for idx, s in enumerate(node_names) if "mean_fa" in s][0]
mean_fa = mean_fa_node.result.outputs.out_stat
mean_md_node = [nodes[idx] for idx, s in enumerate(node_names) if "mean_md" in s][0]
mean_md = mean_md_node.result.outputs.out_stat
mean_tdi_node = [nodes[idx] for idx, s in enumerate(node_names) if "mean_tdi" in s][0]
mean_tdi = mean_tdi_node.result.outputs.out_stat
track_volume_node = [nodes[idx] for idx, s in enumerate(node_names) if "track_volume" in s][0]
track_volume = track_volume_node.result.outputs.out_stat[1] # First value is in voxels, 2nd is in volume
if track_volume == 0:
os.remove(fa_masked)
os.remove(md_masked)
os.remove(tdi)
else:
out_files.append(md_masked)
out_files.append(fa_masked)
out_files.append(tracks)
out_files.append(tdi)
if op.exists(trk_file):
out_files.append(trk_file)
track_files.append(trk_file)
assert(0 <= mean_fa < 1)
fa_matrix_thr[idx_i, idx_j] = mean_fa
md_matrix_thr[idx_i, idx_j] = mean_md
tdi_matrix[idx_i, idx_j] = mean_tdi
track_volume_matrix[idx_i, idx_j] = track_volume
fa_matrix = np.loadtxt(fa_out_matrix)
md_matrix = np.loadtxt(md_out_matrix)
fa_matrix = fa_matrix + fa_matrix.T
md_matrix = md_matrix + md_matrix.T
fa_matrix_thr = fa_matrix_thr + fa_matrix_thr.T
md_matrix_thr = md_matrix_thr + md_matrix_thr.T
tdi_matrix = tdi_matrix + tdi_matrix.T
invLen_invVol_matrix = np.loadtxt(invLen_invVol_out_matrix)
invLen_invVol_matrix = invLen_invVol_matrix + invLen_invVol_matrix.T
track_volume_matrix = track_volume_matrix + track_volume_matrix.T
if prefix is not None:
npz_data = op.abspath("%s_connectivity.npz" % prefix)
else:
_, prefix, _ = split_filename(track_file)
npz_data = op.abspath("%s_connectivity.npz" % prefix)
np.savez(npz_data, fa=fa_matrix, md=md_matrix, tdi=tdi_matrix, trkvol=track_volume_matrix,
fa_thr=fa_matrix_thr, md_thr=md_matrix_thr, invLen_invVol=invLen_invVol_matrix)
print("Saving heatmaps...")
fa_heatmap = save_heatmap(fa_matrix, roi_names, '%s_fa' % prefix)
fa_heatmap_thr = save_heatmap(fa_matrix_thr, roi_names, '%s_fa_thr' % prefix)
md_heatmap = save_heatmap(md_matrix, roi_names, '%s_md' % prefix)
md_heatmap_thr = save_heatmap(md_matrix_thr, roi_names, '%s_md_thr' % prefix)
tdi_heatmap = save_heatmap(tdi_matrix, roi_names, '%s_tdi' % prefix)
trk_vol_heatmap = save_heatmap(track_volume_matrix, roi_names, '%s_trk_vol' % prefix)
invLen_invVol_heatmap = save_heatmap(invLen_invVol_matrix, roi_names, '%s_invLen_invVol' % prefix)
summary_images = []
summary_images.append(fa_heatmap)
summary_images.append(fa_heatmap_thr)
summary_images.append(md_heatmap)
summary_images.append(md_heatmap_thr)
summary_images.append(tdi_heatmap)
summary_images.append(trk_vol_heatmap)
summary_images.append(invLen_invVol_heatmap)
out_merged_file = op.abspath('%s_MergedTracks.trk' % prefix)
skip = 80.
track_merge = pe.Node(interface=dtk.TrackMerge(), name='track_merge')
track_merge.inputs.track_files = track_files
track_merge.inputs.output_file = out_merged_file
track_merge.run()
track_names = []
for t in track_files:
_, name, _ = split_filename(t)
track_names.append(name)
out_scene = op.abspath("%s_MergedScene.scene" % prefix)
out_scene_file = write_trackvis_scene(out_merged_file, n_clusters=len(track_files), skip=skip, names=track_names, out_file=out_scene)
print("Merged track file written to %s" % out_merged_file)
print("Scene file written to %s" % out_scene_file)
out_files.append(out_merged_file)
out_files.append(out_scene_file)
return out_files, npz_data, summary_images
authors: ["erik.ziegler@ulg.ac.be"]
author: erik.ziegler@ulg.ac.be

repo_name: Lairion/MainAcadProg | path: /skills/views.py | branch_name: refs/heads/master
blob_id: 57055c02b52144a20b3dc308a1a65e5d180c2744 | directory_id: 5c70f2e44f915d42240d5283a6455417eb8182e4
content_id: 47537b9e89c5675cc54981aa69a7e45513d50b0d
snapshot_id: 3aafd788609a8aeae8598b97a576f18b37995751 | revision_id: 62870f704938008571044120a8e07466ead7bff9
detected_licenses: [] | license_type: no_license
visit_date: 2021-04-06T20:36:07.727625 | revision_date: 2018-04-09T14:13:23 | committer_date: 2018-04-09T14:13:23
github_id: 125,405,578 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,570 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from .models import Skill,Project
# Create your views here.
class SkillsViews(object):
"""docstring for SkillsViews"""
@staticmethod
def skills(request):
skills = Skill.objects.all()
print(request.user)
print(request.user.is_staff)
context = {
"title":"Skills",
"skills":skills
}
return render(request,"skills.html",context)
@staticmethod
def show_projects(request):
projects = Project.objects.all()
new_projects = []
for i in range(len(projects)):
new_projects.append({"instance":projects[i],"result":i % 2 == 0})
print(new_projects[0]['instance'].get_url())
context ={
"title":"Projects",
"projects": new_projects
}
return render(request, "projects.html",context)
@staticmethod
def get_project(request,id):
project = Project.objects.get(id=int(id))
context ={
"title":"Project",
"project": project
}
return render(request, "project.html",context)
@staticmethod
def form_sessions(request):
context = {
'title':'examp'
}
return render(request,"sessions_example.html",context)
@staticmethod
def add_sessions(request):
print(request.session['some_text'])
leadboard = request.session.get("leadboard",[])
leadboard += [{'name':request.GET.get("name",""),"score":0}]
request.session.update({"leadboard":leadboard})
return redirect("skills:form")
[{'name':'peter',"score":0}] + [{'name':'peter',"score":0}] == [{'name':'peter',"score":0},{'name':'peter',"score":0}]
authors: ["nunyes_m_a@ukr.net"]
author: nunyes_m_a@ukr.net

repo_name: Ak47001/client-server | path: /client.py | branch_name: refs/heads/master
blob_id: a44cdf5bac620404b03de57b61aafde1143c1c6c | directory_id: c017800ec1ef8425ca74b6bcd9f33bbbb0c45181
content_id: 76ae12a2d0f874645ac58d608c5573e77fe88720
snapshot_id: 2de4d21029d2149e5e7aa27f9df0c82e144ef0b0 | revision_id: bc2705bed7c38588e3c4d2abe29b3ae226cdd404
detected_licenses: [] | license_type: no_license
visit_date: 2020-08-05T02:16:50.212121 | revision_date: 2019-10-08T04:42:02 | committer_date: 2019-10-08T04:42:02
github_id: 212,359,856 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 413 | extension: py
content:
import socket
import sys
#read file
filename = 'filename.txt'
# client program using python
s = socket.socket()
ip = ''
port = 8777
s.connect((ip,port))
print("Successfully connected to ",ip)
with open(filename,'r') as fd:
str = fd.read(1024)
s.sendall(bytes(filename,'utf-8'))
print("Last Modified time",s.recv(1024))
while(str):
s.send(bytes(str,'utf-8'))
str=fd.read(1024)
fd.close()
s.close()
authors: ["noreply@github.com"]
author: noreply@github.com

repo_name: listentowindy/to_silicon_valley | path: /SConstruct | branch_name: refs/heads/master
blob_id: 427c47514b38a992a79fd0148c3b909718f39386 | directory_id: 82aaf5317c6c5009548da0b0e440c5dfb38d1fb8
content_id: c5641fe6243c20cfb9ebfcaeab1717dc1f2e98a4
snapshot_id: 62835f1cf256ccf65c845482117e4f5350e06624 | revision_id: 876a6db9c5cbc2c6c613d468abd5807908364093
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-10T08:45:49.744093 | revision_date: 2015-12-19T07:59:55 | committer_date: 2015-12-19T07:59:55
github_id: 47,193,850 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,818 | extension: (none)
content:
env = Environment(tools=['default', 'packaging'])
env.Append(CCFLAGS = Split('-g -ggdb -Wall -Wextra -DBTDEBUG'))
#env.Append(CCFLAGS = Split('-g -O2 -Wall -Wextra -DNDEBUG'))
#set include path
comroot='../../../com/'
cpppath=[
'./',
comroot+'thirdparty/gflags-2.0/include',
comroot+'thirdparty/glog-0.3.3/include',
comroot+'thirdparty/gtest-1.6.0/include',
];
env.Append(CPPPATH = cpppath)
#set libpath
libpath=[
'./',
comroot+'thirdparty/gflags-2.0/lib/',
comroot+'thirdparty/glog-0.3.3/lib/',
comroot+'thirdparty/gtest-1.6.0/lib/',
comroot+'thirdparty/boost_1.52.0/lib/',
];
env.Append(LIBPATH = libpath)
#set libs
libs=['gflags', 'gtest', 'glog']
env.Append(LIBS = libs)
Export("env")
import os
import shutil
all_path = [
];
main_path = [
#'sort',
'6',
'8',
];
def FindTestSource(paths):
objs = []
for path in paths:
for file in os.listdir(path):
if file[-2:] != ".c" and file[-3:] != ".cc" and file[-4:] != ".cpp":
continue
if file.startswith("test_"):
file = '%s/%s' % (path, file)
objs.append(file)
return objs
def FindSource(paths):
objs = []
for path in paths:
for file in os.listdir(path):
if file[-2:] != ".c" and file[-3:] != ".cc" and file[-4:] != ".cpp":
continue
if not file.startswith("test_"):
file = '%s/%s' % (path, file)
objs.append(file)
return objs
for bin_source in FindSource(main_path):
bin_name, suffix = bin_source.split('.')
if suffix == 'c' or suffix == 'cc' or suffix == 'cpp':
env.Program(target = bin_name, source = [bin_source] + FindSource(all_path))
for bin_source in FindTestSource(all_path):
bin_name = bin_source.split('.')[0]
env.Program(target = bin_name, source = [bin_source] + FindSource(all_path))
authors: ["hongchunxiao.360.cn"]
author: hongchunxiao.360.cn

repo_name: thangdnsf/ML_DVFS | path: /src/Modeling and evaluation/NN-multi-targets/drawModel.py | branch_name: refs/heads/master
blob_id: ab771edf0437105d900a63d8b5655d39a750a2ff | directory_id: 121c583ac01aa6ad3d5e417fe764236ecb98deb7
content_id: 79bcecb094ccdc523253f1d3c8e27316ca9735b9
snapshot_id: e00b82294d35c314209bc56458e7980abc200884 | revision_id: 908810969588cbcf7c784c0f3ad829eb15bbe0b4
detected_licenses: [] | license_type: no_license
visit_date: 2023-06-07T03:35:52.696587 | revision_date: 2021-06-25T18:03:13 | committer_date: 2021-06-25T18:03:13
github_id: 380,318,849 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,135 | extension: py
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 16:46:31 2021
@author: ndthang
"""
import pandas as pd
import numpy as np
import sys
import os
import time
import random
from model import DVFSModel
from datagenerator import HPC_DVFS, labels,labelsZ, make_weights_for_balanced_classes
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
from tqdm import tqdm
import matplotlib.pyplot as plt
import util
from datetime import datetime
import json
from warnings import filterwarnings
filterwarnings("ignore")
# hyperparmeter
kfold = 0
seed = 42
warmup_epo = 5
init_lr = 1e-3
batch_size = 100
valid_batch_size = 100
n_epochs = 300#1000
num_batch = 400
warmup_factor = 10
num_workers = 4
use_amp = True
early_stop = 100
device = torch.device('cuda')
model_dir = 'logs7/'
class GradualWarmupSchedulerV2(GradualWarmupScheduler):
def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
super(GradualWarmupSchedulerV2, self).__init__(optimizer, multiplier, total_epoch, after_scheduler)
def get_lr(self):
if self.last_epoch > self.total_epoch:
if self.after_scheduler:
if not self.finished:
self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finished = True
return self.after_scheduler.get_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
if self.multiplier == 1.0:
return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
else:
return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True # for faster training, but not deterministic
seed_everything(seed)
#load dataset for training
df = pd.read_csv('../../data4train/train.csv', na_filter= False, index_col=0)
ignore_columns = ['typedata','expe','UID','target','targetZ','kfold', #'guest', 'guest_nice', 'irq',
#'steal','nice','emulation_faults','irxp', 'irxb',
#'itxp', 'itxb', 'core0','core1','iowait','softirq','txp',
]
feature_list = list(set(df.columns) - set(ignore_columns))
train_df = df[df.kfold != kfold]
train_df = train_df.sample(frac=1)
val_df = df[df.kfold == kfold]
#standarzation data training
sc = StandardScaler()
train_loader_= HPC_DVFS(df=train_df,mode='training',augmentation = True, feature_list=feature_list,sc=sc)
sc_train = train_loader_.StandardScaler()
val_loader_ = HPC_DVFS(df=val_df,mode='valid',feature_list= feature_list,sc=sc_train)
weights = make_weights_for_balanced_classes(train_df.target.values, len(labels))
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
train_loader = torch.utils.data.DataLoader(train_loader_, batch_size=batch_size,sampler = sampler, num_workers=num_workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(val_loader_,batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)
model = DVFSModel(n_cont=len(feature_list), out_sz=len(labels),out_szz=len(labelsZ), szs=[1024,512,256, 128, 64], drops=[0.001,0.01,0.05, 0.1,0.2])
model = model.to(device)
x,y = next(iter(train_loader))
x = x.to(device)
yhat = model(x)
from torchviz import make_dot
from torchsummary import summary
make_dot(yhat, params=dict(list(model.named_parameters()))).render("rnn_torchviz", format="png")
summary(model, [190])
authors: ["thangdn.tlu@gmail.com"]
author: thangdn.tlu@gmail.com

repo_name: Mrdorom/Pluto | path: /app/extend.py | branch_name: refs/heads/master
blob_id: 1eac67a4a0e78a788d99dc2ae97cc6d4071fe1e9 | directory_id: 4986c32aa387a0231b4c9de9b32ddd5f62b41931
content_id: 23cfce9697c46bb1db7366147c027f717059185b
snapshot_id: 08e8e03d0d0a3e265182b13bbe26acaffcf82e0d | revision_id: cb0f6be168e4c182c06573b812fdc29c2dcf3076
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-01T17:52:39.249643 | revision_date: 2021-04-09T13:08:30 | committer_date: 2021-04-09T13:08:30
github_id: 349,041,670 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 513 | extension: py
content:
"""
-------------------------------------------------
File Name: extend
Description : This file mainly sets up some extensions
Author : shili
date: 2021/3/11
-------------------------------------------------
Change Activity: 2021/3/11:
-------------------------------------------------
"""
__author__ = 'shili'
from flask_mongoengine import MongoEngine
from flask_jwt_extended import JWTManager
from flask_marshmallow import Marshmallow
mongo = MongoEngine()
jwt = JWTManager()
ma = Marshmallow()
authors: ["shili@yizhoucp.cn"]
author: shili@yizhoucp.cn

repo_name: victorhausen/taco-api | path: /taco/migrations/0002_auto_20210227_1527.py | branch_name: refs/heads/main
blob_id: 9a83f9b0df96736170c0b233641592d12a80cb24 | directory_id: 4fcf20223f971875c24c1be921ed86681063070a
content_id: 6af811ad7aada9f01bea2363f3fe02024d6254d0
snapshot_id: 52ee266256d5e128f00b6ac2d045fe51ee1c7aa5 | revision_id: 0596ba16cb454fef0cb4b56bea5b086b48e1ab98
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-18T11:24:02.600897 | revision_date: 2021-02-27T15:28:51 | committer_date: 2021-02-27T15:28:51
github_id: 342,873,953 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,546 | extension: py
content:
# Generated by Django 3.1.7 on 2021-02-27 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('taco', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Taco',
fields=[
('id', models.TextField(blank=True, db_column='id', primary_key=True, serialize=False)),
('nomedoalimento', models.TextField(blank=True, db_column='NomedoAlimento', null=True)),
('categoria', models.TextField(blank=True, db_column='Categoria', null=True)),
('umidade_field', models.TextField(blank=True, db_column='Umidade(%)', null=True)),
('energia_kcal_field', models.TextField(blank=True, db_column='Energia(kcal)', null=True)),
('energia_kj_field', models.TextField(blank=True, db_column='Energia(kJ)', null=True)),
('proteina_g_field', models.TextField(blank=True, db_column='Proteina(g)', null=True)),
('lipideos_g_field', models.TextField(blank=True, db_column='Lipideos(g)', null=True)),
('colesterol_mg_field', models.TextField(blank=True, db_column='Colesterol(mg)', null=True)),
('carboidrato_g_field', models.TextField(blank=True, db_column='Carboidrato(g)', null=True)),
('fibraalimentar_g_field', models.TextField(blank=True, db_column='FibraAlimentar(g)', null=True)),
('cinzas_g_field', models.TextField(blank=True, db_column='Cinzas(g)', null=True)),
('calcio_mg_field', models.TextField(blank=True, db_column='Calcio(mg)', null=True)),
('magnesio_mg_field', models.TextField(blank=True, db_column='Magnesio(mg)', null=True)),
('manganes_mg_field', models.TextField(blank=True, db_column='Manganes(mg)', null=True)),
('fosforo_mg_field', models.TextField(blank=True, db_column='Fosforo(mg)', null=True)),
('ferro_mg_field', models.TextField(blank=True, db_column='Ferro(mg)', null=True)),
('sodio_mg_field', models.TextField(blank=True, db_column='Sodio(mg)', null=True)),
('potassio_mg_field', models.TextField(blank=True, db_column='Potassio(mg)', null=True)),
('cobre_mg_field', models.TextField(blank=True, db_column='Cobre(mg)', null=True)),
('zinco_mg_field', models.TextField(blank=True, db_column='Zinco(mg)', null=True)),
('retinol_mg_field', models.TextField(blank=True, db_column='Retinol(mg)', null=True)),
('re_mcg_field', models.TextField(blank=True, db_column='RE(mcg)', null=True)),
('rae_mcg_field', models.TextField(blank=True, db_column='RAE(mcg)', null=True)),
('tiamina_mg_field', models.TextField(blank=True, db_column='Tiamina(mg)', null=True)),
('riboflavina_mg_field', models.TextField(blank=True, db_column='Riboflavina(mg)', null=True)),
('piridoxina_mg_field', models.TextField(blank=True, db_column='Piridoxina(mg)', null=True)),
('niacina_mg_field', models.TextField(blank=True, db_column='Niacina(mg)', null=True)),
('vitaminac_mg_field', models.TextField(blank=True, db_column='VitaminaC(mg)', null=True)),
],
options={
'db_table': 'Taco_4a_edicao_2011',
'managed': False,
},
),
migrations.DeleteModel(
name='Taco4AEdicao2011',
),
]
authors: ["hausen.victor@gmail.com"]
author: hausen.victor@gmail.com

repo_name: akanumur/Data_Science_Everyday | path: /Learning/Courses/Applied Machine Learning in Python/Week2/Classifier+Visualization.py | branch_name: refs/heads/master
blob_id: 0b35dcbf119952569f6b72bfb2a263970be81c51 | directory_id: 92b2914d39142f241b9003b3dd0c36e64ae3dce0
content_id: caeb3a55011971016dfa1b33714ef79c22b4e575
snapshot_id: 1dd492cb9e48fd225ed8516ee79dd668f5b636b0 | revision_id: 442784fe2c968723cd54d584ca3dfec626238960
detected_licenses: [] | license_type: no_license
visit_date: 2022-12-12T07:53:25.639936 | revision_date: 2020-08-21T23:19:54 | committer_date: 2020-08-21T23:19:54
github_id: 268,927,991 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,124 | extension: py
content:
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
#
# ---
# # Classifier Visualization Playground
#
# The purpose of this notebook is to let you visualize various classifiers' decision boundaries.
#
# The data used in this notebook is based on the [UCI Mushroom Data Set](http://archive.ics.uci.edu/ml/datasets/Mushroom?ref=datanews.io) stored in `mushrooms.csv`.
#
# In order to better visualize the decision boundaries, we'll perform Principal Component Analysis (PCA) on the data to reduce the dimensionality to 2 dimensions. Dimensionality reduction will be covered in a later module of this course.
#
# Play around with different models and parameters to see how they affect the classifier's decision boundary and accuracy!
# In[2]:
get_ipython().magic('matplotlib notebook')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
df = pd.read_csv('readonly/mushrooms.csv')
print(df)
# In[4]:
df2 = pd.get_dummies(df)
print(df2.head)
# In[5]:
df3 = df2.sample(frac=0.08)
X = df3.iloc[:,2:]
y = df3.iloc[:,1]
pca = PCA(n_components=2).fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(pca, y, random_state=0)
plt.figure(dpi=120)
plt.scatter(pca[y.values==0,0], pca[y.values==0,1], alpha=0.5, label='Edible', s=2)
plt.scatter(pca[y.values==1,0], pca[y.values==1,1], alpha=0.5, label='Poisonous', s=2)
plt.legend()
plt.title('Mushroom Data Set\nFirst Two Principal Components')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.gca().set_aspect('equal')
# In[6]:
def plot_mushroom_boundary(X, y, fitted_model):
plt.figure(figsize=(9.8,5), dpi=100)
for i, plot_type in enumerate(['Decision Boundary', 'Decision Probabilities']):
plt.subplot(1,2,i+1)
mesh_step_size = 0.01 # step size in the mesh
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step_size), np.arange(y_min, y_max, mesh_step_size))
if i == 0:
Z = fitted_model.predict(np.c_[xx.ravel(), yy.ravel()])
else:
try:
Z = fitted_model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
except:
plt.text(0.4, 0.5, 'Probabilities Unavailable', horizontalalignment='center',
verticalalignment='center', transform = plt.gca().transAxes, fontsize=12)
plt.axis('off')
break
Z = Z.reshape(xx.shape)
plt.scatter(X[y.values==0,0], X[y.values==0,1], alpha=0.4, label='Edible', s=5)
plt.scatter(X[y.values==1,0], X[y.values==1,1], alpha=0.4, label='Poisonous', s=5)
plt.imshow(Z, interpolation='nearest', cmap='RdYlBu_r', alpha=0.15,
extent=(x_min, x_max, y_min, y_max), origin='lower')
plt.title(plot_type + '\n' +
str(fitted_model).split('(')[0]+ ' Test Accuracy: ' + str(np.round(fitted_model.score(X, y), 5)))
plt.gca().set_aspect('equal');
plt.tight_layout()
plt.subplots_adjust(top=0.9, bottom=0.08, wspace=0.02)
# In[7]:
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[8]:
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=20)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[9]:
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(max_depth=3)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[10]:
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[11]:
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[ ]:
from sklearn.svm import SVC
model = SVC(kernel='linear')
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[12]:
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=1)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[13]:
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=10)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[14]:
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[15]:
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[ ]:
authors: ["akanumur@uncc.edu"]
author: akanumur@uncc.edu

repo_name: ichar/Express-Suite-DMS | path: /ExpressSuiteTools/ExpressSuiteCore.py | branch_name: refs/heads/master
blob_id: fb5e14362c54bc9ed160c239f7c153c7f418275d | directory_id: 8d5fac378cb1f7c826996e442375c7ee8cb842d5
content_id: 260f425a82a9434266342c857f1a9fc2b60b8c4d
snapshot_id: 6f4cf7064b774894995b2224a3ca1a13ac4aa64a | revision_id: bdf3ad7c1ec4bcdec08000bf4ac5315ca6a0ad19
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-11T10:59:15.101637 | revision_date: 2018-02-16T02:09:12 | committer_date: 2018-02-16T02:09:12
github_id: 72,807,311 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 38,181 | extension: py
content:
"""
ExpressSuiteCore and PortalGenerator classes
$Id: ExpressSuiteCore.py, v 1.0 2007/08/30 12:00:00 Exp $
*** Checked 09/06/2009 ***
"""
__version__ = '$Revision: 1.0 $'[11:-2]
import Zope2
import sys, os
from copy import copy
from locale import setlocale, getlocale, LC_ALL
from string import join
from urllib import splittype, splitport
from urlparse import urlparse
from types import StringType, UnicodeType
from Globals import HTMLFile, DTMLFile, package_home, get_request
from AccessControl import ClassSecurityInfo
from Acquisition import aq_get
from ZPublisher import Publish
from ZPublisher.HTTPRequest import default_port
from ZPublisher.BeforeTraverse import NameCaller, registerBeforeTraverse, queryBeforeTraverse
from Products.CMFCore import permissions as CMFCorePermissions
from Products.CMFCore.FSDTMLMethod import FSDTMLMethod
from Products.CMFCore.FSImage import FSImage
from Products.CMFCore.PortalObject import PortalObjectBase
from Products.CMFCore.DirectoryView import addDirectoryViews, createDirectoryView
from Products.CMFCore.utils import getToolByName, _checkPermission, _getAuthenticatedUser
from Products.CMFDefault import DiscussionItem, SkinnedFolder
from Products.CMFDefault import cmfdefault_globals
from Products.CMFDefault.DublinCore import DefaultDublinCoreImpl
try: from Products.AppTracker.AppTracker import AppTracker
except ImportError: AppTracker = None
from logging import getLogger
logger = getLogger( 'ExpressSuiteCore' )
import Config
if Config.IsSQLCatalog:
import ZSQLCatalogTool as CatalogTool
from Products.ZMySQLDA.DA import Connection as SQLConnection
else:
import CatalogTool
import ActionsTool
import BackupFSRoot
import CommentsTool
import DTMLDocument, DefaultCategories, DepartmentDictionary
import ErrorLogTool, Exceptions, FSFile, FSFolder, Features, GuardedTable
import HTMLDocument, HTMLCard
import Mail, MailFolder, MemberDataTool, MetadataTool
import PropertiesTool, Registry, SearchProfile, ServicesTool, Shortcut
import TaskItem, TypesTool
import UserFolder
# these may need to be upgraded
#from MigrationTool import MigrationTool
from Config import Roles
from Heading import Heading, factory_type_information as Heading_factory_type_information
from ManageCMFContent import ManageCMFContent
from SimpleObjects import ContainerBase
from Utils import InitializeClass, getLanguageInfo, makepath, joinpath, pathdelim, formatComments, \
GetSessionValue, SetSessionValue, ExpireSessionValue
import CustomDefinitions
from CustomObjects import CustomDefs, ObjectHasCustomCategory, ObjectShouldBeCleanedBeforePaste, \
CustomCheckPermission, CustomCookedTableTranslit, getJSCleanerAttrs
factory_type_information = ( \
DTMLDocument.factory_type_information
+ FSFile.factory_type_information
+ FSFolder.factory_type_information
+ GuardedTable.factory_type_information
+ Heading_factory_type_information
+ HTMLDocument.factory_type_information
+ HTMLCard.factory_type_information
+ MailFolder.factory_type_information
+ Registry.factory_type_information
+ SearchProfile.factory_type_information
+ Shortcut.factory_type_information
+ TaskItem.factory_type_information
)
DiscussionItem_fti = copy( DiscussionItem.factory_type_information )
DiscussionItem_fti[0]['disallow_manual'] = 1
SkinnedFolder_fti = copy( SkinnedFolder.factory_type_information )
SkinnedFolder_fti[0]['disallow_manual'] = 1
cmf_factory_type_information = DiscussionItem_fti + SkinnedFolder_fti
class ExpressSuiteCore( ContainerBase, PortalObjectBase, DefaultDublinCoreImpl ):
"""
Functions of this class help in the setup of a new ExpressSuiteCore
"""
_class_version = 1.01
meta_type = 'ExpressSuiteCore'
__implements__ = ( Features.isPortalRoot,
Features.isPrincipiaFolderish,
PortalObjectBase.__implements__,
DefaultDublinCoreImpl.__implements__,
)
isPrincipiaFolderish = 1
security = ClassSecurityInfo()
manage_options = PortalObjectBase.manage_options + \
ContainerBase.manage_options
_properties = (
{'id':'title', 'type':'string', 'mode':'w'},
{'id':'description', 'type':'text', 'mode':'w'},
{'id':'server_url', 'type':'string', 'mode':'w'},
{'id':'stemmer', 'type':'string', 'mode':'w'},
{'id':'product_version', 'type':'string', 'mode':'w'},
)
# overriden by Implicit in ItemBase
__of__ = PortalObjectBase.__of__
# overriden by ObjectManager in ContainerBase
_checkId = PortalObjectBase._checkId
_verifyObjectPaste = PortalObjectBase._verifyObjectPaste
# default attribute values
title = ''
description = ''
server_url = None
product_version = None
service_unavailable = DTMLFile( 'dtml/service_unavailable', globals() )
def __init__( self, id, title='' ):
"""
Initializes class instance
"""
ContainerBase.__init__( self )
PortalObjectBase.__init__( self, id, title )
DefaultDublinCoreImpl.__init__( self )
def _initstate( self, mode ):
"""
Initializes instance attributes
"""
if not ContainerBase._initstate( self, mode ):
return 0
# install our before_traverse hook
if not queryBeforeTraverse( self, __name__ ):
registerBeforeTraverse( self, NameCaller('_beforeTraverseHook'), __name__ )
if not mode:
return 1
if getattr( self, 'server_url', None ) is None:
REQUEST = get_request()
self._setPropValue( 'server_url', REQUEST and REQUEST.physicalPathToURL('') or '' )
self._upgrade( 'portal_actions', ActionsTool.ActionsTool )
self._upgrade( 'portal_catalog', CatalogTool.CatalogTool )
self._upgrade( 'portal_memberdata', MemberDataTool.MemberDataTool )
self._upgrade( 'portal_metadata', MetadataTool.MetadataTool )
self._upgrade( 'portal_properties', PropertiesTool.PropertiesTool )
self._upgrade( 'portal_types', TypesTool.TypesTool )
for view in self.portal_skins.objectValues():
if getattr( view, '_isDirectoryView', None ):
view._dirpath = view._dirpath.replace( '\\', pathdelim )
if not hasattr( self, 'portal_errorlog' ):
tool = ErrorLogTool.ErrorLogTool()
self._setObject( tool.getId(), tool )
if not hasattr( self, 'portal_comments' ):
tool = CommentsTool.CommentsTool()
self._setObject( tool.getId(), tool )
if not hasattr( self, 'portal_services' ):
tool = ServicesTool.ServicesTool()
self._setObject( tool.getId(), tool )
gen = PortalGenerator()
gen.setupMail( self )
return 1
def _afterValidateHook( self, user, published=None, REQUEST=None ):
"""
Prepares global enviroment after the user is authenticated
"""
self.setContentCharset( REQUEST )
self.fixFormLanguage( REQUEST )
if isinstance( published, FSImage ):
REQUEST.RESPONSE.setHeader( 'Cache-Control', 'public, max-age=7200, must-revalidate' )
elif isinstance( published, FSDTMLMethod ):
REQUEST.RESPONSE.setHeader('Expires', 'Tue, 22 Jan 1980 01:01:01 GMT')
def _beforeTraverseHook( self, container, REQUEST, *args ):
"""
Prepares global enviroment before any object inside is accessed
"""
try:
self.fixProxiedRequest( REQUEST )
self.setPortalLocale()
self.setContentCharset( REQUEST )
except:
pass
try: mpath = list( Config.MaintainanceMode.get( self._p_oid ) or [] )
except: mpath = None
if not mpath:
return
stack = REQUEST['TraversalRequestNameStack']
mpath.reverse()
if stack and ( stack[-1] in ['portal_errorlog', 'scripts.js', 'styles.css'] or \
stack[0] == 'manage' or stack[0].startswith('manage_') ):
return
if stack[ -len(mpath): ] != mpath:
REQUEST['TraversalRequestNameStack'] = ['maintainance']
def _containment_onAdd( self, item, container ):
"""
Is called after our parent *item* is added to the *container*
"""
# Not calling base class's methods from here avoids reinitialization
# of all the content objects after product version change.
# Setup is carried by generator anyway.
# need to realize same as Scheduler schema to provide non-conflict database backup
# if more than one ExpressSuiteCore in ZODB is presented.
loop_app = self.getPhysicalRoot()
if not hasattr( loop_app, 'ExpressSuiteBackup' ):
try:
b = BackupFSRoot.BackupFSRoot()
loop_app._setObject( b.id, b )
except:
pass
def _containment_onDelete( self, item, container ):
"""
Is called before our parent *item* is deleted from its *container*
"""
root = self.getPhysicalRoot()
backupFSRoot = getattr(root, 'ExpressSuiteBackup', None)
if backupFSRoot is not None:
backupFSRoot.unregistryAppBackup( joinpath( item.getPhysicalPath() ) )
PortalObjectBase.manage_beforeDelete( self, item, container )
def _instance_onCreate( self ):
self.product_version = Config.ProductVersion
security.declareProtected( CMFCorePermissions.View, 'maintainance' )
def maintainance( self, REQUEST=None ):
"""
Maintainance mode
"""
if _checkPermission( CMFCorePermissions.ManagePortal, self ):
mpath = Config.MaintainanceMode.get( self._p_oid )
return self.redirect( action='/'.join(mpath) )
return self.service_unavailable( self, REQUEST )
#
# ==========================================================================================================
#
def view( self, REQUEST=None ):
""" Invokes the default view of the content storage """
REQUEST = REQUEST or self.REQUEST
return self.storage(REQUEST)
security.declarePrivate( 'fixProxiedRequest' )
def fixProxiedRequest( self, REQUEST ):
""" Fixes environment if request was processed by frontend server """
# mod_proxy: X-Forwarded-Server
# mod_accel: X-Host, X-Real-IP, X-URI, X-Method
server = REQUEST.get('SERVER_URL')
real_host = REQUEST.get('HTTP_X_FORWARDED_SERVER') or REQUEST.get('HTTP_X_HOST')
real_addr = REQUEST.get('HTTP_X_REAL_IP')
real_uri = REQUEST.get('HTTP_X_URI')
# change SERVER_URL to frontend server's address and protocol
if server and real_host:
proto = REQUEST.get('HTTP_X_METHOD') or splittype( server )[0]
host, port = splitport( real_host )
REQUEST.setServerURL( proto, host, port or default_port.get( proto ) )
# set REMOTE_ADDR to the real client's address
if real_addr:
REQUEST.environ['REMOTE_ADDR'] = real_addr
# modify SCRIPT_NAME for proxied requests like
# http://frontend/prefix/portal -> http://backend/portal
if real_uri:
# TODO: handle different portal name on frontend
pos = real_uri.find( REQUEST['PATH_INFO'] )
if pos > 0:
REQUEST._script = real_uri[ 1:pos ].split('/')
security.declarePrivate( 'setPortalLocale' )
def setPortalLocale( self ):
""" Changes system locale according to the portal language """
info = getLanguageInfo( self )
# find default and effective locale settings
def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )
cur_locale = getlocale()
cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''
# check whether locale is already ok
if def_locale is None or cur_locale.lower() == def_locale.lower():
return
# change effective locale
try:
setlocale( LC_ALL, def_locale )
except Exceptions.LocaleError:
pass
security.declarePublic( 'setContentCharset' )
def setContentCharset( self, REQUEST=None ):
""" Sets response charset according to the user's selected language """
REQUEST = REQUEST or aq_get( self, 'REQUEST', None )
if REQUEST is None:
return
lang = REQUEST.cookies.get( 'LOCALIZER_LANGUAGE' )
info = getLanguageInfo( lang, None )
if lang is None or info is None:
membership = getToolByName( self, 'portal_membership', None )
if membership is not None:
lang = membership.getLanguage( preferred=1, REQUEST=REQUEST )
info = getLanguageInfo( lang )
REQUEST.set( 'LOCALIZER_LANGUAGE', lang )
if not membership.isAnonymousUser():
path = joinpath( '', REQUEST._script, self.absolute_url( relative=1 ) )
REQUEST.RESPONSE.setCookie( 'LOCALIZER_LANGUAGE', lang, path=path )
charset = info['http_charset']
REQUEST.set( 'LOCALIZER_CHARSET', charset )
REQUEST.set( 'management_page_charset', charset )
REQUEST.RESPONSE.setHeader( 'content-type', 'text/html; charset=%s' % charset )
security.declarePublic( 'fixFormLanguage' )
def fixFormLanguage( self, REQUEST ):
"""
Replaces HTML-encoded entities with their corresponding
characters in the POST form data
"""
if REQUEST is None:
return
lang = REQUEST.get( 'LOCALIZER_LANGUAGE' )
map = Config.LanguageEntitiesMap.get( lang )
if map is None:
return
for key, value in REQUEST.form.items():
if type(value) in ( StringType, UnicodeType, ):
for entity, char in map.items():
value = value.replace( entity, char )
REQUEST.form[ key ] = value
if REQUEST.REQUEST_METHOD == 'PUT':
value = REQUEST.other.get('BODY')
if value is not None:
for entity, char in map.items():
value = value.replace( entity, char )
REQUEST.other['BODY'] = value
security.declareProtected( CMFCorePermissions.View, 'isEffective' )
def isEffective( self, date ):
""" Override DefaultDublinCoreImpl's test, since we are always viewable """
return 1
def reindexObject( self, idxs=[] ):
""" Overrides DefaultDublinCoreImpl's method """
pass
def productVersion( self ):
""" Returns version string of the product """
return Config.ProductVersion
#
# Portal global utilities ==================================================================================
#
security.declarePublic( 'getPortalObject' )
def getPortalObject( self ):
""" Returns the portal object itself """
return self
security.declarePublic( 'getPortalConfiguration' )
def getPortalConfiguration( self ):
""" Returns the PortalConfiguration object """
return CustomDefinitions.portalConfiguration
security.declarePublic( 'getDepartmentDictionary' )
def getDepartmentDictionary( self ):
""" Returns the DepartmentDictionary object """
return DepartmentDictionary.departmentDictionary
security.declarePublic( 'getCustomDefinitions' )
def getCustomDefinitions( self, defs, *args, **kw ):
""" Returns given custom definition value """
return CustomDefs( defs, *args, **kw )
security.declarePublic( 'hasCustomCategory' )
def hasCustomCategory( self, context ):
""" Returns given custom definition value """
return ObjectHasCustomCategory( context )
def shouldBeCleanedBeforePaste( self, context ):
""" Verifies whether content body should be cleaned before paste """
return ObjectShouldBeCleanedBeforePaste( context )
security.declarePublic( 'getJSCleanerForCategory' )
def getJSCleanerAttrsForCategory( self, context, category, **kw ):
""" Returns js cleaner attrs """
return getJSCleanerAttrs( context, category, **kw )
security.declarePublic( 'getCustomCookedTableTranslit' )
def getCustomCookedTableTranslit( self, context, id, values ):
""" Returns translitted custom data table values """
return CustomCookedTableTranslit( context, id, values )
security.declarePublic( 'getFormattedComments' )
def getFormattedComments( self, text, mode=None ):
""" Returns formatted comments text """
return formatComments( text, mode )
security.declarePublic( 'hasCustomPermissions' )
def hasCustomPermissions( self, context, permission ):
""" Returns given custom definition value """
return CustomCheckPermission( context, permission )
security.declarePublic( 'getSession' )
def getSession( self, name, default=None, REQUEST=None, cookie=None ):
""" Returns session data value """
return GetSessionValue( self, name, default, REQUEST, cookie )
security.declarePublic( 'setSession' )
def setSession( self, name, value, REQUEST=None, cookie=None ):
""" Stores session data value """
SetSessionValue( self, name, value, REQUEST, cookie )
InitializeClass( ExpressSuiteCore )
class PortalGenerator:
klass = ExpressSuiteCore
def setupTools( self, p ):
"""
Setup initial tools
"""
addCMFCoreTool = p.manage_addProduct['CMFCore'].manage_addTool
addCMFCoreTool( 'CMF Skins Tool', None )
addCMFCoreTool( 'CMF Undo Tool', None )
addCMFCoreTool( 'CMF URL Tool', None )
addCMFDefaultTool = p.manage_addProduct['CMFDefault'].manage_addTool
addCMFDefaultTool( 'Default Discussion Tool', None )
addCMFDefaultTool( 'Default Registration Tool', None )
addExpressSuiteTool = p.manage_addProduct['ExpressSuiteTools'].manage_addTool
addExpressSuiteTool( 'ExpressSuite Actions Tool', None )
addExpressSuiteTool( 'ExpressSuite Catalog Tool', None )
addExpressSuiteTool( 'ExpressSuite Comments Tool', None )
addExpressSuiteTool( 'ExpressSuite DocumentLink Tool', None )
addExpressSuiteTool( 'ExpressSuite ErrorLog Tool', None )
addExpressSuiteTool( 'ExpressSuite Followup Actions Tool', None )
addExpressSuiteTool( 'ExpressSuite Help Tool', None )
addExpressSuiteTool( 'ExpressSuite Member Data Tool', None )
addExpressSuiteTool( 'ExpressSuite Membership Tool', None )
addExpressSuiteTool( 'ExpressSuite Metadata Tool', None )
addExpressSuiteTool( 'ExpressSuite Properties Tool', None )
addExpressSuiteTool( 'ExpressSuite Types Tool', None )
addExpressSuiteTool( 'ExpressSuite Workflow Tool', None )
addExpressSuiteTool( 'ExpressSuite Services Tool', None )
addExpressSuiteTool( 'Portal Scheduler Tool', None )
#addExpressSuiteTool( 'ExpressSuite Migration Tool', None )
def setupMessageCatalog( self, p, language ):
langs = Config.Languages
p.manage_addProduct['Localizer'].manage_addMessageCatalog( 'msg', 'Messages', langs.keys())
msg = p._getOb( 'msg' )
path = joinpath( package_home( globals() ), 'locale' )
msg.manage_changeDefaultLang( language or Config.DefaultLanguage )
for lang, info in langs.items():
charset = info['python_charset'].upper()
msg.update_po_header( lang, '', '', '', charset )
# import PO file into the Message Catalog
try:
file = open( joinpath( path, '%s.po' % lang ), 'rt' )
except IOError:
pass
else:
msg.manage_import( lang, file )
file.close()
# fix empty string (just in case...)
msg.manage_editLS( '', (lang, '') )
# select default language
p.setPortalLocale()
p.setContentCharset()
def setupMail( self, p ):
"""
Create mail objects
"""
mh = getattr( p, 'MailHost', None )
if not ( mh is None or isinstance( mh, Mail.MailServerBase ) ):
p._delObject( 'MailHost' )
mh = None
if mh is None:
Mail.manage_addMailSender( p, 'MailHost', host='' )
if getattr( p, 'MailServer', None ) is None:
Mail.manage_addMailServer( p, 'MailServer', host='' )
def setupUserFolder( self, p ):
p.manage_addProduct['ExpressSuiteTools'].addUserFolder()
def setupCookieAuth( self, p ):
p.manage_addProduct['CMFCore'].manage_addCC( id='cookie_authentication' )
p.cookie_authentication.auto_login_page = ''
def setupRoles( self, p ):
p.__ac_roles__ = ( 'Member', 'Visitor', 'Editor', 'Writer', 'Reader', 'Author', 'VersionOwner' )
def setupPermissions( self, p ):
"""
Setup some suggested roles to permission mappings
"""
mp = p.manage_permission
for entry in Config.PortalPermissions:
apply( mp, entry )
def setupDefaultSkins( self, p ):
"""
Setup portal skins
"""
pstool = getToolByName( p, 'portal_skins', None )
#pstool = getattr( p, 'portal_skins', None )
if pstool is None:
return
cmf_manager = ManageCMFContent()
for view in Config.SkinViews:
cmf_manager.register_view( pstool, 'skins/%s' % view )
# these skin elements are available for anonymous visitors
#for name in Config.PublicViews:
# pstool[ name ].manage_permission( CMFCorePermissions.View, [Roles.Anonymous], 1 )
addDirectoryViews( pstool, 'skins', cmfdefault_globals )
pstool.manage_addProduct['OFSP'].manage_addFolder( id='custom' )
default_skins = ', '.join( ['custom'] + Config.SkinViews )
pstool.addSkinSelection( 'Site', default_skins, make_default=1 )
pstool.addSkinSelection( 'Mail', 'mail_templates' )
p.setupCurrentSkin()
def setupTypes( self, p, initial_types=factory_type_information ):
"""
Setup portal types
"""
tptool = getToolByName( p, 'portal_types', None )
#tptool = getattr( p, 'portal_types', None )
if tptool is None:
return
for x in initial_types:
if not tptool.getTypeInfo( x['id'] ):
tptool.addType( x['id'], x )
def setupCategories( self, p, categories=None, **kw ):
"""
Setup default categories
"""
metadata = getToolByName( p, 'portal_metadata', None )
if metadata is None:
return
if not categories:
categories = ['Document', 'SimpleDocs']
default_categories = DefaultCategories.DefaultCategories()
for id in categories:
if metadata.getCategoryById( id ):
continue
category = DefaultCategories.setupCategory( default_categories, id, metadata )
if category is None:
continue
workflow = category.getWorkflow()
if workflow is None:
continue
DefaultCategories.setupWorkflow( default_categories, workflow, id, metadata )
del default_categories
def setupMimetypes( self, p ):
"""
Setup mime types
"""
p.manage_addProduct[ 'CMFCore' ].manage_addRegistry()
reg = p.content_type_registry
reg.addPredicate( 'dtml', 'extension' )
reg.getPredicate( 'dtml' ).edit( extensions="dtml" )
reg.assignTypeName( 'dtml', 'DTMLDocument' )
reg.addPredicate( 'link', 'extension' )
reg.getPredicate( 'link' ).edit( extensions="url, link" )
reg.assignTypeName( 'link', 'Link' )
reg.addPredicate( 'news', 'extension' )
reg.getPredicate( 'news' ).edit( extensions="news" )
reg.assignTypeName( 'news', 'News Item' )
reg.addPredicate( 'document', 'major_minor' )
reg.getPredicate( 'document' ).edit( major="text", minor="" )
reg.assignTypeName( 'document', 'HTMLDocument' )
reg.addPredicate( 'image', 'major_minor' )
reg.getPredicate( 'image' ).edit( major="image", minor="" )
reg.assignTypeName( 'image', 'Site Image' )
reg.addPredicate( 'file', 'major_minor' )
reg.getPredicate( 'file' ).edit( major="application", minor="" )
reg.assignTypeName( 'file', 'File' )
def setupWorkflow( self, p, check=0 ):
"""
Setup default workflow
"""
workflow = getToolByName( p, 'portal_workflow', None )
tptool = getToolByName( p, 'portal_types', None )
if workflow is None or tptool is None:
return
cbt = workflow._chains_by_type
count = 0
seen = []
for chain, types in Config.WorkflowChains.items():
seen.extend( types )
for pt in types:
if not cbt or cbt.get( pt ) != chain:
count += 1
if not check:
wf_id = 'heading_workflow'
workflow.createWorkflow( wf_id )
workflow.setChainForPortalTypes( Config.WorkflowChains['heading_workflow'], ( wf_id, ) )
workflow.setChainForPortalTypes( Config.WorkflowChains['__empty__'], ('', ) )
DefaultCategories.setupHeadingWorkflow( workflow.getWorkflowById( wf_id ) )
return count
def setupDefaultMembers( self, p, lang='ru' ):
"""
Adds default members and groups
"""
membership = getToolByName( p, 'portal_membership', None )
msg = getToolByName( p, 'msg', None )
if None in ( membership, msg ):
return None
membership._addGroup( 'all_users', msg.gettext( 'All users', lang=lang ) )
membership._addGroup( '_managers_', msg.gettext( 'Managers', lang=lang ) )
username = None
try: username = _getAuthenticatedUser().getUserName()
except: pass
if not username:
username = 'admin'
roles = ( 'Member', 'Manager', )
properties = { 'lname' : msg.gettext( 'admin', lang=lang ) }
membership.addMember( id=username, password='123', roles=roles, domains='', properties=properties )
member = membership.getMemberById( username )
if member is None:
return None
users = [ username ]
membership.manage_changeGroup( group='all_users', group_users=users )
membership.manage_changeGroup( group='_managers_', group_users=users )
return member
def setupStorage( self, p, create_userfolder=None ):
"""
Setup storage folders
"""
if p is None:
return
base = p.manage_addProduct['ExpressSuiteTools']
if base is None:
return
msg = getToolByName( p, 'msg', None )
if msg is None:
return
lang = msg.get_default_language()
member = create_userfolder and self.setupDefaultMembers( p, lang ) or None
storage = self._makeHeading( p.manage_addProduct['ExpressSuiteTools'], 'storage', \
msg.gettext( 'Content storage', lang=lang ) )
if storage:
self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'members', \
msg.gettext( 'Home folders', lang=lang ) )
self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'user_defaults', \
msg.gettext( 'Default content', lang=lang ) )
system = self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'system', \
msg.gettext( 'System folders', lang=lang ) )
else:
system = None
if system:
self._makeHeading( p.storage.system.manage_addProduct['ExpressSuiteTools'], 'templates', \
msg.gettext( 'Document templates', lang=lang ) )
if storage:
mp = p.storage.manage_permission
mp('List folder contents', ['Owner','Manager', 'Editor', 'Writer', 'Reader', 'Author'], 0)
mp('View', ['Owner','Manager', 'Member'], 1)
if create_userfolder and member is not None:
home = member.getHomeFolder( create=1 )
# add access rights for system folder
if system:
p.storage.system.manage_setLocalGroupRoles( 'all_users', ['Reader'] )
if storage:
if member is not None:
p.storage.changeOwnership( member, recursive=1 )
p.storage.reindexObject( recursive=1 ) #idxs=['allowedRolesAndUsers'],
def setupTracker( self, p ):
"""
Setup tracker
"""
pass
def setupActions( self, p ):
"""
Setup portal actions
"""
actions = getToolByName( p, 'portal_actions', None )
if actions is None:
return
actions.action_providers = ( \
'portal_comments'
, 'portal_discussion'
, 'portal_help'
, 'portal_membership'
, 'portal_metadata'
, 'portal_properties'
, 'portal_registration'
, 'portal_services'
, 'portal_scheduler'
, 'portal_undo'
, 'portal_workflow'
)
def setupCatalog( self, p ):
"""
Setup portal catalogs
"""
tool_ids = ( 'portal_catalog', 'portal_followup', 'portal_links', )
for id in tool_ids:
ob = getToolByName( p, id, None )
if ob is None:
return
if Config.IsSQLCatalog and ob.implements('IZSQLCatalog'):
ob.sql_db_name = p.getId()
ob.sql_prefix = ''.join([ x[0:1] for x in id.split('_') ] )
ob.sql_root = '_Root'
ob.sql_user = Config.SQLDBUser
ob.setup()
ob.setupIndexes()
def setup( self, p, language, create_userfolder ):
"""
Setup portal object
"""
logger.info('Setup new ExpressSuite instance, id: %s, IsSQLCatalog: %s' % ( p.getId(), Config.IsSQLCatalog ) )
if Config.IsSQLCatalog:
id = Config.SQLDBConnectorID
addZMySQLConnection( p, id, 'Z MySQL Database Connection', 1 )
self.setupTools( p )
self.setupCatalog( p )
self.setupMessageCatalog( p, language )
self.setupMail( p )
if int(create_userfolder) != 0: self.setupUserFolder( p )
self.setupCookieAuth( p )
self.setupRoles( p )
self.setupPermissions( p )
self.setupDefaultSkins( p )
# SkinnedFolders are only for customization;
# they aren't a default type.
default_types = tuple( filter( lambda x: x['id'] != 'Skinned Folder', factory_type_information ) )
self.setupTypes( p, default_types )
self.setupTypes( p, cmf_factory_type_information )
self.setupCategories( p )
self.setupMimetypes( p )
self.setupWorkflow( p )
self.setupActions( p )
self.setupManual( p, 'manual' )
logger.info('Successfully created new instance')
def setupManual( self, target, path, ctype=None ):
"""
Setup manual
"""
createDirectoryView( target, makepath( path ) )
def create( self, parent, id, language, create_userfolder ):
"""
Creates an instance
"""
id = str(id)
portal = self.klass( id=id )
parent._setObject( id, portal )
# Return the fully wrapped object
p = parent.this()._getOb( id )
self.setup( p, language, create_userfolder )
return p
def setupDefaultProperties( self, p, id, title, description, email_from_address, email_from_name,
validate_email, server_url, stemmer ):
"""
Setup default portal properties
"""
p._setProperty( 'email_from_address', email_from_address, 'string' )
p._setProperty( 'email_from_name', email_from_name, 'string' )
p._setProperty( 'validate_email', validate_email and 1 or 0, 'boolean' )
p._setProperty( 'email_antispam', '', 'string' )
p._setProperty( 'email_error_address', '', 'string' )
p._setProperty( 'instance', id, 'string' )
p._setProperty( 'remote_url', '', 'string' )
p._setProperty( 'apply_threading', 1, 'boolean' )
p._setProperty( 'use_timeout', 1, 'boolean' )
p._setProperty( 'duration', 0.001, 'float' )
p._setProperty( 'p_resolve_conflict', 0, 'boolean' )
p._setProperty( 'max_involved_users', 10, 'int' )
p._setProperty( 'service_timeout', 30, 'int' )
p._setProperty( 'created_search_interval', 999, 'int' )
p._setProperty( 'common_url', '', 'string' )
p._setProperty( 'send_to_support', 0, 'boolean' )
p._setProperty( 'member_activity', 1, 'boolean' )
p._setProperty( 'emergency_service', 0, 'boolean' )
p._setProperty( 'p_log', 0, 'boolean' )
p._setProperty( 'suspended_mail', 1, 'boolean' )
p._setProperty( 'mail_frequency', 1, 'int' )
p._setProperty( 'mail_threshold', 500, 'int' )
p._setPropValue( 'server_url', server_url )
p._setPropValue( 'stemmer', stemmer )
p.title = title
p.description = description
def setupAfterCreate( self, p, create_userfolder ):
"""
Setup portal catalog and folders storage
"""
self.setupStorage( p, create_userfolder )
def _makeHeading( self, ob, id, title=None ):
"""
Creates Heading instance
"""
try:
folder = Heading( id=id, title=title )
if folder is not None:
ob._setObject( id, folder, set_owner=1 )
return 1
except:
raise
return 0
def addZMySQLConnection( dispatcher, id, title='', check=None ):
"""
Adds MySQL DB Connection
"""
connection_string = '-mysql root'
conn = SQLConnection( id, title, connection_string, check )
if conn.connected():
DB = conn._v_database_connection
if DB is not None and DB.is_opened():
instance = dispatcher.getId()
if instance:
DB.query( "CREATE DATABASE IF NOT EXISTS %s" % instance )
acl_users = aq_get(dispatcher, 'acl_users', None, 1)
if acl_users is not None:
userid = Config.SQLDBUser
user = acl_users.getUserById( userid )
passwd = user.__
servers = ( 'localhost', '%', )
for x in servers:
DB.query( "GRANT ALL PRIVILEGES ON %s.* TO '%s'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION" % ( \
instance, userid, x, passwd ) )
DB.query( "SET PASSWORD FOR '%s'@'%s' = OLD_PASSWORD('%s')" % ( \
userid, x, passwd ) )
DB.close()
if instance and userid:
connection_string = Config.connection_string % { \
'instance' : instance,
'user' : userid,
'passwd' : passwd
}
Publish.setupProduct( DB, connection_string, dispatcher )
dispatcher._setObject(id, conn)
def manage_addExpressSuiteForm( self ):
"""
Returns ExpressSuite instance generator form
"""
add_expresssuite_form = HTMLFile('dtml/addExpressSuite', globals())
all_languages = []
for lang, info in Config.Languages.items():
all_languages.append( {
'id' : lang,
'title' : info['title'],
'default' : lang == Config.DefaultLanguage,
} )
try:
from Products.TextIndexNG2 import allStemmers
all_stemmers = allStemmers(self)
except ImportError:
all_stemmers = []
return add_expresssuite_form( self, all_languages=all_languages, all_stemmers=all_stemmers )
#manage_addExpressSuiteForm.__name__ = 'addExpressSuite'
def manage_addExpressSuite( self, id='common', title='Express Suite DMS', description='',
create_userfolder=1,
email_from_address=None,
email_from_name=None,
validate_email=0,
language=None,
stemmer=None,
REQUEST=None
):
"""
Adds ExpressSuite instance
"""
id = id.strip()
server_url = self.getPhysicalRoot().absolute_url()
if email_from_address is None:
email_from_address = 'postmaster@%s' % urlparse( server_url )[1].split(':')[0]
if email_from_name is None:
email_from_name = title
gen = PortalGenerator()
p = gen.create( self, id, language, create_userfolder )
gen.setupDefaultProperties( p, id, title, description, email_from_address, email_from_name,
validate_email, server_url, stemmer )
gen.setupAfterCreate( p, create_userfolder )
if REQUEST is not None:
REQUEST.RESPONSE.redirect(p.absolute_url() + '/finish_site_construction')
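# Rough illustration (annotation, not part of the original product) of how the generator
# above can be driven directly; 'app.sites' is a hypothetical Zope container and the
# argument values are examples only:
#
#   gen = PortalGenerator()
#   portal = gen.create(app.sites, 'docs', language='en', create_userfolder=1)
#   gen.setupDefaultProperties(portal, 'docs', 'Docs DMS', '', 'postmaster@example.com',
#                              'Docs DMS', 0, 'http://localhost:8080', None)
#   gen.setupAfterCreate(portal, create_userfolder=1)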
|
[
"ichar@g2.ru"
] |
ichar@g2.ru
|
5faff836f01be1ca229e5d45ff91386da1400121
|
bc7cd6689a8052d442ded8e876de1e5f22bfad6c
|
/lsml/feature/provided/shape.py
|
c4f910d38d3f18cbd76060977e01cb6f94890147
|
[
"BSD-3-Clause"
] |
permissive
|
tor4z/level-set-machine-learning
|
3a359e0d55137f3c0a9cbcaf25048c61573abd25
|
38460e514d48f3424bb8d3bd58cb3eb330153e64
|
refs/heads/master
| 2022-04-08T08:04:27.200188
| 2020-01-26T03:09:56
| 2020-01-26T03:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,052
|
py
|
import numpy
from skimage.measure import marching_cubes_lewiner as marching_cubes
from skimage.measure import find_contours, mesh_surface_area
from lsml.feature.base_feature import (
BaseShapeFeature, GLOBAL_FEATURE_TYPE, LOCAL_FEATURE_TYPE)
class Size(BaseShapeFeature):
""" Computes the size of the region enclosed by the zero level set of u.
In 1D, this is length. In 2D, it is area, and in 3D, it is volume.
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
if self.ndim == 1:
return 'Length'
elif self.ndim == 2:
return 'Area'
elif self.ndim == 3:
return 'Volume'
else:
return 'Hyper-volume'
def compute_feature(self, u, dist, mask, dx):
size = (u > 0).sum() * numpy.prod(dx)
feature = numpy.empty_like(u)
feature[mask] = size
return feature
class BoundarySize(BaseShapeFeature):
""" Computes the size of the zero-level set of u. In 2D, this is
the length of the implicit curve. In 3D, it is surface area.
"""
locality = GLOBAL_FEATURE_TYPE
def __init__(self, ndim=2):
if ndim < 2 or ndim > 3:
msg = ("Boundary size is only defined for dimensions 2 and 3; "
"ndim provided = {}")
raise ValueError(msg.format(ndim))
super(BoundarySize, self).__init__(ndim)
@property
def name(self):
if self.ndim == 2:
return 'Curve length'
elif self.ndim == 3:
return 'Surface area'
def compute_feature(self, u, dist, mask, dx):
feature = numpy.empty_like(u)
if self.ndim == 2:
boundary_size = self._compute_arc_length(u, dx)
elif self.ndim == 3:
boundary_size = self._compute_surface_area(u, dx)
else:
msg = "Cannot compute boundary size for ndim = {}"
raise RuntimeError(msg.format(self.ndim))
feature[mask] = boundary_size
return feature
def _compute_arc_length(self, u, dx):
contours = find_contours(u, 0)
total_arc_length = 0.
for contour in contours:
closed_contour = numpy.vstack((contour, contour[0]))
closed_contour *= dx[::-1] # find_contours points in index space
arc_length = numpy.linalg.norm(numpy.diff(closed_contour, axis=0),
axis=1).sum()
total_arc_length += arc_length
return total_arc_length
def _compute_surface_area(self, u, dx):
verts, faces, _, _ = marching_cubes(u, 0., spacing=dx)
return mesh_surface_area(verts, faces)
class IsoperimetricRatio(BaseShapeFeature):
""" Computes the isoperimetric ratio, which is a measure of
circularity in two dimensions and a measure of sphericity in three.
In both cases, the maximum ratio value of 1 is achieved only for
a perfect circle or sphere.
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
if self.ndim == 2:
return 'Circularity'
else:
return 'Sphericity'
def __init__(self, ndim=2):
if ndim < 2 or ndim > 3:
msg = ("Isoperimetric ratio defined for dimensions 2 and 3; "
"ndim provided = {}")
raise ValueError(msg.format(ndim))
super(IsoperimetricRatio, self).__init__(ndim)
def compute_feature(self, u, dist, mask, dx):
if self.ndim == 2:
return self.compute_feature2d(
u=u, dist=dist, mask=mask, dx=dx)
else:
return self.compute_feature3d(
u=u, dist=dist, mask=mask, dx=dx)
def compute_feature2d(self, u, dist, mask, dx):
# Compute the area
size = Size(ndim=2)
area = size.compute_feature(u=u, dist=dist, mask=mask, dx=dx)
        # Compute the curve length
boundary_size = BoundarySize(ndim=2)
curve_length = boundary_size.compute_feature(
u=u, dist=dist, mask=mask, dx=dx)
feature = numpy.empty_like(u)
feature[mask] = 4*numpy.pi*area[mask] / curve_length[mask]**2
return feature
def compute_feature3d(self, u, dist, mask, dx):
        # Compute the volume
size = Size(ndim=3)
volume = size(u=u, dist=dist, mask=mask, dx=dx)
        # Compute the surface area
boundary_size = BoundarySize(ndim=3)
surface_area = boundary_size(u=u, dist=dist, mask=mask, dx=dx)
feature = numpy.empty_like(u)
feature[mask] = 36*numpy.pi*volume[mask]**2 / surface_area[mask]**3
return feature
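# Worked check of the normalization above (annotation, not in the original): for a circle
# of radius r, area = pi*r**2 and curve length = 2*pi*r, so
#     4*pi*area / length**2 = 4*pi**2*r**2 / (4*pi**2*r**2) = 1,
# and for a sphere, volume = (4/3)*pi*r**3 and surface area = 4*pi*r**2, so
#     36*pi*volume**2 / area**3 = 64*pi**3*r**6 / (64*pi**3*r**6) = 1.
# Any other shape gives a ratio strictly smaller than 1.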
class Moments(BaseShapeFeature):
""" Computes the normalized statistical moments of a given order along
a given axis
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
return "Moments (axes={}; orders={})".format(self.axes, self.orders)
@property
def size(self):
return len(self.axes) * len(self.orders)
def __init__(self, ndim=2, axes=(0, 1), orders=(1, 2)):
""" Initialize a normalized statistical moment feature
ndim: int
Number of dimensions
axes: list[int], default=[0, 1]
The axes along which the moment should be computed
        orders: list[int], default=[1, 2]
The orders of the moments, e.g., order=1 yields the 'center of
mass' coordinate along the given axis and order=2 yields a measure
of spread along the given axis
"""
super(Moments, self).__init__(ndim)
for axis in axes:
if axis < 0 or axis > ndim-1:
msg = "axis provided ({}) must be one of 0 ... {}"
raise ValueError(msg.format(axis, ndim-1))
for order in orders:
if order < 1:
msg = "Moments order should be greater than or equal to 1"
raise ValueError(msg)
self.axes = axes
self.orders = orders
def _compute_center_of_mass(self, u, dx):
# Initialize center of mass container and mask with singular entry
center_of_mass = numpy.zeros(self.ndim)
mask = numpy.empty(u.shape, dtype=numpy.bool)
mask.ravel()[0] = True
for i in range(self.ndim):
center_of_mass[i] = self._compute_moment(
u=u, dist=u, mask=mask, dx=dx, axis=i, order=1)
return center_of_mass
def _compute_moment(self, u, dist, mask, dx, axis, order):
""" Computes the feature for just a single axis and order """
indices = numpy.indices(u.shape, dtype=numpy.float)
mesh = indices[axis] * dx[axis]
size = Size(ndim=self.ndim)
# Normalize by centering if order is greater than 1
if order > 1:
center_of_mass = self._compute_center_of_mass(u=u, dx=dx)
mesh -= center_of_mass[axis]
measure = size(u=u, dist=dist, mask=mask, dx=dx)[mask].ravel()[0]
moment = (mesh**order)[u > 0].sum() * numpy.prod(dx) / measure
return moment
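    # Annotation (not in the original): with Omega = {u > 0} and |Omega| its measure,
    # order 1 returns the centroid coordinate
    #     c_a = (1 / |Omega|) * integral over Omega of x_a dx,
    # while order k > 1 returns the centered moment
    #     (1 / |Omega|) * integral over Omega of (x_a - c_a)**k dx,
    # approximated above by a sum over grid cells of volume prod(dx).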
def compute_feature(self, u, dist, mask, dx):
from itertools import product
features = numpy.empty(u.shape + (self.size,))
for i, (axis, order) in enumerate(product(self.axes, self.orders)):
features[mask, i] = self._compute_moment(
u, dist, mask, dx, axis, order)
return features
class DistanceToCenterOfMass(BaseShapeFeature):
""" Computes the distance to the computed center of mass
"""
locality = LOCAL_FEATURE_TYPE
@property
def name(self):
return "Distance to center of mass"
def compute_feature(self, u, dist, mask, dx):
# Sneakily use the center of mass utility buried in the
# moment feature class
moment_feature = Moments(ndim=self.ndim)
center_of_mass = moment_feature._compute_center_of_mass(u, dx)
# Add extra axes for some broadcasting below
slicer = tuple([slice(None), ] + [None for _ in range(self.ndim)])
indices = numpy.indices(u.shape, dtype=numpy.float)
mesh = indices * dx[slicer]
feature = numpy.empty_like(u)
feature[mask] = numpy.linalg.norm(
mesh - center_of_mass[slicer], axis=0)[mask]
return feature
def get_basic_shape_features(ndim=2, moment_orders=[1, 2]):
""" Generate a list of basic shape features at multiple sigma values
Parameters
----------
ndim : int, default=2
The number of dimension of the image to which these features
will be applied
moment_orders : list[float], default=[1, 2]
Orders for which we compute moments
Returns
-------
features : list[BaseImageFeature]
A list of image feature instances
"""
feature_classes = [
BoundarySize,
DistanceToCenterOfMass,
IsoperimetricRatio,
Moments,
Size,
]
    return [
        Moments(ndim=ndim, orders=moment_orders)
        if feature_class is Moments else feature_class(ndim=ndim)
        for feature_class in feature_classes
    ]
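# Minimal usage sketch (annotation only; the toy level-set function below is made up and
# only functions and classes defined in this module are assumed):
#
#   import numpy
#   u = -numpy.ones((64, 64))
#   u[16:48, 16:48] = 1.0                      # positive inside the region of interest
#   dist = u.copy()                            # stand-in for a signed distance map
#   mask = numpy.ones_like(u, dtype=bool)      # evaluate the features everywhere
#   dx = numpy.array([1.0, 1.0])
#   for feat in get_basic_shape_features(ndim=2):
#       values = feat.compute_feature(u=u, dist=dist, mask=mask, dx=dx)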
|
[
"mhancock743@gmail.com"
] |
mhancock743@gmail.com
|
dd5e7f37317396c24b71e4bac839eacd831d2205
|
54f75e2cf3094bd4073c24321adfd7b7b9a2d88a
|
/dsClass/mtcnn_detect.py
|
d0b08628acb852fc359914d01ac8df21ef222ba2
|
[] |
no_license
|
goolig/dsClass
|
2bff1ac0a45f448246dc0e552f6a7952762a27bc
|
67567d1d71a8a747c8a68c621045fe53ff139103
|
refs/heads/master
| 2021-11-26T20:24:12.761009
| 2021-11-11T11:43:40
| 2021-11-11T11:43:40
| 150,427,519
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,718
|
py
|
'''
Tensorflow implementation of the mtcnn face detection algorithm
Credit: DavidSandBerg for implementing this method on tensorflow
'''
from six import string_types, iteritems
import numpy as np
import tensorflow as tf
import cv2
import os
class MTCNNDetect(object):
def __init__(self, face_rec_graph, model_path = "", threshold = [0.6, 0.7, 0.7], factor = 0.709, scale_factor = 1):
'''
        :param face_rec_graph: graph wrapper whose .graph attribute holds the tf.Graph the networks are built in
        :param threshold: per-stage detection thresholds [PNet, RNet, ONet]
        :param factor: image pyramid scaling factor, default 0.709 -- magic number
        :param model_path: directory containing det1.npy, det2.npy and det3.npy; defaults to this file's directory
        :param scale_factor: integer factor by which the input frame is downscaled before detection
'''
self.threshold = threshold
self.factor = factor
        self.scale_factor = scale_factor
with face_rec_graph.graph.as_default():
print("Loading MTCNN Face detection model")
self.sess = tf.Session()
if not model_path:
model_path, _ = os.path.split(os.path.realpath(__file__))
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None, None, None, 3), 'input')
pnet = PNet({'data': data})
pnet.load(os.path.join(model_path, 'det1.npy'), self.sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input')
rnet = RNet({'data': data})
rnet.load(os.path.join(model_path, 'det2.npy'), self.sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input')
onet = ONet({'data': data})
onet.load(os.path.join(model_path, 'det3.npy'), self.sess)
self.pnet = lambda img: self.sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})
self.rnet = lambda img: self.sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0': img})
self.onet = lambda img: self.sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'),
feed_dict={'onet/input:0': img})
print("MTCNN Model loaded")
def detect_face(self, img, minsize):
# im: input image
# minsize: minimum of faces' size
if(self.scale_factor > 1):
img = cv2.resize(img,(int(len(img[0])/self.scale_factor), int(len(img)/self.scale_factor)))
factor_count = 0
total_boxes = np.empty((0, 9))
points = []
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / minsize
minl = minl * m
        # create scale pyramid
scales = []
while minl >= 12:
scales += [m * np.power(self.factor, factor_count)]
minl = minl * self.factor
factor_count += 1
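        # Illustrative numbers (annotation, not original code): for a 480x640 frame with
        # minsize=20, m = 12/20 = 0.6 and minl starts at 480*0.6 = 288, so with
        # factor = 0.709 the pyramid is roughly [0.6, 0.425, 0.302, 0.214, ...] and stops
        # once the rescaled short side drops below 12 pixels (about 10 scales here).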
# first stage
for j in range(len(scales)):
scale = scales[j]
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = self.pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty()
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = self.rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > self.threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty()
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = self.onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > self.threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick, :]
points = points[:, pick]
simple_points = np.transpose(
            points)  # points comes back as a 10 x N array (x then y coordinates per column); transpose so each row holds one face's five landmarks
rects = [(max(0,(int(rect[0]))) * self.scale_factor,max(0,int(rect[1])) * self.scale_factor,
int(rect[2] - rect[0]) * self.scale_factor,int(rect[3] - rect[1]) * self.scale_factor) for rect in total_boxes]
return rects, simple_points * self.scale_factor
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
'''Construct the network. '''
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path, encoding='latin1').item() # pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target - max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3, name='prob1'))
(self.feed('PReLU3') # pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1, name='prob1'))
(self.feed('prelu4') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox, reg):
# calibrate bounding boxes
if reg.shape[1] == 1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
# use heatmap to generate bounding boxes
stride = 2
cellsize = 12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
y, x = np.where(imap >= t)
if y.shape[0] == 1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y, x)]
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
if reg.size == 0:
reg = np.empty((0, 3))
bb = np.transpose(np.vstack([y, x]))
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
return boundingbox, reg
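# Annotation (not in the original): every PNet heatmap cell (y, x) corresponds to a
# 12x12 window in the scaled image sampled with stride 2, so the corners computed above,
#     q1 = fix((2*(y, x) + 1) / scale)   and   q2 = fix((2*(y, x) + 12) / scale),
# are that window mapped back into original-image coordinates.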
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
        if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[0:counter]
return pick
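# Tiny worked example (annotation only): for boxes [0, 0, 10, 10, 0.9] and
# [1, 1, 11, 11, 0.8] each area is 11*11 = 121, the intersection is 10*10 = 100 and the
# union is 121 + 121 - 100 = 142, giving IoU ~ 0.70; with threshold 0.5 and
# method 'Union', nms() keeps only the higher-scoring first box.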
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
# compute the padding coordinates (pad the bounding boxes to square)
tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:, 0].copy().astype(np.int32)
y = total_boxes[:, 1].copy().astype(np.int32)
ex = total_boxes[:, 2].copy().astype(np.int32)
ey = total_boxes[:, 3].copy().astype(np.int32)
tmp = np.where(ex > w)
edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
ex[tmp] = w
tmp = np.where(ey > h)
edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
ey[tmp] = h
tmp = np.where(x < 1)
dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
x[tmp] = 1
tmp = np.where(y < 1)
dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
# convert bboxA to square
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = np.maximum(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(l, (2, 1)))
return bboxA
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) # @UndefinedVariable
return im_data
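# Rough usage sketch (annotation; FaceRecGraph is a hypothetical helper whose only job is
# to expose a .graph attribute holding a tf.Graph, and the image path is an example):
#
#   class FaceRecGraph(object):
#       def __init__(self):
#           self.graph = tf.Graph()
#
#   detector = MTCNNDetect(FaceRecGraph(), scale_factor=2)
#   frame = cv2.imread('some_image.jpg')
#   rects, landmarks = detector.detect_face(frame, minsize=40)
#   for (x, y, w, h) in rects:
#       cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)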
|
[
"shtar@post.bgu.ac.il"
] |
shtar@post.bgu.ac.il
|
72adbd0109a8bea3523886155b79efd08bf30fe3
|
f4f2f8f85da06dda03435ad225c11f1a5dceeeec
|
/UI_UX/ui_app/utils/datasets.py
|
6bf51815f5c762e74aa9e157836aa752d43c9d7f
|
[] |
no_license
|
Djangojuniors/Sandeep_STW
|
f1ca6a244129a1bf8104e132be7948d42d557a9a
|
194e328920fab98d14bae3ac7dd87abcca09aadf
|
refs/heads/master
| 2023-08-25T15:00:08.289651
| 2021-10-23T08:10:47
| 2021-10-23T08:10:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,361
|
py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from .utils import xyxy2xywh, xywh2xyxy
from django.core.files.storage import default_storage
from django.conf import settings
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
print("PATH", path)
files = []
if os.path.isdir(os.path.join(settings.MEDIA_ROOT, path)):
print("IF DIR")
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(os.path.join(settings.MEDIA_ROOT, path)):
print("IS file")
files = [path]
print("FILES", files)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
print(nI, nV)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
print(self.nF)
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in ' + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(os.path.join(settings.MEDIA_ROOT, path)) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(os.path.join(settings.MEDIA_ROOT, sources), 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect, interp=cv2.INTER_LINEAR)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_labels=True, cache_images=False, single_cls=False):
path = str(Path(path)) # os-agnostic
assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)
with open(os.path.join(settings.MEDIA_ROOT, path), 'r') as f:
self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats]
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace('.txt', '.shapes') # shapefile path
try:
with open(os.path.join(settings.MEDIA_ROOT, sp), 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
            irect = ar.argsort()  # sort by aspect ratio so batches share similar shapes
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(np.int) * 32
# Preload labels (required for weighted CE training)
self.imgs = [None] * n
self.labels = [None] * n
if cache_labels or image_weights: # cache labels for faster training
self.labels = [np.zeros((0, 5))] * n
extract_bounding_boxes = False
create_datasubset = False
pbar = tqdm(self.label_files, desc='Caching labels')
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
for i, file in enumerate(pbar):
try:
with open(os.path.join(settings.MEDIA_ROOT, file), 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open(os.path.join(settings.MEDIA_ROOT, './datasubset/images.txt'), 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
nf, nm, ne, nd, n)
assert nf > 0, 'No labels found. See %s' % help_url
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
img_path = self.img_files[index]
label_path = self.label_files[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(os.path.join(settings.MEDIA_ROOT, label_path), 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, img_path, shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
img_path = self.img_files[index]
img = cv2.imread(img_path) # BGR
assert img is not None, 'Image Not Found ' + img_path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r < 1 or (self.augment and (r != 1)): # always resize down, only resize up if training with augmentation
interp = cv2.INTER_LINEAR if self.augment else cv2.INTER_AREA # LINEAR for training, AREA for testing
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
x = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x).clip(None, 255).astype(np.uint8)
np.clip(img_hsv[:, :, 0], None, 179, out=img_hsv[:, :, 0]) # inplace hue clip (0 - 179 deg)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
img4 = np.zeros((s * 2, s * 2, 3), dtype=np.uint8) + 128 # base image with 4 tiles
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Load labels
label_path = self.label_files[index]
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(os.path.join(settings.MEDIA_ROOT, label_path), 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
else:
labels = np.zeros((0, 5), dtype=np.float32)
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'] * 1,
translate=self.hyp['translate'] * 1,
scale=self.hyp['scale'] * 1,
shear=self.hyp['shear'] * 1,
border=-s // 2) # border to remove
return img4, labels4
def letterbox(img, new_shape=(416, 416), color=(128, 128, 128),
auto=True, scaleFill=False, scaleup=True, interp=cv2.INTER_AREA):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = max(new_shape) / max(shape)
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=interp) # INTER_AREA is better, INTER_LINEAR is faster
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
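# Worked example (annotation, not original code): letterboxing a 720x1280 frame with
# new_shape=416 and auto=True gives r = 416/1280 = 0.325 and new_unpad = (416, 234);
# dh = 182 becomes 182 % 32 = 22, so 11 px of gray padding go on top and bottom and the
# output is a 256x416 rectangle (a multiple of 32) rather than a full 416x416 square.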
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
if targets is None: # targets = [cls, xyxy]
targets = []
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
changed = (border != 0) or (M != np.eye(3)).any()
if changed:
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_AREA, borderValue=(128, 128, 128))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(os.path.join(settings.MEDIA_ROOT, file), 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(os.path.join(settings.MEDIA_ROOT, file.replace('.txt', 'bmp.txt')), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(os.path.join(settings.MEDIA_ROOT, p), 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(os.path.join(settings.MEDIA_ROOT, p), 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(os.path.join(settings.MEDIA_ROOT, path), 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
[
"djnagojuniors@gmail.com"
] |
djnagojuniors@gmail.com
|
2138801a48a4de3e3f328339e1a0f23bbedcdf77
|
32d6370fe51c42cd99af52b03f81a82d687e5bb3
|
/Desafio073.py
|
c73ea9d3a3a68dad7fa5feb6d681f879f71d1e3e
|
[] |
no_license
|
AndreLovo/Python
|
a5f4b10c72641048dfd65b6c58bce3b233abeb07
|
ea98a2ca572dfebf9be32502f041016ec14ac0d5
|
refs/heads/master
| 2022-05-03T13:08:37.178401
| 2022-04-10T16:41:54
| 2022-04-10T16:41:54
| 157,377,691
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
# Create a tuple filled with the top 20 placed teams of the Brazilian Football Championship table, in order of placement. Then show:
# a) The first 5 teams.
# b) The last 4 placed teams.
# c) The teams in alphabetical order.
# d) The position of the São Paulo team.
times =('Atlético-MG', 'Flamengo', 'Palmeiras', 'Fortaleza', 'Corinthians', 'RB Bragantino', 'Fluminense', 'América-MG', 'Atlético-GO', 'Santos', 'Ceará', 'Internacional', 'São Paulo', 'Athletico-PR', 'Cuiabá', 'Juventude')
print('==' *15)
#a) "Slicing" the tuple
# Showing the first 5 teams:
for t in times:
print (t)
print('==' *15)
a= times[:5]
print(f'The first five teams are: {a}')
print('==' *15)
#b) Showing the last four teams
b= times[-4:]
print(f'The last four teams are: {b}')
print('==' *15)
#c) Showing the teams in alphabetical order:
c = sorted(times)
print(c)
#d) Showing São Paulo's position:
d = times.index("São Paulo") + 1
print(f'São Paulo is in position {d}')
|
[
"noreply@github.com"
] |
noreply@github.com
|
24de7c918a9da758aeae9ce57ebec7dee3270ce3
|
dd4ea40348f994151e5d4279b483363526c54936
|
/constants/search_page.py
|
35c43a56c2e63eca3c835429c086381c9207ca89
|
[] |
no_license
|
myshkarus/qa-demo-shop
|
a0aed167f5febfc471100173bff03a3c1568a84a
|
517f3e82f4632f8472564e155a0d121796101291
|
refs/heads/master
| 2023-08-02T21:24:52.248299
| 2021-09-14T18:52:28
| 2021-09-14T18:52:28
| 406,486,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
class SearchPageConstants:
"""Store constants related to the SearchPage object"""
pass
|
[
"m.shpilenko@gmail.com"
] |
m.shpilenko@gmail.com
|
bec79e0333db1af0c3c99276cfa189282d7dd9c2
|
8e040542e4796b4f70352e0162b7a75b18034b6c
|
/blogapp/myblog/views.py
|
9de86d4f964331945786fea38fc8d7247771b1b4
|
[] |
no_license
|
Atul18341/django-blog-app
|
4e4e688a4e9559227db40b68824e7b69b6cffa80
|
149c1d9952da079d1764c11bd91f53e03930f299
|
refs/heads/main
| 2023-03-28T02:11:01.620590
| 2021-04-01T06:46:11
| 2021-04-01T06:46:11
| 347,375,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,539
|
py
|
from django.shortcuts import render,redirect
from .models import blog
from .forms import NewUserForm
from django.contrib.auth import login,authenticate
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
# Create your views here.
def blogs(request):
blogs=blog.objects.all()
return render(request,"index.html",{'blogs':blogs})
def user_login(request):
if request.method=="POST":
form=AuthenticationForm(request,request.POST)
if form.is_valid():
username=form.cleaned_data.get('username')
password=form.cleaned_data.get('password')
user=authenticate(username=username,password=password)
if user is not None:
login(request, user)
                messages.info(request, f"You are now logged in as: {username}.")
return redirect("Blog")
else:
messages.error(request,"Invalid username or password.")
else:
            messages.error(request, "Invalid username or password. Please correct it.")
form=AuthenticationForm()
return render(request,"login.html",{'login_form':form})
def signup(request):
if request.method=="POST":
form=NewUserForm(request.POST)
if form.is_valid():
user=form.save()
login(request, user)
messages.success(request, "Registration Successful.")
return redirect("Login")
messages.error(request, "Unsuccessful registration. Invalid information.")
form=NewUserForm
return render(request,"signup.html",{'register_form':form})
|
[
"atulkumar987613@gmail.com"
] |
atulkumar987613@gmail.com
|
152865b4e9ca49df00660bb1023111f00e83ff72
|
096711aabd6f09aaf501be8919344a2da63e2c91
|
/parse_input.py
|
9ee4f9c9019e487ea5e6af6d7ce46e641a836ba3
|
[] |
no_license
|
KseniiaPrytkova/Computor_v1
|
c85f6c0d1f5c49d3582014d96dde330d234666fe
|
8cdec686eee3d6f178cf8a1c5612ebae4dcc79e8
|
refs/heads/master
| 2022-07-06T01:23:22.879725
| 2020-05-13T20:25:56
| 2020-05-13T20:25:56
| 257,624,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
#!/usr/bin/env python
import re
import sys
def print_reduced_form(m):
signs = []
s = ""
for i, e in enumerate(m[0]):
if (e > 0):
signs += [" + "]
else:
signs += [" - "]
s = "Reduced form: " + str(m[0][0]) + " * X^" + str(0)
if (len(m[0]) >= 2):
s += signs[1] + str(m[0][1]) + " * X^" + str(1)
if (len(m[0]) >= 3):
s += signs[2] + str(m[0][2]) + " * X^" + str(2)
s += " = 0"
s = s.replace("+ -", "-")
# print('\x1b[6;33;45m' + s + '\x1b[0m')
print(s)
def check_parsed_input(s, r_1, r_2):
s_2 = "".join([e[0] for e in r_1]) + "=" + "".join([e[0] for e in r_2])
if s_2 != s:
sys.stderr.write("usage: all terms must be of the form a * x^p\n" +
"example: 5 * X^0 + 4 * X^1 - 9.3 * X^2 = 1 * X^0\n")
sys.exit(1)
def parse_input(s):
# delete all spaces in a string:
s = s.replace(" ", "")
# ['5*X^0+4*X^1-9.3*X^2', '1*X^0']
s_l = s.split('=')
if len(s_l) != 2:
sys.stderr.write("ERROR: exactly one equal sign expected\n")
sys.exit(1)
regexp = r"(([+-]?\d+(\.\d+)?)[*][Xx]\^([+-]?\d+(\.\d+)?))"
r_1, r_2 = re.findall(regexp, s_l[0]), re.findall(regexp, s_l[1])
check_parsed_input(s, r_1, r_2)
m = [{0: [], 1: [], 2: []}, {0: [], 1: [], 2: []}]
for res in r_1:
m[0][int(res[3])] += [float(res[1])]
for res in r_2:
m[1][int(res[3])] += [float(res[1])]
n = [{}, {}]
# Simplify: left.
for key in m[0]:
n[0][key] = sum(m[0][key])
# Simplify: right.
for key in m[1]:
n[1][key] = sum(m[1][key])
# Simplify: move all elements in the right to the left.
for key in n[1]:
if key in n[0]:
n[0][key] = n[0][key] - n[1][key]
else:
n[0][key] = -n[1][key]
print_reduced_form(n)
# Get all x^y values from y=0 to y=2.
res = [ n[0][i] if i in n[0] else "-" for i in range(len(n[0])) ]
return(res)
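# Worked example (derived from the regex and the reduction steps above):
# parse_input("5 * X^0 + 4 * X^1 - 9.3 * X^2 = 1 * X^0") prints the reduced
# form 4.0 * X^0 + 4.0 * X^1 - 9.3 * X^2 = 0 and returns [4.0, 4.0, -9.3].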
|
[
"prytkovakseniia@gmail.com"
] |
prytkovakseniia@gmail.com
|
32509504483d88e28dab7859a56a2999d721accc
|
4a0c58dfeef5e444b9f3d78f9d13d597ebc87d6b
|
/exercise_48_all_lines/exercise_48_all_lines.py
|
29809e0f2fed1e27273bb63f3b29841710f7af2c
|
[] |
no_license
|
jahokas/Python_Workout_50_Essential_Exercises_by_Reuven_M_Lerner
|
e2dc5a08515f30b91514c8eb9b76bb10552ba7a2
|
96ddabda7871faa0da9cd9e0563b16df99f0c853
|
refs/heads/master
| 2023-06-21T11:54:41.794571
| 2021-07-20T08:46:49
| 2021-07-20T08:46:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
import os
def all_lines(path):
for filename in os.listdir(path):
full_filename = os.path.join(path, filename)
try:
for line in open(full_filename):
yield line
except OSError:
pass
|
[
"dmitriyrubanov1988@gmail.com"
] |
dmitriyrubanov1988@gmail.com
|
a6fd729b46af741538efa1be1f8af4c5325dd1ad
|
ffc076dc622cf3b745c6da899aff193c340cdbd1
|
/graphs_weak.py
|
4777964d866c02524f5af83a4dbe9303430657a6
|
[] |
no_license
|
casperhansen/RBSH
|
e10fb69af9e3ea67c67be1cc4ca0a77468bc81e5
|
f464790c6d05f909a86f7b76defd4fec993ed5e7
|
refs/heads/master
| 2020-09-26T05:30:31.378722
| 2019-12-16T13:22:45
| 2019-12-16T13:22:45
| 226,176,650
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,570
|
py
|
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
import numpy as np
from tensorflow.losses import compute_weighted_loss, Reduction
def hinge_loss_eps(labels, logits, epsval, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
if labels is None:
raise ValueError("labels must not be None.")
if logits is None:
raise ValueError("logits must not be None.")
with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
logits = math_ops.to_float(logits)
labels = math_ops.to_float(labels)
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
all_eps = array_ops.ones_like(labels)*epsval
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
losses = nn_ops.relu(
math_ops.subtract(all_eps, math_ops.multiply(labels, logits)))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
def make_graph_queue_SingleWeak(sigma_annealing, rank_weight, kl_weight, bits, dropout_keep, vocab_size, emb_size,
embedding, importance_embedding, optimizer, batch_size, inputs, test_placeholder_set,
is_eval, maskvalue, output_activation_function=tf.nn.tanh, deterministic_eval=True,
noise_type=1, pretrained_emb=False, emb_input_embedding=None, use_sigma_directly=False,
use_ranking=True, hinge_val=1.0):
print("network, use ranking", use_ranking)
hidden_neurons_encode = 1000
encoder_layers = 2
used_input = tf.cond(is_eval, lambda: test_placeholder_set, lambda: inputs, name="train_or_test_cond")
doc, doc1, doc2, doc1weak, doc2weak, masking = used_input
if emb_input_embedding is not None:
print("apply Importance embedding on docs")
doc_enc = doc * importance_embedding #tf.matmul(doc, tf.expand_dims(importance_embedding, -1) * emb_input_embedding)
doc1_enc = doc1 * importance_embedding #tf.matmul(doc1, tf.expand_dims(importance_embedding, -1) * emb_input_embedding)
doc2_enc = doc2 * importance_embedding #tf.matmul(doc2, tf.expand_dims(importance_embedding, -1) * emb_input_embedding)
else:
doc_enc = doc
doc1_enc = doc1
doc2_enc = doc2
#################### Bernoulli Sample #####################
## ref code: https://r2rt.com/binary-stochastic-neurons-in-tensorflow.html
def bernoulliSample(x):
"""
Uses a tensor whose values are in [0,1] to sample a tensor with values in {0, 1},
using the straight through estimator for the gradient.
E.g.,:
if x is 0.6, bernoulliSample(x) will be 1 with probability 0.6, and 0 otherwise,
and the gradient will be pass-through (identity).
"""
g = tf.get_default_graph()
with ops.name_scope("BernoulliSample") as name:
with g.gradient_override_map({"Ceil": "Identity", "Sub": "BernoulliSample_ST"}):
if deterministic_eval:
mus = tf.cond(is_eval, lambda: tf.ones(tf.shape(x))*0.5, lambda: tf.random_uniform(tf.shape(x)))
else:
mus = tf.random_uniform(tf.shape(x))
return tf.ceil(x - mus, name=name)
@ops.RegisterGradient("BernoulliSample_ST")
def bernoulliSample_ST(op, grad):
return [grad, tf.zeros(tf.shape(op.inputs[1]))]
###########################################################
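    # Descriptive note (not original code): bernoulliSample returns hard 0/1
    # bits via ceil(x - u) in the forward pass, while the gradient overrides
    # above treat the ceil as identity w.r.t. x and block the gradient w.r.t.
    # the uniform noise, i.e. the classic straight-through estimator.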
# encode
def encoder(doc, hidden_neurons_encode, encoder_layers):
doc_layer = tf.layers.dense(doc, hidden_neurons_encode, name="encode_layer0",
reuse=tf.AUTO_REUSE, activation=tf.nn.relu)
#doc_layer = tf.nn.dropout(doc_layer, dropout_keep)
for i in range(1,encoder_layers):
doc_layer = tf.layers.dense(doc_layer, hidden_neurons_encode, name="encode_layer"+str(i),
reuse=tf.AUTO_REUSE, activation=tf.nn.relu)
doc_layer = tf.nn.dropout(doc_layer, tf.cond(is_eval, lambda: 1.0, lambda: dropout_keep))
doc_layer = tf.layers.dense(doc_layer, bits, name="last_encode", reuse=tf.AUTO_REUSE, activation=tf.nn.sigmoid)
bit_vector = bernoulliSample(doc_layer)
return bit_vector, doc_layer
bit_vector, cont_vector = encoder(doc_enc, hidden_neurons_encode, encoder_layers)
if use_ranking:
bit_vector_doc1, cont1 = encoder(doc1_enc, hidden_neurons_encode, encoder_layers)
bit_vector_doc2, cont2 = encoder(doc2_enc, hidden_neurons_encode, encoder_layers)
# decode
# transform s from [None, bits] into [None, emb_size]
log_sigma2 = tf.layers.dense(cont_vector, bits, name="decode_logsigma2", activation=tf.nn.sigmoid)
e = tf.random.normal([batch_size, bits])
if noise_type == 2: #annealing
print("use annealing")
noisy_bit_vector = tf.math.multiply(e, sigma_annealing) + bit_vector
#noisy_bit_vector = tf.maximum(noisy_bit_vector, 0)
elif noise_type == 1: #learned
if use_sigma_directly:
print("use sigma directly")
noisy_bit_vector = tf.math.multiply(e, log_sigma2) + bit_vector
else:
noisy_bit_vector = tf.math.multiply(e, tf.sqrt(tf.exp(log_sigma2))) + bit_vector
elif noise_type == 0: #none
noisy_bit_vector = bit_vector
else:
print("unknown noise_type", noise_type)
exit()
# s * Emb
softmax_bias = tf.Variable(tf.zeros(vocab_size), name="softmax_bias")
#print(importance_embedding, tf.transpose(embedding))
#print( tf.multiply(tf.transpose(embedding), importance_embedding) )
#exit()
#if pretrained_emb:
print("pretrained embedding downscaling layer")
embedding = tf.layers.dense(embedding, bits, name="lower_dim_embedding_layer")
dot_emb_vector = tf.linalg.matmul(noisy_bit_vector, tf.multiply(tf.transpose(embedding), importance_embedding) ) + softmax_bias
softmaxed = tf.nn.softmax(dot_emb_vector)
logaritmed = tf.math.log(tf.maximum(softmaxed, 1e-10))
logaritmed = tf.multiply(logaritmed, tf.cast(doc > 0, tf.float32)) #tf.cast(doc>0, tf.float32)) # set words not occuring to 0
# loss
num_samples = tf.reduce_sum(masking)
def my_dot_prod(a,b):
return tf.reduce_sum(tf.multiply(a, b), 1)
if use_ranking:
use_dot = False
if use_dot:
bit_vector_sub = 2*bit_vector - 1
bit_vector_doc1_sub = 2*bit_vector_doc1 - 1
bit_vector_doc2_sub = 2*bit_vector_doc2 - 1
dist1 = my_dot_prod(bit_vector_sub, bit_vector_doc1_sub) #tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc1, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc1, axis=1)
dist2 = my_dot_prod(bit_vector_sub, bit_vector_doc2_sub) #tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc2, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc2, axis=1)
signpart = tf.cast(doc1weak > doc2weak, tf.float32)
else:
dist1 = tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc1, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc1, axis=1)
dist2 = tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc2, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc2, axis=1)
signpart = tf.cast(doc1weak > doc2weak, tf.float32)
if use_dot:
rank_loss = hinge_loss_eps(labels=(signpart), logits=(dist1-dist2), epsval= hinge_val)#bits/4.0)
else:
equal_score = tf.cast( tf.abs(doc1weak - doc2weak) < 1e-10, tf.float32)
unequal_score = tf.cast( tf.abs(doc1weak - doc2weak) >= 1e-10, tf.float32)
rank_loss_uneq = hinge_loss_eps(labels=(signpart), logits=(dist2 - dist1), epsval=hinge_val, weights=unequal_score)#bits / 8.0)
eq_dist = tf.abs(dist2 - dist1)
rank_loss_eq = compute_weighted_loss( eq_dist, weights=equal_score, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS)
rank_loss = rank_loss_uneq + rank_loss_eq
if use_ranking:
e1 = tf.random.normal([batch_size, bits])
e2 = tf.random.normal([batch_size, bits])
noisy_bit_vector1 = tf.math.multiply(e1, sigma_annealing) + bit_vector_doc1
noisy_bit_vector2 = tf.math.multiply(e2, sigma_annealing) + bit_vector_doc2
dot_emb_vector1 = tf.linalg.matmul(noisy_bit_vector1,
tf.multiply(tf.transpose(embedding), importance_embedding)) + softmax_bias
dot_emb_vector2 = tf.linalg.matmul(noisy_bit_vector2,
tf.multiply(tf.transpose(embedding), importance_embedding)) + softmax_bias
softmaxed1 = tf.nn.softmax(dot_emb_vector1)
logaritmed1 = tf.math.log(tf.maximum(softmaxed1, 1e-10))
logaritmed1 = tf.multiply(logaritmed1, tf.cast(doc1 > 0, tf.float32))
softmaxed2 = tf.nn.softmax(dot_emb_vector2)
logaritmed2 = tf.math.log(tf.maximum(softmaxed2, 1e-10))
logaritmed2 = tf.multiply(logaritmed2, tf.cast(doc2 > 0, tf.float32))
loss_recon1 = tf.reduce_sum(tf.multiply(tf.reduce_sum(logaritmed1, 1), masking) / num_samples, axis=0)
loss_recon2 = tf.reduce_sum(tf.multiply(tf.reduce_sum(logaritmed2, 1), masking) / num_samples, axis=0)
doc_1_2_recon_loss = -(loss_recon1 + loss_recon2)
# VAE loss part
loss_recon = tf.reduce_sum( tf.multiply(tf.reduce_sum(logaritmed, 1), masking)/num_samples, axis=0)
recon_per_word = logaritmed #print("--------", logaritmed)
print("#################", importance_embedding)
loss_kl = tf.multiply(cont_vector, tf.math.log( tf.maximum(cont_vector/0.5, 1e-10) )) + \
tf.multiply(1 - cont_vector, tf.math.log( tf.maximum((1 - cont_vector)/0.5, 1e-10) ))
loss_kl = tf.reduce_sum( tf.multiply(tf.reduce_sum(loss_kl, 1), masking)/num_samples, axis=0)
loss_vae = -(loss_recon - kl_weight*loss_kl)
if use_ranking:
loss_rank_weighted = rank_weight * rank_loss
loss = loss_rank_weighted + loss_vae + doc_1_2_recon_loss # we want to maximize, but Adam only support minimize
loss_rank_unweighted = rank_loss - (loss_recon - kl_weight*loss_kl) + doc_1_2_recon_loss
else:
loss = loss_vae
rank_loss = loss*0
loss_rank_weighted = -1
loss_rank_unweighted = -1
dist1 = -1
dist2 = -1
signpart = -1
rank_loss_eq = rank_loss
rank_loss_uneq = rank_loss
tf.summary.scalar('loss_vae', loss_vae)
tf.summary.scalar('loss_kl', loss_kl)
tf.summary.scalar('loss_recon', loss_recon)
tf.summary.scalar('loss_rank_raw', rank_loss)
tf.summary.scalar('loss_rank_weighted', loss_rank_weighted)
tf.summary.scalar('loss_total', loss)
tf.summary.scalar("kl_weight", kl_weight)
tf.summary.scalar("rank_weight", rank_weight)
tf.summary.scalar("unweighted_loss", loss_rank_unweighted)
tf.summary.scalar('loss_rank_raw_eq', rank_loss_eq)
tf.summary.scalar('loss_rank_raw_uneq', rank_loss_uneq)
#tf.summary.scalar("learned sigma value", tf.reduce_sum(log_sigma2)/(num_samples*bits))
#tf.summary.scalar("learned sigma value (as used)", tf.reduce_sum(tf.sqrt(tf.exp(log_sigma2)))/(num_samples*bits))
tf.summary.scalar("sigma annealing value", sigma_annealing)
print("vae",loss_vae)
print("recon",loss_recon)
print("kl",loss_kl)
print("rank weighted",loss_rank_weighted)
print("rank loss", rank_loss)
print("total loss", loss)
print("kl weight", kl_weight)
# optimize
grad = optimizer.minimize(loss)
init = tf.global_variables_initializer()
# make input dictionary
def input_dict(docval, maska):
return {doc: docval, masking: maska}
merged = tf.summary.merge_all()
return init, grad, loss, input_dict, bit_vector, cont_vector, is_eval, dist1, dist2, signpart, \
rank_loss, merged, loss_rank_unweighted, importance_embedding, recon_per_word
|
[
"noreply@github.com"
] |
noreply@github.com
|
2fa80adb5365c206c46eb8da749aa6e4b59a116f
|
c6838a47be5b22f1202867e577890ab76973184b
|
/polyphony/compiler/verilog_common.py
|
36572a37cf5497b2df899d14577bbe3fe78bf903
|
[
"MIT"
] |
permissive
|
Chippiewill/polyphony
|
2d129fc26d716d5b989dff085120b7303ca39815
|
3f7a90779f082bbc13855d610b31d1058f11da9c
|
refs/heads/master
| 2021-09-13T11:48:56.471829
| 2017-08-31T01:47:15
| 2017-08-31T01:47:15
| 112,966,318
| 0
| 0
| null | 2017-12-03T21:44:12
| 2017-12-03T21:44:11
| null |
UTF-8
|
Python
| false
| false
| 1,457
|
py
|
PYTHON_OP_2_VERILOG_OP_MAP = {
'And': '&&', 'Or': '||',
'Add': '+', 'Sub': '-', 'Mult': '*', 'FloorDiv': '/', 'Mod': '%',
'LShift': '<<', 'RShift': '>>>',
'BitOr': '|', 'BitXor': '^', 'BitAnd': '&',
'Eq': '==', 'NotEq': '!=', 'Lt': '<', 'LtE': '<=', 'Gt': '>', 'GtE':'>=',
'IsNot': '!=',
'USub': '-', 'UAdd': '+', 'Not': '!', 'Invert':'~'
}
def pyop2verilogop(op):
return PYTHON_OP_2_VERILOG_OP_MAP[op]
_keywords = {
'always', 'assign', 'automatic',
'begin',
'case', 'casex', 'casez', 'cell', 'config',
'deassign', 'default', 'defparam', 'design', 'disable',
'edge', 'else', 'end', 'endcase', 'endconfig', 'endfunction', 'endgenerate',
'endmodule', 'endprimitive', 'endspecify', 'endtable', 'endtask', 'event',
'for', 'force', 'forever', 'fork', 'function',
'generate', 'genvar',
'if', 'ifnone', 'incdir', 'include', 'initial', 'inout', 'input', 'instance',
'join', 'liblist', 'library', 'localparam',
'macromodule', 'module',
'negedge', 'noshowcancelled',
'output',
'parameter', 'posedge', 'primitive', 'pulsestyle_ondetect', 'pulsestyle_onevent',
'reg', 'release', 'repeat',
'scalared', 'showcancelled', 'signed', 'specparam', 'strength',
'table', 'task', 'tri', 'tri0', 'tri1', 'triand', 'trior', 'trireg',
'unsigned', 'use',
'vectored',
'wait', 'wand', 'while', 'wor', 'wire'
}
def is_verilog_keyword(word):
return word in _keywords
|
[
"kataoka@sinby.com"
] |
kataoka@sinby.com
|
040bb40356755d5212e78b84510e1694a8c54de4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03211/s451237439.py
|
f9efa41c7796ee9c183843e22d4cccb747349d8b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
s =input()
m = 100000
for i in range(len(s)-2):
a=(s[i:i+3])
m=min(abs(753-int(a)),m)
print(m)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a2fc15d839aa393c58038566f984b77acf7cd3c5
|
ef5706049bc847cab93cd43a8ff42a5b987b2fbf
|
/poem.py
|
f3648d6a5ff2a5cfc50a85c593de49dddf2725c2
|
[] |
no_license
|
justakaigood/banana
|
77ea89fc78b8557b444127312fbf95ce9671033f
|
a6994656fb5574f81ebbe977a1531ee96197f89f
|
refs/heads/master
| 2020-08-04T18:44:25.023640
| 2020-06-08T18:12:19
| 2020-06-08T18:12:19
| 212,241,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# coding: utf-8
from numpy.random import randint
for i in range(10):
print(randint(1,10))
words=""
# The stray scratch lines below were kept from the original file but are
# commented out, since bare names like these would raise NameError at runtime.
# dfj
# aasf
# afdefa
# afefa
# afdf
# sgs
# sgrfg
# hjh
# hbk
# njbjb
|
[
"108701036@nccu.edu.tw"
] |
108701036@nccu.edu.tw
|
207bee7e203e906fc119bb7df61d83adcdec1d35
|
d49f28ea7867cf9ce9512c0521b136934e97b7d2
|
/tests/backends/base/test_client.py
|
4573bbe97bfb174d2998b800e8ce5e119a7d4da8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
tamirverthim/django
|
cdbc198a055deeb526caff6b18ae874445f217c5
|
666b7048a0dc6b067c1e3f58653f3c7ca00371a2
|
refs/heads/master
| 2023-04-14T00:51:11.507226
| 2020-12-07T12:19:20
| 2020-12-07T12:19:20
| 319,310,225
| 0
| 0
|
BSD-3-Clause
| 2023-04-03T23:53:00
| 2020-12-07T12:17:41
|
Python
|
UTF-8
|
Python
| false
| false
| 605
|
py
|
from django.db import connection
from django.db.backends.base.client import BaseDatabaseClient
from django.test import SimpleTestCase
class SimpleDatabaseClientTests(SimpleTestCase):
def setUp(self):
self.client = BaseDatabaseClient(connection=connection)
def test_settings_to_cmd_args_env(self):
msg = (
'subclasses of BaseDatabaseClient must provide a '
'settings_to_cmd_args_env() method or override a runshell().'
)
with self.assertRaisesMessage(NotImplementedError, msg):
self.client.settings_to_cmd_args_env(None, None)
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
6a0f5439c7a515742c41b2b89a24418d3e311706
|
4b47c77e3bd7ac31f230bcc46f8a08c70fd66893
|
/src/processing/test.py
|
b73d32aceb1db509bf4a9d1e650719bf56a990cd
|
[] |
no_license
|
DaniilRoman/predictiveAnalyticsForTimeSeries
|
1d48c7fde7c1c642e8304a9ee011ba49c81e1a8f
|
362e7d0f743416f3685a1e4ffc4382df6505d6f4
|
refs/heads/master
| 2020-04-15T02:58:29.427886
| 2019-05-26T17:47:35
| 2019-05-26T17:47:35
| 164,330,957
| 0
| 0
| null | 2019-11-01T13:54:21
| 2019-01-06T17:46:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,743
|
py
|
from multiprocessing import Pool, Process
import time
from src.processing.DataHolder import DataHolder
# def func(arg="check"):
# time.sleep(1)
# print(arg)
# if __name__ == '__main__':
# pass
# p = Process(target=func, args=("test",))
# p.start()
# p.join() # join attaches process p to the main process, so we effectively run sequentially, i.e. wait for process p to finish
# dataHolder = DataHolder()
# p = Process(target=dataHolder.storeNewValue)
# p.start()
#
# import time
# from multiprocessing import Process, Value, Lock
#
# def func(val, lock):
# for i in range(50):
# time.sleep(0.01)
# with lock:
# val.value += 1
# print(val.value)
#
# if __name__ == '__main__':
# v = Value('i', 0)
# lock = Lock()
# v.value += 1
# procs = [Process(target=func, args=(v, lock)) for i in range(10)]
#
# for p in procs: p.start()
# # for p in procs: p.join()
#
# # print(v.value)
#
# from multiprocessing import Process, Pipe
# import time
#
# def reader_proc(pipe):
# p_output, p_input = pipe
# p_input.close()
# while p_output.poll():
# msg = p_output.recv()
# # print(msg)
#
# def writer(count, p_input):
# for ii in range(0, count):
# p_input.send(ii)
#
# if __name__=='__main__':
# for count in [10]:
# p_output, p_input = Pipe()
# reader_p = Process(target=reader_proc, args=((p_output, p_input),))
# reader_p.daemon = True
# reader_p.start()
# p_output.close()
#
# _start = time.time()
# writer(count, p_input) # Send a lot of stuff to reader_proc()
# p_input.close()
# reader_p.join()
# print("Sending {0} numbers to Pipe() took {1} seconds".format(count,
# (time.time() - _start)))
import matplotlib.pylab as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn import datasets
diabetes = datasets.load_diabetes()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(diabetes.data, diabetes.target, test_size=0.2, random_state=0)
model = LinearRegression()
# print(X_train)
# print(X_train.shape)
X_train = X_train[:, 0].reshape(-1, 1)
X_test = X_test[:, 0].reshape(-1, 1)
# print(X_train)
# print(X_train.shape)
# print(y_train)
# print(y_train.shape)
# 2. Use fit
model.fit(X_train, y_train)
# 3. Check the score
print(X_test)
print(X_test.shape)
predict = model.predict(X_test)
# print(predict)
# print(predict.shape)
|
[
"danroman17397@gmail.com"
] |
danroman17397@gmail.com
|
3985abdfdeb1870b995aaeaa8ca347149181b77c
|
0cf39bf6a4a5aee36c8229a2f527a77ea3cd3a3d
|
/notebooks/analysis.py
|
ef59efd2cb500d1ea589f25a85b4ceb931407462
|
[
"MIT"
] |
permissive
|
vipinsharma0586/Bank-deposit-predictive-model
|
78812a8b37ceeeec2dc8b4ca0b976ecf60363a54
|
1916267e71b58ca2d5082ca36da6dc3765c09931
|
refs/heads/master
| 2022-12-10T23:18:56.248394
| 2020-08-29T09:53:13
| 2020-08-29T09:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,537
|
py
|
# -*- coding: utf-8 -*-
"""Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1F77mSkc3dvmjKeeoW7o6CfEHZvJhH9uq
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import cnames
from pyod.models.knn import KNN
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import xgboost as xgb
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
# %matplotlib inline
path = "../data/raw/bank-additional-full.csv"
df = pd.read_csv(path, sep= ';')
df.drop("duration", axis=1)
df.rename(columns={'y':'deposit'}, inplace=True)
df.dtypes
# y column
# Binary Encoding
df['deposit'] = np.where(df.deposit == 'yes', 1, 0)
"""###### CLEANING OUTLIERS USING PYOD"""
import random
from matplotlib.colors import cnames
corr = df.corr()['deposit'].abs().sort_values(ascending=False)
h_corr_cols = corr[corr < 1].index.tolist()
colors = list(cnames.keys())
sns.set_style('darkgrid')
fig , ax = plt.subplots(4,3,figsize = (16,12))
ax = ax.ravel()
for i,col in enumerate(h_corr_cols):
sns.boxplot(df[col], ax = ax[i],color = random.choice(colors))
x = df[h_corr_cols].values
model = KNN(contamination=.1)
model.fit(x)
predicted = model.predict(x)
outliers = df.loc[(predicted == 1),:]
inliers = df.loc[(predicted == 0),:]
df = df.drop(index = df.loc[(predicted == 1),:].index )
"""###### Treating imbalance data"""
df.education.value_counts().to_frame()
df['education'].replace({'basic.9y': 'basic','basic.4y': 'basic','basic.6y':'basic'},inplace=True)
df['education'].value_counts().to_frame()
df.job.value_counts().to_frame()
df['job'].replace({'entrepreneur': 'self-employed', 'technician': 'blue-collar',
'admin.': 'white-collar', 'management': 'white-collar',
'services': 'pink-collar', 'housemaid': 'pink-collar'}, inplace=True)
df.job.value_counts().to_frame()
df.shape
# categorical columns
# OneHotEncoding
cat_cols = df.select_dtypes(include=[
'object']).columns
df = pd.get_dummies(df, columns=cat_cols)
#standard Scaler for Numerical Variables
scaler = StandardScaler()
num_cols = df.select_dtypes(include=['float64', 'int64']).columns
num_cols = num_cols.drop('deposit')
df[num_cols] = scaler.fit_transform(df[num_cols])
df.head(2)
df.shape
X = df.drop(columns=['duration', 'deposit'])
y = df['deposit']
print(X.shape)
print(y.shape)
y.value_counts().to_frame()
sampler = RandomOverSampler(random_state=42)
X_sampled, y_sampled = sampler.fit_resample(X, y)
pd.Series(y_sampled).value_counts().to_frame()
"""###### Dimensionality Reduction: Principal Component Analysis"""
from sklearn.decomposition import PCA
pca = PCA(n_components = 10)
pca.fit(X_sampled)
X = pca.transform(X_sampled)
print(X_sampled.shape)
print(y_sampled.shape)
print(X.shape)
df_y = pd.DataFrame(data = y_sampled, columns = ['deposit'])
df_X = pd.DataFrame(data = X, columns = ['PC_1', 'PC_2','PC_3', 'PC_4','PC_5','PC_6', 'PC_7','PC_8', 'PC_9','PC_10'])
df_X
df_y.to_csv('../data/processed/results.csv', index=False)
df_X.to_csv('../data/processed/features.csv', index=False)
|
[
"lotomej12@gmail.com"
] |
lotomej12@gmail.com
|
bd889e11569d36e3109b85c5a0a51fcde69bafc1
|
14a853584c0c1c703ffd8176889395e51c25f428
|
/sem1/csa/project-euler/1/1.py
|
2781f342cd824654222ed7b2a8bc9e4e36f07637
|
[] |
no_license
|
harababurel/homework
|
d0128f76adddbb29ac3d805c235cdedc9af0de71
|
16919f3b144de2d170cd6683d54b54bb95c82df9
|
refs/heads/master
| 2020-05-21T12:25:29.248857
| 2018-06-03T12:04:45
| 2018-06-03T12:04:45
| 43,573,199
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
print(sum([x for x in range(1, 100000001) if x % 3 == 0 or x % 5 == 0]))
|
[
"srg.pscs@gmail.com"
] |
srg.pscs@gmail.com
|
02fec4aed47becb9f92da777026cef2e2df0cec4
|
094639d004fd3c1342b53cd311e295fcc61c82ee
|
/inputBoxes.py
|
66ed12032a546c5668edeeaefb9d2761f737cf4c
|
[] |
no_license
|
IdanErgaz/Pytest
|
cd0d4420094c8f25cd9dc351d98a3ff7c667c0f7
|
a501a2431a1a9973ea7ebaf85a23041a04a1c1e5
|
refs/heads/main
| 2023-02-05T05:55:02.858991
| 2020-12-31T11:53:54
| 2020-12-31T11:53:54
| 319,232,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver=webdriver.Chrome(executable_path=r"C:\Projects\Automation\Drivers\chromedriver.exe")
driver.get("https://fs2.formsite.com/meherpavan/form2/index.html?1537702596407")
status1=driver.find_element_by_id("RESULT_TextField-1").is_displayed()
print("Field1 status is:" ,status1) #print status
status2=driver.find_element_by_id("RESULT_TextField-1").is_enabled()
print("Field1 enabled status:", status2) #print ENABLED status
driver.find_element_by_id("RESULT_TextField-1").send_keys("pavan")
driver.find_element_by_id("RESULT_TextField-2").send_keys("kumar")
driver.find_element_by_id("RESULT_TextField-3").send_keys("123456789")
driver.quit()
|
[
"noreply@github.com"
] |
noreply@github.com
|
f0365d989dd7c876fa5c7fca77f76477b90906d6
|
44baa6621306c6b9810db48b3c1479cb8db294b3
|
/test/test_summaries.py
|
890a49aaf4ebb8b1bd8020b972c18679946c46be
|
[
"Apache-2.0"
] |
permissive
|
codeninja/tensorforce
|
ecc216e2970194d086209fb726fc64b4b9cd8e93
|
212b115d10a21b8241e1d9df56c4851ffd370f34
|
refs/heads/master
| 2020-08-13T08:16:11.046478
| 2019-10-18T17:36:03
| 2019-10-18T17:36:03
| 214,937,969
| 2
| 0
|
Apache-2.0
| 2019-10-18T17:36:04
| 2019-10-14T03:15:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import unittest
from test.unittest_base import UnittestBase
class TestSummaries(UnittestBase, unittest.TestCase):
exclude_bounded_action = True # TODO: shouldn't be necessary!
require_observe = True
directory = 'test-summaries'
def test_summaries(self):
# FEATURES.MD
self.start_tests()
# 'dropout', 'kl-divergence'
reward_estimation = dict(horizon=2, estimate_horizon='late')
baseline_policy = dict(network=dict(type='auto', size=8, internal_rnn=1))
baseline_objective = 'policy_gradient'
baseline_optimizer = 'adam'
self.unittest(
summarizer=dict(directory=self.__class__.directory, labels='all', frequency=2),
reward_estimation=reward_estimation, baseline_policy=baseline_policy,
baseline_objective=baseline_objective, baseline_optimizer=baseline_optimizer
)
for directory in os.listdir(path=self.__class__.directory):
directory = os.path.join(self.__class__.directory, directory)
for filename in os.listdir(path=directory):
os.remove(path=os.path.join(directory, filename))
assert filename.startswith('events.out.tfevents.')
break
os.rmdir(path=directory)
os.rmdir(path=self.__class__.directory)
self.finished_test()
|
[
"alexkuhnle@t-online.de"
] |
alexkuhnle@t-online.de
|
570d5e5d5fbd8600a45c78d01b6b02a8b09ce153
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/set_database_user_privilege_request.py
|
150b872cab2546ae4611dfa32d9ac8d91350c989
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,906
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SetDatabaseUserPrivilegeRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'x_language': 'str',
'body': 'SetDatabaseUserPrivilegeReqV3'
}
attribute_map = {
'instance_id': 'instance_id',
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, instance_id=None, x_language=None, body=None):
"""SetDatabaseUserPrivilegeRequest
The model defined in huaweicloud sdk
        :param instance_id: Instance ID
        :type instance_id: str
        :param x_language: Language
        :type x_language: str
:param body: Body of the SetDatabaseUserPrivilegeRequest
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._instance_id = None
self._x_language = None
self._body = None
self.discriminator = None
self.instance_id = instance_id
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def instance_id(self):
"""Gets the instance_id of this SetDatabaseUserPrivilegeRequest.
        Instance ID
:return: The instance_id of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this SetDatabaseUserPrivilegeRequest.
        Instance ID
:param instance_id: The instance_id of this SetDatabaseUserPrivilegeRequest.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def x_language(self):
"""Gets the x_language of this SetDatabaseUserPrivilegeRequest.
        Language
:return: The x_language of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this SetDatabaseUserPrivilegeRequest.
        Language
:param x_language: The x_language of this SetDatabaseUserPrivilegeRequest.
:type x_language: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this SetDatabaseUserPrivilegeRequest.
:return: The body of this SetDatabaseUserPrivilegeRequest.
:rtype: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this SetDatabaseUserPrivilegeRequest.
:param body: The body of this SetDatabaseUserPrivilegeRequest.
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SetDatabaseUserPrivilegeRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
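# Usage sketch (illustrative values only; the body model class is the
# SetDatabaseUserPrivilegeReqV3 referenced in openapi_types above):
#   request = SetDatabaseUserPrivilegeRequest(instance_id='my-instance-id',
#                                             x_language='en-us')
#   request.body = SetDatabaseUserPrivilegeReqV3(...)  # fill in per the API docs
#   print(request)  # __repr__ serialises the model via to_str()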
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
172d2609dd65bb545dff186364df94b7fd883faf
|
4dca9fd1f26e7cb58cf6133c13acf585ca9fda66
|
/LanaBackend/asgi.py
|
4d8a67ca46cd9120225667ed6fa2e1de891dd7fe
|
[] |
no_license
|
Codes-Cleans-Transports/LanaBackend
|
a2215036bbefded7509b6655a855b63d82f21c4e
|
cdb0c6be53ca726ea483b64d02fe65002ec1e7df
|
refs/heads/main
| 2023-03-22T18:59:46.567104
| 2021-02-28T13:17:56
| 2021-02-28T13:17:56
| 342,724,184
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for LanaBackend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LanaBackend.settings')
application = get_asgi_application()
|
[
"rrhubenov@gmail.com"
] |
rrhubenov@gmail.com
|
1b2ba10d76817a94e1225ffb2400157c386d970e
|
b908c5116edc954342561663ee15f235562943a3
|
/eih-raspberrypi-body-detect/draft/auth.py
|
1364d2ef48081f9f616e881f286d8e1f32b8b113
|
[
"CC0-1.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
OAbouHajar/projectEIH
|
578465037f22aed6e13e5311629d7f52582cb501
|
2fcf072a03f8b0b86991abf26cfa9597db5560ff
|
refs/heads/master
| 2023-04-13T18:08:19.788985
| 2021-04-26T10:47:16
| 2021-04-26T10:47:16
| 214,017,815
| 1
| 0
|
MIT
| 2020-07-02T09:17:45
| 2019-10-09T20:31:30
|
Python
|
UTF-8
|
Python
| false
| false
| 202
|
py
|
from firebase import firebase
firebase = firebase.FirebaseApplication('https://projecteih.firebaseio.com', authentication=None)
result = firebase.get('/users', None, {'print': 'pretty'})
print (result)
|
[
"smsm.sy@hotmail.com"
] |
smsm.sy@hotmail.com
|
dcb914d3f2a8ae52a2f670667bd89a5fd0671f3c
|
e446918cc531f839706b63bf38269dd7b3c37432
|
/scrapy_distributed/settings.py
|
449546f2f8a2354986ec14124e31a1b88f073e28
|
[] |
no_license
|
leosudalv2010/scrapy-redis-distributed
|
5a99547cc7f35a016f4242ddf92e4a11aaa5d59a
|
e790d5144088f325d0d52d85781081b8e0fa2bf2
|
refs/heads/master
| 2020-03-21T02:35:53.784444
| 2018-06-20T08:48:15
| 2018-06-20T08:57:25
| 138,006,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for scrapy_distributed project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapy_distributed'
SPIDER_MODULES = ['scrapy_distributed.spiders']
NEWSPIDER_MODULE = 'scrapy_distributed.spiders'
KEYWORDS = ['shirt']
MAXPAGE = 100
DOWNLOADER_MIDDLEWARES = {
'scrapy_distributed.middlewares.SeleniumMiddleware': 300
}
ITEM_PIPELINES = {
'scrapy_distributed.pipelines.MySQLPipeline': 300
}
FEED_EXPORT_ENCODING = 'utf8'
# Scrapy-Redis related settings
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
REDIS_URL = 'redis://:sd89fjmn12s5dsf5x@192.168.2.200:6379'
LOG_FILE = 'log'
LOG_LEVEL = 'INFO'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
|
[
"leosudalv2010@163.com"
] |
leosudalv2010@163.com
|
3009068a1939ffcb3f8fec5364f73b3488b41100
|
55c1bcc5958b825a8a4208eca766729bba4b9722
|
/samples/secrets-manager/secretLambda.py
|
d6be92d6e870e45510c1afb4a7ff4103c7c68cb7
|
[
"MIT"
] |
permissive
|
Derek-Ashmore/AWSDevOpsUtilities
|
93683a49dc7c7481508c1246e975647c8f66346b
|
fd4cc98449a19b747335ca0dd874b4631439ee13
|
refs/heads/master
| 2020-03-10T21:42:16.648772
| 2019-01-19T20:10:18
| 2019-01-19T20:10:18
| 129,600,347
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
"""
secretLambda.py
This lambda is a simple example of decrypting secrets using KMS to reduce
exposure of sensitive items such as database passwords.
Environment Settings:
-- Secret_Name Secret name to use
Source Control: https://github.com/Derek-Ashmore/AWSDevOpsUtilities
"""
import sys
import json
import os
import boto3
import base64
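# Local invocation sketch (the secret name below is illustrative and must
# already exist in Secrets Manager, with AWS credentials configured):
#   os.environ['Secret_Name'] = 'prod/db/password'
#   secretHandler({}, None)  # prints the SecretString for that secret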
def secretHandler(event, context):
try:
secretName = os.getenv('Secret_Name')
if secretName == None:
raise Exception('Secret_Name environment variable not set')
print( showSecret(secretName) )
except Exception as e:
e.args += (event,vars(context), secretName)
raise
return 0;
def showSecret(secretName):
secretsMgrClient = boto3.client('secretsmanager')
get_secret_value_response = secretsMgrClient.get_secret_value(
SecretId=secretName
)
return get_secret_value_response['SecretString']
|
[
"dashmore@force66.com"
] |
dashmore@force66.com
|
7c77aa89634056fa43a39b2384cd8d53186bca2f
|
f264800326fe36503ea115861b4ebe4ebf4f22ef
|
/499proj/matrixMain.py
|
21da14235b4a75873ffdcc24a3d3c029773ec13f
|
[] |
no_license
|
Jett-Ma/499-Final-Project
|
c3b6d7908c98ba48e845b2b4457cb3c59058ff52
|
378fc24e04f127060c7fd04ecd4e98411493ff47
|
refs/heads/main
| 2023-03-19T12:48:10.314639
| 2021-03-24T01:17:47
| 2021-03-24T01:17:47
| 350,908,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,311
|
py
|
# coding:utf-8
import sys
import numpy
import pandas as pd
from sqlalchemy import create_engine
import pymysql
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.tokenize import MWETokenizer
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction import DictVectorizer
import copy
def pre_processing(list):
    # Import helpers used for stopword removal and stemming
porter_stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
sr = stopwords.words('english')
sr_append = ["rt", "http", "com", ]
    # Regular expression used to strip web links
results = re.compile(r'[http|https]*://[a-zA-Z0-9.?/&=:]*', re.S)
    # Remove stopwords, lemmatize, stem and tokenize here; word_tokenize does the tokenizing
sentences = list.lower()
grammar = "NP: {<DT>?<JJ>*<NN>|<NNP>*}"
cp = nltk.RegexpParser(grammar)
words = word_tokenize(sentences)
sentence = nltk.pos_tag(word_tokenize(sentences))
tree = cp.parse(sentence)
#print
#"\nNoun phrases:"
list_of_noun_phrases = extract_phrases(tree, 'NP')
for phrase in list_of_noun_phrases:
word = "_".join([x[0] for x in phrase.leaves()])
if word not in words:
words.append(word)
#print(words)
test_temp = []
for z in words:
# filter web link
z = re.sub(results, '', z)
# alphabet characters only
z = re.sub('[^A-Za-z0-9_]+', '', z)
z = lemmatizer.lemmatize(z)
# z = porter_stemmer.stem(z)
# filter stopwords
if z in sr:
continue
if z == '':
continue
if z in sr_append:
continue
test_temp.append(z)
# print("After pre-process : ")
# print(test_temp)
return test_temp
def extract_phrases(my_tree, phrase):
my_phrases = []
if my_tree.label() == phrase:
my_phrases.append(my_tree.copy(True))
for child in my_tree:
if type(child) is nltk.Tree:
list_of_phrases = extract_phrases(child, phrase)
if len(list_of_phrases) > 0:
my_phrases.extend(list_of_phrases)
return my_phrases
if __name__ == '__main__':
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
numpy.set_printoptions(threshold=sys.maxsize)
    # show all columns
    pd.set_option('display.max_columns', None)
    # show all rows
    pd.set_option('display.max_rows', None)
    # set the display width of column values (the default is 50)
    pd.set_option('max_colwidth',30)
    con_engine = create_engine('mysql+pymysql://root:@localhost/499db2?charset=utf8')
    # database connection settings
sql_ = 'select * from zctweets;'
df_data = pd.read_sql_query(sql_, con_engine)
del df_data['id']
del df_data['screen_name']
del df_data['source']
del df_data['in_reply_to_screen_name']
del df_data['in_reply_to_status_id_str']
del df_data['retweet_count']
del df_data['favorite_count']
    # the unused columns above have been dropped
    df_sort = df_data.sort_values('userid_str')
    # sorting by userid effectively groups the rows per user
    user_list = df_sort['userid_str'].to_list() # convert to a list for convenience
    time_list = df_sort['created_at'].to_list()
    text_list = df_sort['text'].to_list()
    time_list = [i.date() for i in time_list] # keep only the date part of every timestamp
    # initialise some result lists
user_result = []
time_result = []
text_result = []
aready = []
    # group rows by user id (per day), filling in the times and texts
for i in range(len(user_list)) :
if i not in aready:
time_now = time_list[i]
aready.append(i)
user_result.append(user_list[i])
tem_time_list = [time_list[i]]
tem_text_list = [text_list[i]]
for j in range(len(user_list)):
if j not in aready:
time_tem = time_list[j]
if user_list[j] == user_list[i] and time_now == time_tem:
tem_time_list.append(time_list[j])
tem_text_list.append(text_list[j])
aready.append(j)
time_result.append(tem_time_list)
text_result.append(tem_text_list)
text_clean_list = copy.deepcopy(text_result)
for i in range(len(text_clean_list)):
for j in range(len(text_clean_list[i])):
text_clean_list[i][j] = pre_processing(text_clean_list[i][j])
print(text_clean_list[i][j])
df_tem_1 = pd.DataFrame({'user_id':user_result,
'time':time_result,
'text':text_result,
'perticiple':text_clean_list})
    # sparse=False returns the result as a numpy ndarray
v = DictVectorizer(sparse=False)
word_pre = []
all_word = []
    # process the texts
    # put the words of the same user together
for i in range(len(text_clean_list)):
for j in range(len(text_clean_list[i])):
for z in text_clean_list[i][j]:
all_word.append(z)
# print(all_word)
    # deduplicate the words
    all_word = set(all_word)
    tem_dict = {}
    # store the words in a dict, with the word as key and its frequency as value
for i in all_word:
tem_dict[i] = 0
#
for i in range(len(text_clean_list)):
        # deep copy here so the original template dict is not modified
tem_dict_i = copy.deepcopy(tem_dict)
for j in range(len(text_clean_list[i])):
for z in text_clean_list[i][j]:
tem_dict_i[z] = text_clean_list[i][j].count(z)
word_pre.append(tem_dict_i)
# print(word_pre)
# print(len(word_pre))
df_tem_1['word_pre'] = word_pre
    # deduplicate user ids
user_id_set = set(df_tem_1['user_id'].to_list())
text_list_2 = []
word_freq = []
# list of key
first_pre = list(df_tem_1['word_pre'][0].keys())
    # build df_tem_2
    # mainly merges all time slices of the same user together
for user in user_id_set:
# user_id column in dataframe
tem_df = df_tem_1[df_tem_1['user_id']==user]
tem_text = ''
tem_word_freq = {}
for key in first_pre:
# set each value to 0
tem_word_freq[key] = 0
# get all word
for text in tem_df['text']:
for j in text:
tem_text += j
for i in tem_df['word_pre']:
for j in first_pre:
tem_word_freq[j] += i[j]
text_list_2.append(tem_text)
word_freq.append(tem_word_freq)
df_tem_2 = pd.DataFrame({'user_id':list(user_id_set),
'text':text_list_2,
'word_freq':word_freq
})
# df_tem_2.to_csv('4.csv')
# df_tem_1.to_csv('3.csv')
time_orin_list = df_tem_1['time'].to_list()
for j in range(len(time_orin_list)):
time_orin_list[j] = time_orin_list[j][0]
df_tem_1['time'] = time_orin_list
del df_tem_1['text']
del df_tem_1['perticiple']
df_tem_1.to_csv('./doc/6_l.csv')
del df_tem_2['text']
df_tem_2.to_csv('./doc/7_l.csv')
|
[
"68134569+Jett-Ma@users.noreply.github.com"
] |
68134569+Jett-Ma@users.noreply.github.com
|
42d1cac53d347072386f233fdab7116d0e8200e9
|
939ca9c100b2b8d7d4c2825a9ef16dd4d7267455
|
/pageimages/templatetags/pageimage_tags.py
|
68f9c371b5917631968437e3f38c07a41abc7264
|
[
"Apache-2.0"
] |
permissive
|
ethoos/mezzanine-pageimages
|
523671cbe5fc9d0a04d697ad86ecaa1d859167f0
|
b529b4acc204aa26734e3c9f9ffb5c68fae58cf1
|
refs/heads/master
| 2021-01-12T04:46:47.578968
| 2017-01-02T10:48:46
| 2017-01-02T10:48:46
| 77,791,604
| 0
| 1
| null | 2017-01-02T10:48:47
| 2017-01-01T18:40:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
#
# Copyright 2013, 2014
# by Arnold Krille for bcs kommunikationsloesungen
# <a.krille@b-c-s.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django import template
from ..models import DefaultImage
from mezzanine.pages.models import Page
register = template.Library()
def get_default_image(type):
defaultimgs = DefaultImage.objects.filter(type=type)
if len(defaultimgs):
return defaultimgs[0].file.url
return u''
def get_image_for_page(page, type):
imgs = page.pageimage_set.filter(type=type)
if len(imgs):
return imgs[0].file.url
if page.parent:
return get_image_for_page(page.parent, type)
return get_default_image(type)
@register.simple_tag(takes_context=True)
def pageimage(context, type, page=None):
if isinstance(page, str):
page = Page.objects.get(titles=page)
if not page and 'page' in context:
page = context['page']
if page:
return get_image_for_page(page, type)
return get_default_image(type)
|
[
"a.krille@b-c-s.de"
] |
a.krille@b-c-s.de
|
b2329edad1a265e6327257d7b599655e15dc6cfd
|
3015b07ab56da859507abc3881385f4995980600
|
/fisher/spider/fisher_book.py
|
ca3eb35af32edf97a292eac9bc822d59581a2186
|
[] |
no_license
|
xuewen1696/fisher-book-practice
|
f65c559651f5a51d08cfdcb96a4fc8f96f481238
|
93ce16de333381196aaa2de4811559d5c27d7e0c
|
refs/heads/master
| 2022-12-10T06:31:25.912229
| 2018-07-28T07:26:32
| 2018-07-28T07:26:32
| 142,654,555
| 0
| 1
| null | 2022-12-08T02:22:41
| 2018-07-28T06:53:34
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
from fisher.libs.http_fisher import HTTP
from flask import current_app
class FisherBook:
pre_page = 15
isbn_url = 'http://t.yushu.im/v2/book/isbn/{}'
keyword_url = 'http://t.yushu.im/v2/book/search?q={}&start={}&count={}'
def __init__(self):
self.total = 0
self.books = []
def search_by_isbn(self, isbn):
url = self.isbn_url.format(isbn)
        # self.isbn_url also resolves isbn_url via chained attribute lookup
result = HTTP.get(url)
self.__fill_single(result)
def search_by_keyword(self, keyword, page=1):
# url = cls.keyword_url.format(keyword, current_app.config['PRE_PAGE'], cls.calculate_start(page))
url = self.keyword_url.format(keyword, self.calculate_start(page), current_app.config['PRE_PAGE'])
result = HTTP.get(url)
self.__fill_collection(result)
@staticmethod
def calculate_start(page):
return (page - 1)*current_app.config['PRE_PAGE']
def __fill_single(self, data):
if data:
self.total = 1
self.books.append(data)
def __fill_collection(self, data):
self.total = data['total']
self.books = data['books']
@property
def first(self):
return self.books[0] if self.total >= 1 else None
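# Pagination sketch (assumes the Flask config value PRE_PAGE is 15, matching
# the pre_page class attribute above): calculate_start(1) == 0 and
# calculate_start(3) == 30, so page N starts at item (N - 1) * PRE_PAGE in the
# remote API's result list.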
|
[
"xuewen1696@163.com"
] |
xuewen1696@163.com
|
760d04f4f37ec49446c5810324797d3ef73de59c
|
c947a71a16ed180c920d4b362347f980d93bd2fe
|
/src/Classes/MSDS400/Module 3/workout.py
|
c7f40dafdf59f5c1f52238d5010dc1fa5ddcbc10
|
[
"MIT"
] |
permissive
|
bmoretz/Python-Playground
|
b69cac015e95d97f46ebd678c4493a44befb556f
|
a367ec7659b85c24363c21b5c0ac25db08ffa1f6
|
refs/heads/master
| 2021-05-13T23:35:31.986884
| 2019-11-23T19:07:58
| 2019-11-23T19:07:58
| 116,520,816
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
# As part of a weight reduction program, a man designs a monthly exercise program consisting of bicycling, jogging, and swimming.
# He would like to
# exercise at most 28 hours,
# devote at most 6 hours to swimming,
# and jog for no more than the total number of hours bicycling and swimming.
# The calories burned by this person per hour by bicycling, jogging, and swimming are 200, 427, and 283, respectively.
# How many hours should be allotted to each activity to maximize the number of calories burned? What is the maximum number of calories he will burn?
# (Hint: Write the constraint involving jogging in the form less than or equals 0.)
# Let x 1 be the number of hours spent bicycling,
# let x 2 be the number of hours spent jogging,
# and let x 3 be the number of hours spent swimming.
#
# What is the objective function?
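# Restated as a linear program, the model built below is:
#   maximize 200*x1 + 427*x2 + 283*x3
#   subject to x1 + x2 + x3 <= 28, x3 <= 6, x2 - x1 - x3 <= 0, x1, x2, x3 >= 0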
from pulp import *
workout = LpProblem( "Workout Problem", LpMaximize )
x1 = LpVariable( "x1", 0 ) # Bicycling
x2 = LpVariable( "x2", 0 ) # Jogging
x3 = LpVariable( "x3", 0 ) # Swimming
w = LpVariable( "w" )
workout += 200*x1 + 427*x2 + 283*x3
# Constraints
workout += x1 + x2 + x3 <= 28 # no more than total hours
workout += x3 <= 6 # at most hours swimming
workout += x2 <= x1 + x3 # jog no more than Bicycling + Swimming
workout.solve()
print("Status:", LpStatus[workout.status])
for variable in workout.variables():
print("{0} = {1}".format( variable.name, variable.varValue ))
print( 'Optimal Sln: {0}'.format(pulp.value( workout.objective )))
|
[
"bmoretz@ionicsolutions.net"
] |
bmoretz@ionicsolutions.net
|
b0dcde257cf60b3ff95c8d677121bbedec3ea846
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/pkgs/cytoolz-0.7.5-py27_0/lib/python2.7/site-packages/cytoolz/tests/test_none_safe.py
|
62f6280f931530d908a7249f648b54df00f1d677
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 11,403
|
py
|
""" Test that functions are reasonably behaved with None as input.
Typed Cython objects (like dict) may also be None. Using functions from
Python's C API that expect a specific type but receive None instead can cause
problems such as throwing an uncatchable SystemError (and some systems may
segfault instead). We obviously don't want that to happen! As the tests
below discovered, this turned out to be a rare occurrence. The only changes
required were to use `d.copy()` instead of `PyDict_Copy(d)`, and to always
return Python objects from functions instead of int or bint (so exceptions
can propagate).
The vast majority of functions throw TypeError. The vast majority of
functions also behave the same in `toolz` and `cytoolz`. However, there
are a few minor exceptions. Since calls that pass None are edge cases
that don't have well-established behavior yet (other than raising TypeError),
the tests in this file serve to verify that the behavior is at least
reasonably well-behaved and doesn't cause SystemErrors.
"""
# XXX: This file could be back-ported to `toolz` once unified testing exists.
import cytoolz
from cytoolz import *
from cytoolz.utils import raises
from operator import add
class GenException(object):
def __init__(self, exc):
self.exc = exc
def __iter__(self):
return self
def __next__(self):
raise self.exc
def next(self):
raise self.exc
def test_dicttoolz():
tested = []
assert raises((TypeError, AttributeError), lambda: assoc(None, 1, 2))
tested.append('assoc')
assert raises((TypeError, AttributeError), lambda: dissoc(None, 1))
tested.append('dissoc')
# XXX
assert (raises(TypeError, lambda: get_in(None, {})) or
get_in(None, {}) is None)
assert raises(TypeError, lambda: get_in(None, {}, no_default=True))
assert get_in([0, 1], None) is None
assert raises(TypeError, lambda: get_in([0, 1], None, no_default=True))
tested.append('get_in')
assert raises(TypeError, lambda: keyfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: keyfilter(identity, None))
tested.append('keyfilter')
# XXX
assert (raises(TypeError, lambda: keymap(None, {1: 2})) or
keymap(None, {1: 2}) == {(1,): 2})
assert raises((AttributeError, TypeError), lambda: keymap(identity, None))
tested.append('keymap')
assert raises(TypeError, lambda: merge(None))
assert raises((TypeError, AttributeError), lambda: merge(None, None))
tested.append('merge')
assert raises(TypeError, lambda: merge_with(None, {1: 2}, {3: 4}))
assert raises(TypeError, lambda: merge_with(identity, None))
assert raises((TypeError, AttributeError),
lambda: merge_with(identity, None, None))
tested.append('merge_with')
assert raises(TypeError, lambda: update_in({1: {2: 3}}, [1, 2], None))
assert raises(TypeError, lambda: update_in({1: {2: 3}}, None, identity))
assert raises((TypeError, AttributeError),
lambda: update_in(None, [1, 2], identity))
tested.append('update_in')
assert raises(TypeError, lambda: valfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: valfilter(identity, None))
tested.append('valfilter')
# XXX
assert (raises(TypeError, lambda: valmap(None, {1: 2})) or
valmap(None, {1: 2}) == {1: (2,)})
assert raises((AttributeError, TypeError), lambda: valmap(identity, None))
tested.append('valmap')
assert (raises(TypeError, lambda: itemmap(None, {1: 2})) or
itemmap(None, {1: 2}) == {1: (2,)})
assert raises((AttributeError, TypeError), lambda: itemmap(identity, None))
tested.append('itemmap')
assert raises(TypeError, lambda: itemfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: itemfilter(identity, None))
tested.append('itemfilter')
s1 = set(tested)
s2 = set(cytoolz.dicttoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_functoolz():
tested = []
assert raises(TypeError, lambda: complement(None)())
tested.append('complement')
assert compose(None) is None
assert raises(TypeError, lambda: compose(None, None)())
tested.append('compose')
assert raises(TypeError, lambda: curry(None))
tested.append('curry')
assert raises(TypeError, lambda: do(None, 1))
tested.append('do')
assert identity(None) is None
tested.append('identity')
assert raises(TypeError, lambda: juxt(None))
assert raises(TypeError, lambda: list(juxt(None, None)()))
tested.append('juxt')
assert memoize(identity, key=None)(1) == 1
assert memoize(identity, cache=None)(1) == 1
tested.append('memoize')
assert raises(TypeError, lambda: pipe(1, None))
tested.append('pipe')
assert thread_first(1, None) is None
tested.append('thread_first')
assert thread_last(1, None) is None
tested.append('thread_last')
assert flip(lambda a, b: (a, b))(None)(None) == (None, None)
tested.append('flip')
s1 = set(tested)
s2 = set(cytoolz.functoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_itertoolz():
tested = []
assert raises(TypeError, lambda: list(accumulate(None, [1, 2])))
assert raises(TypeError, lambda: list(accumulate(identity, None)))
tested.append('accumulate')
assert raises(TypeError, lambda: concat(None))
assert raises(TypeError, lambda: list(concat([None])))
tested.append('concat')
assert raises(TypeError, lambda: list(concatv(None)))
tested.append('concatv')
assert raises(TypeError, lambda: list(cons(1, None)))
tested.append('cons')
assert raises(TypeError, lambda: count(None))
tested.append('count')
# XXX
assert (raises(TypeError, lambda: list(drop(None, [1, 2]))) or
list(drop(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(drop(1, None)))
tested.append('drop')
assert raises(TypeError, lambda: first(None))
tested.append('first')
assert raises(TypeError, lambda: frequencies(None))
tested.append('frequencies')
assert raises(TypeError, lambda: get(1, None))
assert raises(TypeError, lambda: get([1, 2], None))
tested.append('get')
assert raises(TypeError, lambda: groupby(None, [1, 2]))
assert raises(TypeError, lambda: groupby(identity, None))
tested.append('groupby')
assert raises(TypeError, lambda: list(interleave(None)))
assert raises(TypeError, lambda: list(interleave([None, None])))
assert raises(TypeError,
lambda: list(interleave([[1, 2], GenException(ValueError)],
pass_exceptions=None)))
tested.append('interleave')
assert raises(TypeError, lambda: list(interpose(1, None)))
tested.append('interpose')
assert raises(TypeError, lambda: isdistinct(None))
tested.append('isdistinct')
assert isiterable(None) is False
tested.append('isiterable')
assert raises(TypeError, lambda: list(iterate(None, 1)))
tested.append('iterate')
assert raises(TypeError, lambda: last(None))
tested.append('last')
# XXX
assert (raises(TypeError, lambda: list(mapcat(None, [[1], [2]]))) or
list(mapcat(None, [[1], [2]])) == [[1], [2]])
assert raises(TypeError, lambda: list(mapcat(identity, [None, [2]])))
assert raises(TypeError, lambda: list(mapcat(identity, None)))
tested.append('mapcat')
assert raises(TypeError, lambda: list(merge_sorted(None, [1, 2])))
tested.append('merge_sorted')
assert raises(TypeError, lambda: nth(None, [1, 2]))
assert raises(TypeError, lambda: nth(0, None))
tested.append('nth')
assert raises(TypeError, lambda: partition(None, [1, 2, 3]))
assert raises(TypeError, lambda: partition(1, None))
tested.append('partition')
assert raises(TypeError, lambda: list(partition_all(None, [1, 2, 3])))
assert raises(TypeError, lambda: list(partition_all(1, None)))
tested.append('partition_all')
assert raises(TypeError, lambda: list(pluck(None, [[1], [2]])))
assert raises(TypeError, lambda: list(pluck(0, [None, [2]])))
assert raises(TypeError, lambda: list(pluck(0, None)))
tested.append('pluck')
assert raises(TypeError, lambda: reduceby(None, add, [1, 2, 3], 0))
assert raises(TypeError, lambda: reduceby(identity, None, [1, 2, 3], 0))
assert raises(TypeError, lambda: reduceby(identity, add, None, 0))
tested.append('reduceby')
assert raises(TypeError, lambda: list(remove(None, [1, 2])))
assert raises(TypeError, lambda: list(remove(identity, None)))
tested.append('remove')
assert raises(TypeError, lambda: second(None))
tested.append('second')
# XXX
assert (raises(TypeError, lambda: list(sliding_window(None, [1, 2, 3]))) or
list(sliding_window(None, [1, 2, 3])) == [])
assert raises(TypeError, lambda: list(sliding_window(1, None)))
tested.append('sliding_window')
# XXX
assert (raises(TypeError, lambda: list(take(None, [1, 2])) == [1, 2]) or
list(take(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(take(1, None)))
tested.append('take')
# XXX
assert (raises(TypeError, lambda: list(tail(None, [1, 2])) == [1, 2]) or
list(tail(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(tail(1, None)))
tested.append('tail')
# XXX
assert (raises(TypeError, lambda: list(take_nth(None, [1, 2]))) or
list(take_nth(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(take_nth(1, None)))
tested.append('take_nth')
assert raises(TypeError, lambda: list(unique(None)))
assert raises(TypeError, lambda: list(unique([1, 1, 2], key=None)))
tested.append('unique')
assert raises(TypeError, lambda: join(first, None, second, (1, 2, 3)))
assert raises(TypeError, lambda: join(first, (1, 2, 3), second, None))
tested.append('join')
assert raises(TypeError, lambda: topk(None, [1, 2, 3]))
assert raises(TypeError, lambda: topk(3, None))
tested.append('topk')
assert raises(TypeError, lambda: list(diff(None, [1, 2, 3])))
assert raises(TypeError, lambda: list(diff(None)))
assert raises(TypeError, lambda: list(diff([None])))
assert raises(TypeError, lambda: list(diff([None, None])))
tested.append('diff')
assert raises(TypeError, lambda: peek(None))
tested.append('peek')
s1 = set(tested)
s2 = set(cytoolz.itertoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_recipes():
tested = []
# XXX
assert (raises(TypeError, lambda: countby(None, [1, 2])) or
countby(None, [1, 2]) == {(1,): 1, (2,): 1})
assert raises(TypeError, lambda: countby(identity, None))
tested.append('countby')
# XXX
assert (raises(TypeError, lambda: list(partitionby(None, [1, 2]))) or
list(partitionby(None, [1, 2])) == [(1,), (2,)])
assert raises(TypeError, lambda: list(partitionby(identity, None)))
tested.append('partitionby')
s1 = set(tested)
s2 = set(cytoolz.recipes.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
8cfd3c66b9a03394e87c6cbbac0e72ae02d96b6b
|
77ae7c76d36009daa01b2317439c1f975f7932b2
|
/exercicios/ex115/arquivo.py
|
dbcbd133583ca6ae2edba87857cfb65ef4e83003
|
[] |
no_license
|
MatheusOldAccount/Exerc-cios-de-Python-do-Curso-em-Video
|
5f26b5a2867fa1a2e36b486a809dfbe8b107b8c2
|
5696c49d3caf5cae817217a2da0598d1cf794f5b
|
refs/heads/master
| 2022-03-22T10:49:33.666660
| 2019-11-25T21:24:43
| 2019-11-25T21:24:43
| 224,052,682
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
def verPessoas():
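# "View people": prints the current contents of lista.txt.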
print('-' * 30)
arq = open('lista.txt', 'r')
print(arq.read())
arq.close()
def adicionarPessoas():
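# "Add people": appends a name and a validated integer age to lista.txt.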
print('-' * 30)
arq = open('lista.txt', 'a')
nome = str(input('Nome: ')).strip().capitalize()
válido = False
while True:
try:
idade = int(input('Idade: '))
except:
print('\033[31mERRO: por favor, digite um número inteiro válido.\033[m')
else:
print(f'Novo registro de {nome} adicionado')
arq.write(f'\n{nome:<30}{idade} anos')
válido = True
if válido:
break
arq.close()
|
[
"matheustavares1165@gmail.com"
] |
matheustavares1165@gmail.com
|
fc420d11845612290556385f4ef93a72a5b9d5d1
|
454365e5c77ff9e3c2fba0d60766e2ee0dac1ac6
|
/noticias_ner/api/teste_clliente.py
|
95d706990bf32127292eb16f18502afc54f7b6b2
|
[] |
no_license
|
SecexSaudeTCU/noticias_ner
|
6aa48f9b076cb20a784244cef58ac270a53471c5
|
2d64041bc18c8c53d463d34e41553b5c2ad4f48e
|
refs/heads/master
| 2023-05-04T00:30:53.364722
| 2021-05-25T19:28:31
| 2021-05-25T19:28:31
| 296,355,526
| 14
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
import requests
headers = {
'accept': 'application/json',
'Content-Type': 'text/plain',
}
params = (
('tipos', 'ORGANIZAÇÃO,INSTITUIÇÃO PÚBLICA,LOCAL,PESSOA'),
('buscar-cnpj', 'N'),
)
texto = 'O Tribunal de Contas da União é um órgão público sediado em Brasília, com atribuição de julgamento de contas de' \
' gestores que utilizam recursos públicos. Também aprecia as contas do Presidente da República. A empresa ' \
'SKY LINE teve suas contas julgadas irregulares por má gestão de recurso público.'
r = requests.post('http://localhost:5000/ner/entidades-texto', headers=headers, params=params,
data=texto.encode(encoding='utf-8'))
r.json()
|
[
"moniquelouise@gmail.com"
] |
moniquelouise@gmail.com
|
075d28bc2668d9a7c0e17558b267dfb9c839c594
|
2fe37b71c486d4e2de6fb263d89993c9b99f0d37
|
/02Backstage/Python/00Test/Utils/AutoUtils/Scp.py
|
dfb2a211b4ea76d7726b73a04bd840003be28233
|
[] |
no_license
|
smart-town/MyNotes
|
87cb058753163ab7df41f73389af4e31f8288a52
|
87a0e689c1dcf9a79ef1059e6332c1a89a309ecc
|
refs/heads/master
| 2022-07-19T19:48:15.055165
| 2022-07-17T00:15:09
| 2022-07-17T00:15:09
| 160,528,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
""" copy source files to destination """
import os
import OsOrders
class CopyBasic(object):
def __init__(self, source, destination):
self.source = source
self.destination = destination
@staticmethod
def dealRemote(data):
if type(data) == dict:
return "{}@{}".format(data["user"], data["host"])
else:
return data
def doCopy(self):
pass
class ScpCopy(CopyBasic):
def __init__(self, source, destination, remote=""):
super(ScpCopy, self).__init__(source, destination)
self.remote = remote
def doCopy(self):
order = OsOrders.doScp(self.remote, self.source, self.destination)
print("do scp: %s" % (order))
class LocalCopy(CopyBasic):
def doCopy(self):
print("DO LOCAL COPY %s -> %s" % (self.source, self.destination))
def execute(self):
self.doCopy()
if __name__ == "__main__":
LocalCopy("C:", "D:").doCopy()
ScpCopy("C:/Users/luhha/Desktop", "C:/Users/luhha/Desktop/test", "root@127.0.0.1").doCopy()
|
[
"luhh18@outlook.com"
] |
luhh18@outlook.com
|
4fa582c5ada4c2d880f47491a4e012018bc74dbb
|
8c38028da7a6c9443c3b9163a1db64773e39e755
|
/users.py
|
2680f52e307d0118cb8595d737a3149390923672
|
[] |
no_license
|
Vanhatai/PWS-B4.12
|
bf8e8f230c7f547256e7f8be4628dd3342769ae3
|
a1061d7989e7c18bd9d6b84238a99a506cb91345
|
refs/heads/master
| 2020-07-06T18:54:06.837311
| 2019-08-19T06:16:24
| 2019-08-19T06:16:24
| 203,109,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,987
|
py
|
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# constant describing how to connect to the database
DB_PATH = "sqlite:///sochi_athletes.sqlite3"
# base class for the table models
Base = declarative_base()
class User(Base):
"""
Describes the structure of the user table that stores users' registration data
"""
# set the table name
__tablename__ = "user"
# user identifier, primary key
id = sa.Column(sa.Integer, primary_key=True)
first_name = sa.Column(sa.Text)
last_name = sa.Column(sa.Text)
email = sa.Column(sa.Text)
gender = sa.Column(sa.Text)
birthdate = sa.Column(sa.Text)
height = sa.Column(sa.Float)
def connect_db():
"""
Establishes a connection to the database, creates the tables if they do not exist yet, and returns a session object
"""
# create a connection to the database
engine = sa.create_engine(DB_PATH)
# create the tables described above
Base.metadata.create_all(engine)
# create a session factory
session = sessionmaker(engine)
# return a session
return session()
def request_data():
"""
Prompts the user for their data and creates a new User from it
"""
# prompt for the data
first_name = input("Введите имя: ")
last_name = input("Введите фамилию: ")
email = input("Адрес электронной почты: ")
gender = input("Пол (Male/Female): ")
birthdate = input("Дата рождения (YYYY-MM-DD): ")
height = float(input("Рост (м): "))
# create a new user
user = User(
first_name=first_name,
last_name=last_name,
email=email,
gender=gender,
birthdate=birthdate,
height=height,
)
# return the created user
return user
def main():
"""
Handles the interaction with the user and processes the user input
"""
session = connect_db()
# request the user's data
user = request_data()
# add the new user to the session
session.add(user)
session.commit()
print("Спасибо, данные сохранены!")
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d7c32cef7fa6a9d3bb7a7a05a8ea899c77750ba8
|
90207cc0222440c069b261795bba1e902834f545
|
/MAGIC-UNICORNS/project/main/apps/course/models.py
|
bf35453ec0a8d046ddcb4ece77d2ef64c1138387
|
[] |
no_license
|
Stormlight-Coding/random-projects
|
13d1cc3d32cb86399296b923ab450034891979c0
|
605e196337dfa9bd5480b428a5a92ce193081871
|
refs/heads/MASTER
| 2021-06-14T09:44:03.150283
| 2020-04-09T22:25:35
| 2020-04-09T22:25:35
| 254,489,696
| 0
| 0
| null | 2021-06-10T22:45:08
| 2020-04-09T22:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
from __future__ import unicode_literals
from django.db import models
class Course(models.Model):
name = models.CharField(max_length=255)
desc = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
|
[
"jonposo.music@gmail.com"
] |
jonposo.music@gmail.com
|
a1900950b36a1a0eeada9e202f153c8985039b65
|
e342abb1306e4b083f235a2992ffb863c96c9a86
|
/examples/user/user_playlists.py
|
f71f755bceeeb2c38e3122cc3e6f50cb403624cb
|
[
"MIT"
] |
permissive
|
LorenzoCavatorta/spotify.py
|
102422e6588cb6c49cff026562e37f28cb0650eb
|
7f375f030fbac4ef3dbbd577a898b4d72f37b72b
|
refs/heads/master
| 2020-08-01T17:09:06.795264
| 2019-09-30T12:24:57
| 2019-09-30T12:24:57
| 211,055,943
| 0
| 0
|
MIT
| 2019-09-26T09:50:46
| 2019-09-26T09:50:46
| null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import asyncio
import spotify
client = spotify.Client('someid', 'somesecret')
async def main():
# You can use a user with a http presence
user = await client.user_from_token('sometoken')
# Or you can get a generic user by ID ('someuserid' below is just a placeholder)
user = await client.get_user('someuserid')
# returns a list of spotify.Playlist objects
playlists = await user.get_playlists()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
|
[
"m3nta1@yahoo.com"
] |
m3nta1@yahoo.com
|
26a21150eb40414f26615bc32ee8f6ff76b8e9bc
|
102f8a77f7e16d5df7775f97e741adf3fa43f8c3
|
/practice-painting/painting.py
|
20add441638a39bfc6b33f077ddf5dc29c69d390
|
[] |
no_license
|
Horgix/google-hashcode-2016
|
f516d489d7b9a9af570e1ebfc6a7f7dea9ec7eb5
|
6b256ce9e9f23276fa1c03ae6979f77befb7e6e5
|
refs/heads/master
| 2021-01-20T11:23:24.646360
| 2016-02-19T11:07:30
| 2016-02-19T11:07:30
| 51,439,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
#! /usr/bin/env python3
from enum import Enum
class Cell(Enum):
painted = '#'
clear = '.'
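# Expected input format (an illustrative sketch inferred from import_from_file below):
# a first line "rows columns", then `rows` lines of `columns` cells, each '#' or '.', e.g.
#   2 3
#   .#.
#   ##.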
class Surface:
def __init__(self):
self.rows = 0
self.columns = 0
self.matrix = {}
def import_from_file(self, filename):
with open(filename, 'r') as f:
self.rows, self.columns = tuple(f.readline().split())
self.rows = int(self.rows)
self.columns = int(self.columns)
for lineNb, line in enumerate(f.readlines()):
if lineNb >= self.rows:
raise Exception("Line number out of bounds")
line = line.rstrip('\n')
self.matrix[lineNb] = {}
for columnNb, cell in enumerate(line):
if columnNb >= self.columns:
raise Exception("Column number out of bounds")
self.matrix[lineNb][columnNb] = Cell(cell)
def __str__(self):
out = ""
for i in range(self.rows):
for j in range(self.columns):
out += self.matrix[i][j].value
out += '\n'
return out
s = Surface()
s.import_from_file('simple.in')
print(s)
|
[
"alexis.horgix.chotard@gmail.com"
] |
alexis.horgix.chotard@gmail.com
|
b01a5b2a81825618861d0c4319567fa150a4eb6b
|
c7b958c683f916b924e3f4e74e41561e037ef34c
|
/sneeu/apps/tumble/urls.py
|
51565e3a303ad7295f4fdc58dab634b8b11bf0bf
|
[] |
no_license
|
sneeu/sneeu_com
|
c32f6d044a598830d53e6334611f2b5a2c8b4c2f
|
653388c50f00966369fd5a1a43bd4ff910300633
|
refs/heads/master
| 2021-01-25T04:08:53.556659
| 2009-04-26T19:45:06
| 2009-04-26T19:45:06
| 56,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from django.conf.urls.defaults import *
from models import Log
import views
info_dict = {
'queryset': Log.objects.all().select_related(),
'paginate_by': 20,
}
urlpatterns = patterns('',
url(r'^$',
'django.views.generic.list_detail.object_list', info_dict, name='log_list'),
url(r'^tumble/update/$',
views.update, name='update'),
# url(r'^tumble/(?P<year>\d{4})/(?P<month>1[012]?|[2-9])/(?P<slug>[^/]+)/$',
# views.post_detail, name='post_detail'),
# url(r'^tumble/(?P<year>\d{4})/(?P<month>1[012]?|[2-9])/(?P<slug>[^/]+)/add-comment/$',
# views.add_comment, name='add_comment'),
# url(r'^tumble/(?P<url>[a-z]+)/$', 'django.contrib.syndication.views.feed',
# {'feed_dict': feeds.FEEDS}),
)
|
[
"john@sneeu.com"
] |
john@sneeu.com
|
b0e487b584903313154d9dd72e6c085f2b3b95d9
|
4664328482163fd927603d66f47209b28471cf0f
|
/venv/lib/python3.7/site-packages/datalad/metadata/extractors/tests/test_datacite_xml.py
|
30ed2525d0915a74e0f941dc65be94d72cbe0d4c
|
[
"MIT"
] |
permissive
|
emmetaobrien/dats-validator
|
08706ddab795d272391b3611cd3ba0de8c4a91a1
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
refs/heads/master
| 2020-12-19T05:03:17.179117
| 2020-01-22T17:28:38
| 2020-01-22T17:28:38
| 235,626,049
| 0
| 0
|
MIT
| 2020-01-22T17:24:56
| 2020-01-22T17:24:56
| null |
UTF-8
|
Python
| false
| false
| 2,982
|
py
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test datacite metadata extractor """
from simplejson import dumps
from datalad.metadata.extractors.datacite import MetadataExtractor
from datalad.metadata.metadata import _get_metadatarelevant_paths
from nose.tools import assert_equal
from datalad.tests.utils import with_tree
from datalad.api import create
xml_content = """\
<?xml version="1.0" encoding="UTF-8"?>
<resource xmlns="http://datacite.org/schema/kernel-2.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://datacite.org/schema/kernel-2.2 http://schema.datacite.org/meta/kernel-2.2/metadata.xsd">
<identifier identifierType="DOI">10.6080/K0QN64NG</identifier>
<creators>
<creator>
<creatorName>Last1, First1</creatorName>
</creator>
<creator>
<creatorName>Last2, First2</creatorName>
</creator>
</creators>
<titles>
<title>Main
title</title>
<title titleType="AlternativeTitle">CRCNS.org xxx-1</title>
</titles>
<publisher>CRCNS.org</publisher>
<publicationYear>2011</publicationYear>
<subjects>
<subject>Neuroscience</subject>
<subject>fMRI</subject>
</subjects>
<language>eng</language>
<resourceType resourceTypeGeneral="Dataset">Dataset/Neurophysiology</resourceType>
<sizes>
<size>10 GB</size>
</sizes>
<formats>
<format>application/matlab</format>
<format>NIFTY</format>
</formats>
<version>1.0</version>
<descriptions>
<description descriptionType="Other">
Some long
description.
</description>
</descriptions>
<relatedIdentifiers>
<relatedIdentifier relatedIdentifierType="DOI" relationType="IsDocumentedBy">10.1016/j.cub.2011.08.031</relatedIdentifier>
</relatedIdentifiers>
</resource>
"""
@with_tree(tree={'.datalad': {'meta.datacite.xml': xml_content}})
@with_tree(tree={'elsewhere': {'meta.datacite.xml': xml_content}})
def test_get_metadata(path1, path2):
for p in (path1, path2):
print('PATH')
ds = create(p, force=True)
ds.add('.')
meta = MetadataExtractor(
ds,
_get_metadatarelevant_paths(ds, []))._get_dataset_metadata()
assert_equal(
dumps(meta, sort_keys=True, indent=2),
"""\
{
"author": [
"Last1, First1",
"Last2, First2"
],
"citation": [
"10.1016/j.cub.2011.08.031"
],
"description": "Some long description.",
"formats": [
"application/matlab",
"NIFTY"
],
"name": "CRCNS.org xxx-1",
"sameas": "10.6080/K0QN64NG",
"shortdescription": "Main title",
"tag": [
"Neuroscience",
"fMRI"
],
"version": "1.0"
}""")
|
[
"giulia.ippoliti@mail.mcgill.ca"
] |
giulia.ippoliti@mail.mcgill.ca
|
bebb7ac47a7598ad344f55ae7d57daba858e56ea
|
c07380914a44df334194f234c33858f357365c19
|
/ENV/lib/python2.7/site-packages/theano/sandbox/gpuarray/neighbours.py
|
1f0c7529213f8f6c6d23f989bd3a641915b97fa9
|
[] |
no_license
|
damianpolan/Music-Genre-Classification
|
318952ae7de5d0b0bdf5676e28071c7b38d0e1c5
|
acd723ae1432ce798866ebb97ef3c484db37e971
|
refs/heads/master
| 2022-12-24T09:23:55.514337
| 2016-03-22T14:49:28
| 2016-03-22T14:49:28
| 42,965,899
| 4
| 4
| null | 2022-12-12T20:26:24
| 2015-09-22T23:05:37
|
Python
|
UTF-8
|
Python
| false
| false
| 18,919
|
py
|
import numpy
from theano import Op, Apply, config
from theano.gof import local_optimizer
from theano.tensor.nnet.neighbours import Images2Neibs
import theano.tensor as T
try:
import pygpu
from pygpu import gpuarray, elemwise
except ImportError:
pass
from theano.sandbox.gpuarray.basic_ops import (as_gpuarray_variable,
host_from_gpu, gpu_from_host)
from theano.sandbox.gpuarray.opt import register_opt as register_gpu_opt
from theano.sandbox.gpuarray.opt import op_lifter as op_lifter
from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.comp import NVCC_compiler
class GpuImages2Neibs(Images2Neibs, Op):
def __init__(self, mode='valid'):
if mode not in ['valid', 'ignore_borders', 'wrap_centered']:
raise NotImplementedError("Only the mode valid, ignore_borders"
" and wrap_centered"
" have been implemented for the op"
" GpuImages2Neibs")
self.mode = mode
def make_node(self, ten4, neib_shape, neib_step):
ten4 = as_gpuarray_variable(ten4)
neib_shape = T.as_tensor_variable(neib_shape)
neib_step = T.as_tensor_variable(neib_step)
assert ten4.ndim == 4
assert neib_shape.ndim == 1
assert neib_step.ndim == 1
assert "int" in neib_shape.dtype
assert "int" in neib_step.dtype
return Apply(self, [ten4, neib_shape, neib_step],
[GpuArrayType(broadcastable=(False, False),
dtype=ten4.type.dtype)()])
def c_code_cache_version(self):
return (9, 1)
def c_headers(self):
return ['cuda.h', '<gpuarray/extension.h>', '<numpy_compat.h>',
'<gpuarray/ext_cuda.h>']
def c_compiler(self):
return NVCC_compiler
def c_init_code(self):
return ['setup_ext_cuda();']
def c_support_code_apply(self, node, nodename):
dtype_ten4 = node.inputs[0].dtype
dtype_z = node.outputs[0].dtype
mode = self.mode
return """
//a version that use less register but don't work in all case.
static __global__ void k_multi_warp_less_%(nodename)s(
const int nb_batch,
const int nb_stack,
const int height,
const int width,
const int c,
const int d,
const int step_x,
const int step_y,
const int grid_c,
const int grid_d,
const int stride0, const int stride1,
const int stride2, const int stride3,
npy_%(dtype_ten4)s * global_ten4,
const int out_s0, const int out_s1,
npy_%(dtype_z)s * global_out
)
{
const int wrap_centered_idx_shift_x = c/2;
const int wrap_centered_idx_shift_y = d/2;
for(int tblock = blockIdx.x*blockDim.z+threadIdx.z;
tblock<nb_batch*nb_stack*grid_c*grid_d;
tblock+=gridDim.x*blockDim.z){
const int b = tblock%%grid_d;
int left = tblock/grid_d;
const int a = left%%grid_c;
left = left/grid_c;
const int s = left%%nb_stack;
left = left/nb_stack;
const int n = left;
if(n>nb_batch)continue;
if(s>nb_stack)continue;
if(a>grid_c)continue;
if(b>grid_d)continue;
int z_row = b + grid_d*(a + grid_c*
(s + nb_stack*n));
int i = threadIdx.y; // loop over c
{
int ten4_2 = i + a * step_x;
if("%(mode)s"=="wrap_centered"){
ten4_2 -= wrap_centered_idx_shift_x;
if ( ten4_2 < 0 )
ten4_2 += height;
else if (ten4_2 >= height)
ten4_2 -= height;
}
int j = threadIdx.x; // loop over d
{
int ten4_3 = j + b * step_y;
if("%(mode)s"=="wrap_centered"){
ten4_3 -= wrap_centered_idx_shift_y;
if ( ten4_3 < 0 )
ten4_3 += width;
else if (ten4_3 >= width)
ten4_3 -= width;
}
int ten4_idx = stride3*ten4_3 +
stride2*ten4_2 +
stride1*s + stride0*n;
int z_col = j + d * i;
int z_idx = z_col * out_s1 +
z_row * out_s0;
global_out[z_idx] = global_ten4[ten4_idx];
}
}
}
}
static __global__ void k_multi_warp_%(nodename)s(
const int nb_batch,
const int nb_stack,
const int height,
const int width,
const int c,
const int d,
const int step_x,
const int step_y,
const int grid_c,
const int grid_d,
const int stride0, const int stride1,
const int stride2, const int stride3,
npy_%(dtype_ten4)s * global_ten4,
const int out_s0, const int out_s1,
npy_%(dtype_z)s * global_out
)
{
const int wrap_centered_idx_shift_x = c/2;
const int wrap_centered_idx_shift_y = d/2;
for(int tblock = blockIdx.x*blockDim.z+threadIdx.z;
tblock<nb_batch*nb_stack*grid_c*grid_d;
tblock+=gridDim.x*blockDim.z){
const int b = tblock%%grid_d;
int left = tblock/grid_d;
const int a = left%%grid_c;
left = left/grid_c;
const int s = left%%nb_stack;
left = left/nb_stack;
const int n = left;
if(n>nb_batch)continue;
if(s>nb_stack)continue;
if(a>grid_c)continue;
if(b>grid_d)continue;
int z_row = b + grid_d*(a + grid_c*
(s + nb_stack*n));
// loop over c
for (int i = threadIdx.y; i < c; i+=blockDim.y)
{
int ten4_2 = i + a * step_x;
if("%(mode)s"=="wrap_centered"){
ten4_2 -= wrap_centered_idx_shift_x;
if ( ten4_2 < 0 )
ten4_2 += height;
else if (ten4_2 >= height)
ten4_2 -= height;
}
// loop over d
for (int j = threadIdx.x; j < d; j+=blockDim.x)
{
int ten4_3 = j + b * step_y;
if("%(mode)s"=="wrap_centered"){
ten4_3 -= wrap_centered_idx_shift_y;
if ( ten4_3 < 0 )
ten4_3 += width;
else if (ten4_3 >= width)
ten4_3 -= width;
}
int ten4_idx = stride3*ten4_3 +
stride2*ten4_2 +
stride1*s + stride0*n;
int z_col = j + d * i;
int z_idx = z_col * out_s1 +
z_row * out_s0;
global_out[z_idx] = global_ten4[ten4_idx];
}
}
}
}
""" % locals()
def c_code(self, node, name, inp, out, sub):
dtype_ten4 = node.inputs[0].dtype
dtype_neib_shape = node.inputs[1].dtype
dtype_neib_step = node.inputs[2].dtype
dtype_z = node.outputs[0].dtype
itemsize_ten4 = numpy.dtype(dtype_ten4).itemsize
itemsize_z = numpy.dtype(dtype_z).itemsize
typecode_z = pygpu.gpuarray.dtype_to_typecode(node.outputs[0].dtype)
ten4, neib_shape, neib_step = inp
z, = out
fail = sub['fail']
mode = self.mode
if config.gpuarray.sync:
cnda_thread_sync = "GpuArray_sync(&%(z)s->ga);" % dict(z=z)
else:
cnda_thread_sync = ""
return """
#ifndef CEIL_INTDIV
#define CEIL_INTDIV(a, b) ((a/b) + ((a %% b) ? 1: 0))
#endif
int grid_c = -1;
int grid_d = -1;
{
if (PyGpuArray_NDIM(%(ten4)s) != 4)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: pvals wrong rank");
%(fail)s;
}
if (PyArray_NDIM(%(neib_shape)s) != 1)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: unis wrong rank");
%(fail)s;
}
if (PyArray_DIMS(%(neib_shape)s)[0] != 2)
{
PyErr_Format(PyExc_ValueError,
"GpuImages2Neibs: neib_shape has to contain two"
" elements");
%(fail)s;
}
const int c = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 0);
const int d = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 1);
const npy_intp step_x = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 0);
const npy_intp step_y = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 1);
if ( "%(mode)s" == "wrap_centered") {
if (c%%2!=1 || d%%2!=1){
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: in mode wrap_centered need patch with odd shapes");
%(fail)s;
}
if ( PyGpuArray_DIMS(%(ten4)s)[2] < c ||
PyGpuArray_DIMS(%(ten4)s)[3] < d)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: in wrap_centered mode,"
" don't support image shapes smaller then"
" the patch shapes: neib_shape=(%%d,%%d),"
" ten4[2:]=[%%d,%%d]",
c, d, PyGpuArray_DIMS(%(ten4)s)[2],
PyGpuArray_DIMS(%(ten4)s)[3]);
%(fail)s;
}
grid_c = CEIL_INTDIV(((PyGpuArray_DIMS(%(ten4)s))[2]),
step_x);
grid_d = CEIL_INTDIV(((PyGpuArray_DIMS(%(ten4)s))[3]),
step_y);
}else if ( "%(mode)s" == "valid") {
if ( ((PyGpuArray_DIMS(%(ten4)s))[2] < c) ||
((((PyGpuArray_DIMS(%(ten4)s))[2]-c) %% step_x)!=0))
{
PyErr_Format(PyExc_TypeError, "GpuImages2Neibs:"
" neib_shape[0]=%%d, neib_step[0]=%%d and"
" ten4.shape[2]=%%d not consistent",
c, step_x,
PyGpuArray_DIMS(%(ten4)s)[2]);
%(fail)s;
}
if ( ((PyGpuArray_DIMS(%(ten4)s))[3] < d) ||
((((PyGpuArray_DIMS(%(ten4)s))[3]-d) %% step_y)!=0))
{
PyErr_Format(PyExc_TypeError, "GpuImages2Neibs:"
" neib_shape[1]=%%d, neib_step[1]=%%d and"
" ten4.shape[3]=%%d not consistent",
d, step_y,
PyGpuArray_DIMS(%(ten4)s)[3]);
%(fail)s;
}
//number of patch in height
grid_c = 1+(((PyGpuArray_DIMS(%(ten4)s))[2]-c)/step_x);
//number of patch in width
grid_d = 1+(((PyGpuArray_DIMS(%(ten4)s))[3]-d)/step_y);
}else if ( "%(mode)s" == "ignore_borders") {
//number of patch in height
grid_c = 1+(((PyGpuArray_DIMS(%(ten4)s))[2]-c)/step_x);
//number of patch in width
grid_d = 1+(((PyGpuArray_DIMS(%(ten4)s))[3]-d)/step_y);
}else{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs:: unknown mode '%(mode)s'");
%(fail)s;
}
// new dimensions for z
const int z_dim1 = c * d;
const int z_dim0 = grid_c
* grid_d
* PyGpuArray_DIMS(%(ten4)s)[1]
* PyGpuArray_DIMS(%(ten4)s)[0];
if ((NULL == %(z)s)
|| (PyGpuArray_DIMS(%(z)s)[0] != z_dim0)
|| (PyGpuArray_DIMS(%(z)s)[1] != z_dim1))
{
Py_XDECREF(%(z)s);
size_t dims[2];
dims[0] = z_dim0;
dims[1] = z_dim1;
%(z)s = pygpu_empty(2, dims, %(typecode_z)s,
GA_C_ORDER, pygpu_default_context(),
Py_None);
if (!%(z)s)
{
PyErr_SetString(PyExc_MemoryError, "GpuImages2Neibs:"
" failed to alloc z output");
%(fail)s;
}
}
}
{ // NESTED SCOPE
const int nb_batch = PyGpuArray_DIMS(%(ten4)s)[0];
const int nb_stack = PyGpuArray_DIMS(%(ten4)s)[1];
const int height = PyGpuArray_DIMS(%(ten4)s)[2];
const int width = PyGpuArray_DIMS(%(ten4)s)[3];
const int c = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 0);
const int d = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 1);
const npy_intp step_x = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 0);
const npy_intp step_y = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 1);
dim3 n_threads(d,c,1);
//There is a max of 512 threads per block
while(n_threads.x*n_threads.y>512 && n_threads.y>1)n_threads.y--;
while(n_threads.x*n_threads.y>512 && n_threads.x>1)n_threads.x--;
//Make bigger blocks to get a better memory access pattern and
//higher core utilisation for smaller patch sizes.
while(c*d*(n_threads.z+1) < 128 && n_threads.z<64 &&
n_threads.z<PyGpuArray_DIMS(%(z)s)[0]){
n_threads.z++;
}
int nb_block;
if (PyGpuArray_DIMS(%(z)s)[0] %% n_threads.z == 0)
nb_block = PyGpuArray_DIMS(%(z)s)[0] / n_threads.z;
else
nb_block = (PyGpuArray_DIMS(%(z)s)[0] / n_threads.z) + 1;
dim3 n_blocks(std::min(32*1024,nb_block));
int n_shared = 0;
void (*f)(int, int, int ,int,
int, int, int ,int,
int, int,
int, int, int, int,
npy_%(dtype_ten4)s*,
int, int,
npy_%(dtype_z)s*);
if(n_threads.x==d && n_threads.y==c){
f = k_multi_warp_less_%(name)s;
}else{
f = k_multi_warp_%(name)s;
}
f<<<n_blocks, n_threads, n_shared>>>(
nb_batch,
nb_stack,
height, width,
c, d, step_x, step_y,
grid_c, grid_d,
PyGpuArray_STRIDES(%(ten4)s)[0] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[1] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[2] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[3] / %(itemsize_ten4)s,
(npy_%(dtype_ten4)s*)(
((char *)cuda_get_ptr(%(ten4)s->ga.data)) +
%(ten4)s->ga.offset),
PyGpuArray_STRIDES(%(z)s)[0] / %(itemsize_z)s,
PyGpuArray_STRIDES(%(z)s)[1] / %(itemsize_z)s,
(npy_%(dtype_z)s*)(((char *)cuda_get_ptr(%(z)s->ga.data)) +
%(z)s->ga.offset)
);
%(cnda_thread_sync)s
cudaError_t sts = cudaGetLastError();
if (cudaSuccess != sts)
{
PyErr_Format(PyExc_RuntimeError, "GpuImages2Neibs:"
" Cuda error: %%s: %%s. (grid: %%i x %%i;"
" block: %%i x %%i x %%i; shared: %%i)\\n",
"k_multi_warp_%(name)s",
cudaGetErrorString(sts),
n_blocks.x,
n_blocks.y,
n_threads.x,
n_threads.y,
n_threads.z,
n_shared);
%(fail)s;
}
} // END NESTED SCOPE
""" % locals()
@op_lifter([Images2Neibs])
def use_gpu_images2neibs(node):
if node.op.mode in ['valid', 'ignore_borders', 'wrap_centered']:
return GpuImages2Neibs(node.op.mode)
register_gpu_opt()(use_gpu_images2neibs)
|
[
"damian.polan@gmail.com"
] |
damian.polan@gmail.com
|
7e61c3ab69667c4955fafc38691acf34ab01cb3a
|
1f3e98e3bb36765f869ca3177a47c53ce302ec70
|
/test/output/001.py
|
909bc70c708589058665a3782d4e0a85027d506b
|
[
"MIT"
] |
permissive
|
EliRibble/pyfmt
|
d73dec1061e93a28ad738139edf523e1678d0e19
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
refs/heads/master
| 2020-04-01T10:57:18.521463
| 2019-05-24T21:39:18
| 2019-05-24T21:39:18
| 153,139,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
def main():
print("hello world")
|
[
"eli@authentise.com"
] |
eli@authentise.com
|
b442efd2544e3d179838e135dad1cad579f97c26
|
ef9a176d58d6b6b5c3c135580bd2283dafe2047e
|
/product_catalogs/odbc_wrappers/MongoConnector.py
|
f178d61c62cc66d3079b6295c5591d7b49b48928
|
[] |
no_license
|
stevesette/DS_4300
|
fe60f15e33be023697266afceb11949116ceaa55
|
9971036a4f67782f76eac736ea8cd7122c1e0100
|
refs/heads/master
| 2020-12-05T19:03:06.571948
| 2020-04-11T17:03:23
| 2020-04-11T17:03:23
| 232,217,392
| 0
| 0
| null | 2020-03-21T03:07:57
| 2020-01-07T01:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 779
|
py
|
import pymongo
class MongoConnector:
"""
We define the database as 'hw3' manually in this assignment to keep things simple. We could have taken
it in as a variable when we defined the connection, but with no other ODBC connector to compare against
in this assignment, that seemed a bit pointless.
"""
def __init__(self):
self.connection = pymongo.MongoClient("mongodb://127.0.0.1:27017/")
self.db = self.connection["hw3"]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def insert_file(self, filename, filedata):
self.db[filename].insert_many(filedata)
def run_query(self, collection, query):
return [x for x in self.db[collection].find(query)]
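# Minimal usage sketch (assumes a MongoDB instance listening on 127.0.0.1:27017;
# the collection name and documents below are illustrative only):
#
# with MongoConnector() as conn:
#     conn.insert_file('books', [{'title': 'Dune'}, {'title': 'Foundation'}])
#     print(conn.run_query('books', {'title': 'Dune'}))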
|
[
"setteducati.s@husky.neu.edu"
] |
setteducati.s@husky.neu.edu
|
d91d8ef9fcacbf89506605d4bc882aabff544142
|
edc3a9f7bbaf119f44c4a7ff73fbcf26a2e05881
|
/table/views.py
|
fe050c5eee7315d825597befc4bb61d6ae5947ec
|
[] |
no_license
|
w39z/VM-Store
|
eb37a05a5bd2cf081980d5eba3d3d4cfbe4042c7
|
0c62f8999e9f83e0e96c5f8057a5337327709561
|
refs/heads/master
| 2023-09-04T21:37:13.976938
| 2021-10-22T16:35:06
| 2021-10-22T16:35:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
from django.shortcuts import render
from table.filters import *
def index(request):
items = Item.objects.all()
filter = ItemFilter(request.GET, queryset=items)
items = filter.qs
return render(request, 'index.html',
{'items': items,
'filter': filter})
|
[
"w39z@mail.ru"
] |
w39z@mail.ru
|
f5dc838839a6a1297a8ed33656fed6d294e04a4c
|
b2755ce7a643ae5c55c4b0c8689d09ad51819e6b
|
/anuvaad-etl/anuvaad-extractor/aligner/etl-aligner/service/alignwflowservice.py
|
4621a73752a35a772c9831afdbb8a067d95dc515
|
[
"MIT"
] |
permissive
|
project-anuvaad/anuvaad
|
96df31170b27467d296cee43440b6dade7b1247c
|
2bfcf6b9779bf1abd41e1bc42c27007127ddbefb
|
refs/heads/master
| 2023-08-17T01:18:25.587918
| 2023-08-14T09:53:16
| 2023-08-14T09:53:16
| 265,545,286
| 41
| 39
|
MIT
| 2023-09-14T05:58:27
| 2020-05-20T11:34:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
#!/bin/python
import logging
from configs.alignerconfig import anu_dp_wf_aligner_out_topic
from utilities.alignmentutils import AlignmentUtils
from repository.alignmentrepository import AlignmentRepository
from validator.alignmentvalidator import AlignmentValidator
from kafkawrapper.alignmentproducer import Producer
log = logging.getLogger('file')
alignmentutils = AlignmentUtils()
repo = AlignmentRepository()
producer = Producer()
util = AlignmentUtils()
validator = AlignmentValidator()
class AlignWflowService:
def __init__(self):
pass
# Wrapper to build a response compatible with the anuvaad etl wf manager.
def getwfresponse(self, result, object_in):
wfresponse = {"taskID": object_in["taskID"], "jobID": object_in["jobID"], "input": result["input"],
"output": result["output"], "workflowCode": object_in["workflowCode"],
"stepOrder": object_in["stepOrder"], "status": "SUCCESS", "state": "SENTENCES-ALIGNED",
"tool": object_in["tool"], "metadata": object_in["metadata"],
"taskStartTime": result["startTime"], "taskEndTime": result["endTime"]}
return wfresponse
def update_wflow_details(self, result, object_in):
wf_res = self.getwfresponse(result, object_in)
producer.push_to_queue(wf_res, anu_dp_wf_aligner_out_topic)
|
[
"vishalmahuli8@gmail.com"
] |
vishalmahuli8@gmail.com
|
7c3dcf56c5f50a0d2b92114c2556ecdb85840978
|
a67676617c4777a3a9c6226624c0fd86557cb277
|
/27_remove_element.py
|
35a4a832b623ba749c14e457b64cf12e360abee5
|
[] |
no_license
|
AdditionalPylons/leetcode
|
2599d3c67825d4a50eccc5ea2b56ae2e33e9fbf2
|
b1873d8bb023d1e7182276692cb8c953a8bab8f6
|
refs/heads/master
| 2022-12-04T15:09:48.461400
| 2020-08-18T19:44:25
| 2020-08-18T19:44:25
| 288,321,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
"""Given an array nums and a value val, remove all instances of that value in-place and return the new length.
Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
The order of elements can be changed. It doesn't matter what you leave beyond the new length."""
"""Runtime: 36 ms, faster than 57.44% of Python3 online submissions for Remove Element.
Memory Usage: 13.8 MB, less than 68.23% of Python3 online submissions for Remove Element."""
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
shift_vector = 0
final_length = len(nums)
for i in range(len(nums)):
if nums[i] == val:
shift_vector -=1
final_length -=1
elif shift_vector != 0:
nums[i+shift_vector] = nums[i]
return final_length
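# Worked example (illustrative): nums = [3, 2, 2, 3], val = 3
#   i=0: nums[0] == 3 -> shift_vector = -1, final_length = 3
#   i=1: shift_vector != 0 -> nums[0] = 2
#   i=2: shift_vector != 0 -> nums[1] = 2
#   i=3: nums[3] == 3 -> shift_vector = -2, final_length = 2
# Returns 2; the first two slots of nums now hold [2, 2].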
|
[
"bill@popstack.io"
] |
bill@popstack.io
|
15e3d0c90005d336f978715f7fa6b9fbb8df55ad
|
1198238841bedc19e9cc16c2ba22b4c6861cad1f
|
/node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi
|
9fc34374b9ad035291f3ad04b7ab44b64eeb9340
|
[
"MIT"
] |
permissive
|
henryfradley/relax_description
|
1ba44bf25fe740c2c24c92215730faf4f01ab954
|
c7617d06b14a6cb666c69bc0b1530d244fad8ac7
|
refs/heads/master
| 2023-01-19T08:10:24.230424
| 2020-11-21T03:34:38
| 2020-11-21T03:34:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,813
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt67l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/henryfradley/Library/Caches/node-gyp/12.18.3",
"standalone_static_library": 1,
"save_dev": "true",
"dry_run": "",
"legacy_bundling": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/Users/henryfradley/.nvm/versions/node/v12.18.3/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/henryfradley/.nvm/versions/node/v12.18.3/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/henryfradley/.npm-init.js",
"userconfig": "/Users/henryfradley/.npmrc",
"cidr": "",
"node_version": "12.18.3",
"user": "501",
"auth_type": "legacy",
"editor": "vim",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/Users/henryfradley/.nvm/versions/node/v12.18.3/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/henryfradley/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.6 node/v12.18.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/_r/vn81lt5d45s78j324zmphdkm0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/Users/henryfradley/.nvm/versions/node/v12.18.3"
}
}
|
[
"fradleyhenry@gmail.com"
] |
fradleyhenry@gmail.com
|
f96ecaafff5a7d64947e990639f494c299a6634b
|
2c96ab7bef672279c55a9cc6cd64707e0d0362dd
|
/PopulationSnowVsSunGraph/pythondraft.py
|
1e7bc3a484c9667b0bb401afd8b487fde00f656c
|
[] |
no_license
|
kahmed1996/Project-2
|
ac9185a6ba10a1c3966d75cd1d7f5202a4fe66b4
|
8f4265aa21b892f2fedcc2398fb637759afc6771
|
refs/heads/master
| 2022-09-25T15:44:57.988401
| 2019-09-23T11:35:28
| 2019-09-23T11:35:28
| 205,035,363
| 0
| 0
| null | 2022-08-23T17:52:30
| 2019-08-28T22:39:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
# Starting the data work: set up the database connection (originally spread over several notebook cells)
engine = create_engine("sqlite:///citypop.db")
engine
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
# Getting everything set up so the database reflects properly
Base.classes.keys()
onlytable=Base.classes.onlytable2
session = Session(engine)
inspector=inspect(engine)
#Putting the DB into a dataframe
dataframe = pd.read_sql_query("SELECT * FROM onlytable2", engine)
# Putting the selected columns of the dataframe into a new pandas DataFrame
dataframe_PD=pd.DataFrame.from_records(dataframe,columns=['date', 'SNOW', 'NOSNOW'])
dataframe_PD.set_index(["date"])  # note: without reassignment or inplace=True this is a no-op, so 'date' stays a column for the plots below
#Getting the variable types right
dataframe_PD['SNOW']=pd.to_numeric(dataframe_PD['SNOW'])
dataframe_PD['NOSNOW']=pd.to_numeric(dataframe_PD['NOSNOW'])
# Plotting the two series as matplotlib line plots
lines=dataframe_PD.plot.line(x='date',y='SNOW')
lines=dataframe_PD.plot.line(x='date',y='NOSNOW')
plt.savefig('precipitationplot.png')
plt.show()
|
[
"NBLaptop@Nicks-MacBook-Air.local"
] |
NBLaptop@Nicks-MacBook-Air.local
|
351b4eddb3f58e872e3497a9bea27b19aa4d720f
|
4d89652acca24e0bc653e0b4cb5846ceb5b568e4
|
/google-cloud-sdk/lib/surface/run/domain_mappings/list.py
|
ab9c9af7d8e8d0e25820072bf29df8501224e959
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
ibssasimon/LyricLingo
|
410fcec94d2bd3ea75c975c55713f5b8fb913229
|
0dfc951b270912470b36ce0083afd9d4fe41b10a
|
refs/heads/master
| 2021-06-25T10:00:18.215900
| 2020-01-09T00:35:46
| 2020-01-09T00:35:46
| 222,135,399
| 2
| 1
| null | 2021-04-30T20:54:14
| 2019-11-16T17:32:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,061
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for listing all domain mappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.run import commands
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.command_lib.run import resource_args
from googlecloudsdk.command_lib.run import serverless_operations
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class List(commands.List):
"""Lists domain mappings."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To list all Cloud Run domain mappings, run:
$ {command}
""",
}
@classmethod
def CommonArgs(cls, parser):
# Flags specific to connecting to a cluster
cluster_group = flags.GetClusterArgGroup(parser)
namespace_presentation = presentation_specs.ResourcePresentationSpec(
'--namespace',
resource_args.GetNamespaceResourceSpec(),
'Namespace to list domain mappings in.',
required=True,
prefixes=False)
concept_parsers.ConceptParser(
[namespace_presentation]).AddToParser(cluster_group)
parser.display_info.AddFormat(
"""table(
{ready_column},
metadata.name:label=DOMAIN,
route_name:label=SERVICE,
region:label=REGION)""".format(ready_column=pretty_print.READY_COLUMN))
parser.display_info.AddUriFunc(cls._GetResourceUri)
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
def Run(self, args):
"""List available domain mappings."""
conn_context = connection_context.GetConnectionContext(
args, self.ReleaseTrack())
namespace_ref = args.CONCEPTS.namespace.Parse()
with serverless_operations.Connect(conn_context) as client:
self.SetCompleteApiEndpoint(conn_context.endpoint)
return commands.SortByName(client.ListDomainMappings(namespace_ref))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaList(List):
"""Lists domain mappings."""
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
AlphaList.__doc__ = List.__doc__
|
[
"ibssasimon@gmail.com"
] |
ibssasimon@gmail.com
|
8b7e403a7ac3e0d9f15db32a34eb8da70fbb217a
|
0e33e481ce9122b0d43ec033dc1d0c162b67d4ee
|
/blog/migrations/0001_initial.py
|
49409277632f1fe3f1c46d98e394c58b70cb6477
|
[] |
no_license
|
lunnbag/my-first-blog
|
1f2a69f9c6407fc775b925c41a60da1dcfb40bb2
|
f3af52fa53f18793546847074066e559158c89ec
|
refs/heads/master
| 2020-03-20T18:18:33.571922
| 2018-06-16T15:41:50
| 2018-06-16T15:41:50
| 137,582,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-16 12:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"lauralunn@hotmail.co.uk"
] |
lauralunn@hotmail.co.uk
|
48cbc1da28c514c7264777210b3ecea16d3f98c4
|
21ac48139cefea2bf9f4c49509d6c31b12061373
|
/ELK/python_ex1.py
|
ca49a1e122b8e45a8c61ff6de6fa24bb8159e356
|
[] |
no_license
|
dkyos/dev-samples
|
23e60a035c278a2d63c82d84182bfb642a97c5c2
|
61a6cb1d084c85df7c2127da47b6a2bca0cfb6e5
|
refs/heads/master
| 2020-05-21T23:42:36.002672
| 2019-12-04T14:11:36
| 2019-12-04T14:11:36
| 14,510,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
#!/usr/bin/env python
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch()
res = es.search(index="deraw", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
|
[
"dk77.yun@samsung.com"
] |
dk77.yun@samsung.com
|
72d836455d93771000edfb04b38af3e3e1f679e1
|
adbf3c67b8ebe1e74d70a7fd20328d26a2be7400
|
/myparser.py
|
9786e530cca43bc5309295f7bf70841375293e71
|
[] |
no_license
|
mejitos/stack-overflow-overflow
|
3adc4eacfd92d406560243234a87687ea40bcdb1
|
1681864f51f4949fe2f63cbc682d43e63f013e94
|
refs/heads/master
| 2022-12-10T16:22:59.682860
| 2019-12-08T15:54:35
| 2019-12-08T15:54:35
| 226,351,427
| 0
| 0
| null | 2022-07-06T20:23:37
| 2019-12-06T14:53:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,659
|
py
|
from bs4 import BeautifulSoup
from config import Config
from result import Result
class Parser:
"""Class used for parsing wanted information from HTML"""
def parse_urls(self, resource):
"""Parses clean urls out of HTML resource
Args:
resource: HTML resource as a string
Returns:
List of clean urls as strings
"""
soup = BeautifulSoup(resource, Config.PARSER)
parsed_urls = []
urls = soup.find_all('div', class_='result-link')
for url in urls:
parsed_url = url.find('a')['href']
if parsed_url not in parsed_urls:
parsed_urls.append(parsed_url)
return parsed_urls
def parse_results(self, url, resource):
"""Parses results from single results thread
Args:
resource: HTML resource as a string
Returns
List of Result objects
"""
output = []
soup = BeautifulSoup(resource, Config.PARSER)
# TODO: Parse the question as own function
question = soup.find('div', id='question')
q_votes = question.find('div', class_='votecell').getText().replace('\n', ' ').strip().split(' ')[0]
q_text = question.find('div', class_='post-text').getText().replace('\n', ' ').strip()
q_info_container = question.find('div', class_='post-signature owner grid--cell')
q_date = q_info_container.find('span', class_='relativetime').getText().replace('\n', ' ').strip()
q_user = q_info_container.find('div', class_='user-details').find('a').getText().replace('\n', ' ').strip()
q = Result(url, [{'type': 'text', 'content': q_text}], q_user, q_votes, q_date)
output.append(q)
# TODO: Parse the answers as own function
answers = soup.find_all('div', class_='answer')
for answer in answers:
try:
a_votes = answer.find('div', class_='votecell').getText().replace('\n', ' ').strip().split(' ')[0]
except:
a_votes = 'N/A'
try:
a_info_container = answer.find_all('div', class_='post-signature grid--cell fl0')
if len(a_info_container) > 1:
a_info_container = a_info_container[1]
else:
a_info_container = a_info_container[0]
except:
pass
try:
a_user = a_info_container.find('div', class_='user-details').find('a').getText().replace('\n', ' ').strip()
except:
a_user = 'N/A'
try:
a_date = a_info_container.find('span', class_='relativetime').getText().replace('\n', ' ').strip()
except:
a_date = 'N/A'
try:
all_text = []
a_text = answer.find('div', class_='post-text')
for elem in a_text:
if elem.name == 'pre':
all_text.append({'type': 'code', 'content': elem.text.split('\n')})
elif elem.name == 'p':
all_text.append({'type': 'text', 'content': elem.text.replace('\n', ' ').strip()})
# elif elem.name == 'h1':
# all_text.append({'type': 'heading', 'content': elem.text})
except:
all_text = ['N/A']
a = Result(url, all_text, a_user, a_votes, a_date)
output.append(a)
return output
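# Hedged usage sketch (an addition, not part of the original module): feed the
# Parser an already-fetched search-results page. The inline HTML string and the
# value of Config.PARSER (assumed to be a BeautifulSoup parser name such as
# 'html.parser') are illustrative assumptions; real input would come from a fetcher.
if __name__ == '__main__':
    sample_page = '<div class="result-link"><a href="/questions/1">q</a></div>'
    for thread_url in Parser().parse_urls(sample_page):
        # parse_results() would then be called with the HTML of each thread page.
        print(thread_url)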
|
[
"timomehto@gmail.com"
] |
timomehto@gmail.com
|
9c90388d3381674e0cb41add4b6132118e241883
|
444970b3dda58e0feb7adb6faf94d024d4672749
|
/Processor.py
|
fab2e317671631624bd55c2fbc719f1d57e4540a
|
[] |
no_license
|
kyrie2014/CI-Test
|
061eb494a115882a975afe79d3a17a52da3541e1
|
9244d3c8b904146baa4cd58c57a0c6ab5c87ed0a
|
refs/heads/master
| 2020-03-31T20:03:15.559493
| 2018-10-11T05:16:45
| 2018-10-11T05:16:45
| 152,523,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,819
|
py
|
# coding:utf-8
from xml.dom import minidom, Node
from os.path import *
import sys
import pandas as pd
import re
import os
import copy
class AutoProcessor(object):
xml_path = r'\\shnas01\publicshared\BM\BM_AutoTest\AutoTest_Case'
bug_path = r'\\shnas01\publicshared\BM\BM_AutoTest\AutoBugs\bm_bug.xlsx'
def __init__(self, log_path=None, case_dir=None, flag=None):
self.result_path = join(
join(
log_path,
AutoProcessor.latest_log_dirs(log_path, flag)[0]
),
r'testResult.xml'
) if log_path is not None else None
self.xml_path = join(self.xml_path, case_dir)
# print self.result_path
def filter_bug(self):
        # Bail out if the exported bug spreadsheet is not there to read.
        if not exists(self.bug_path):
            return None
        # Extract the test case name from the bug summary
def filter(summary):
result = re.findall(r'#([\S\s]*)#', summary)
if len(result) > 0:
return result[0]
return None
df = pd.DataFrame(pd.read_excel(self.bug_path))
new_df = df.apply(lambda summary: map(filter, df['Summary']), axis=0)
return new_df['Summary'].tolist()
def comment_node(self, xml_name, cases):
xml_file = join(self.xml_path, xml_name)
doc = minidom.parse(xml_file).documentElement
element = doc.getElementsByTagName('TestCase') + doc.getElementsByTagName('Test')
for node in element:
if node.getAttribute('name') in cases:
comment = node.ownerDocument.createComment(node.toxml())
node.parentNode.replaceChild(comment, node)
with open(xml_file, 'wb') as file:
file.write(doc.toxml())
def uncomment_node(self, xml_name, cases):
import re
xml_file = join(self.xml_path, xml_name)
content, flag = '', False
pattern = re.compile(r'<!--[\S\s]*{}[\S\s]*'.format('|'.join(cases)))
with open(xml_file, 'r') as file:
for line in file:
filter = pattern.findall(line)
if filter:
if '-->' not in filter[-1]:
flag = True
line = re.sub('(<!)?--[>]?', '', line)
if flag and '-->' in line:
line = line.replace('-->', '')
content += line
with open(xml_file, 'wb') as file:
file.write(content)
def uncomment_all_nodes(self):
import re
for _, _, files in os.walk(self.xml_path):
for file in files:
if '.xml' not in file or '_plan.xml' in file:
continue
print 'File: ' + file
xml_file = join(self.xml_path, file)
content, is_modified = '', False
with open(xml_file, 'r') as file:
for line in file:
if re.findall(r'(<!--\s?<)|(>\s?-->)', line):
print 'Modified: ' + line
line = re.sub('(<!)?--[>]?', '', line)
is_modified = True
content += line
if is_modified:
with open(xml_file, 'wb') as file:
file.write(content)
@staticmethod
def latest_log_dirs(path, flag):
return sorted(
[
(x, getctime(join(path, x)))
for x in os.listdir(path)
if (isdir(join(path, x)) and flag in x)
],
key=lambda i: i[1]
)[-1]
def comment_and_create_tpm_bug(self):
fail_cases = dict()
doc = minidom.parse(self.result_path).documentElement
# node = doc.getElementsByTagName('Summary')[-1]
# ratio = node.getAttribute('firstRunPassRate').strip('%')
# if int(ratio) <= 30:
# return fail_cases_list
node = doc.getElementsByTagName('TestBuildInfo')[-1]
node2 = doc.getElementsByTagName('BuildInfo')[-1]
        # Collect the names of the test cases that failed in the result XML
nodes = doc.getElementsByTagName('TestCase')
for n in nodes:
xml_file_name = ''
ch_nodes = [ch for ch in n.childNodes if ch.nodeType == n.ELEMENT_NODE]
instance = copy.copy(n)
for _ in range(10):
instance = instance.parentNode
if instance.tagName == 'TestPackage':
xml_file_name = instance.getAttribute('appPackageName')
break
if 'pyInitialize' in xml_file_name:
continue
if len(ch_nodes) > 1:
for cn in ch_nodes:
if 'fail' == cn.getAttribute('result'):
fail_cases[cn.getAttribute('name')] = xml_file_name
else:
cn = ch_nodes[-1]
if 'fail' == cn.getAttribute('result'):
fail_cases[n.getAttribute('name')] = xml_file_name
if fail_cases is None:
return
# self.comment_node(comment_cases)
        # Initialize the bug information
from BugInfo import BugInfo, BugDescription
bug_info = BugInfo()
bug_desc = BugDescription(
device= node.getAttribute('deviceID'),
url = node.getAttribute('pack-url'),
hw = node.getAttribute('product-hardware'),
path = node.getAttribute('sharedPath'),
ver = node2.getAttribute('build_display')
)
from HttpHelper import HttpHelper
for case, xml_name in fail_cases.items():
            # Comment out the failing case in its XML file
self.comment_node(xml_name + '.xml', case)
bug_desc_instance = copy.copy(bug_desc)
bug_desc_instance.case_name = case + '@' + xml_name
            # File the TPM bug
# print bug_info.format_bug_info(bug_desc_instance)
HttpHelper().put(bug_info.format_bug_info(bug_desc_instance))
def init_option():
from optparse import OptionParser
parser = OptionParser(
usage='%prog -p [common|uncommon|reset] [case_directory] [sn]',
description='common or uncommon cases, and file bugs.'
)
parser.add_option(
'-p',
'--param',
dest='param',
nargs=3,
action='store',
help='common or uncommon cases, and file bugs',
metavar='PARAM'
)
(options, args) = parser.parse_args()
return options.param if options.param else sys.exit()
if '__main__' == __name__:
param, case_dir, sn = init_option()
# param, sn = 'common', 'SC77311E10181120412'
cases = dict()
try:
if param == 'common':
print 'common specified node'
path = os.getcwd().replace('testcases\ext', 'results')
ap = AutoProcessor(path, case_dir, sn)
ap.comment_and_create_tpm_bug()
elif param == 'uncommon':
print 'uncommon specified node'
ap = AutoProcessor(case_dir=case_dir)
closed_bugs = ap.filter_bug()
if closed_bugs is not None:
for bug in closed_bugs:
print 'Bug --> ' + bug
tmp = bug.split('@')
                    if tmp[1] not in cases:
cases[tmp[1]] = [tmp[0]]
else:
cases[tmp[1]].append(tmp[0])
for file, cases in cases.items():
ap.uncomment_node(file, cases)
else:
print 'Not found bug!'
elif param == 'reset':
print 'reset all common node'
ap = AutoProcessor(case_dir=case_dir)
ap.uncomment_all_nodes()
except Exception, cause:
print cause
print 'Total test is pass'
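# Added note (not in the original file): filter_bug() relies on TPM bug summaries
# wrapping the auto-test case in '#...#'. For example, a summary such as
#     '[BM] #testFoo@SomeTestCases# fails on boot'
# yields 'testFoo@SomeTestCases' via re.findall(r'#([\S\s]*)#', summary); the
# 'uncommon' branch of the __main__ block above then splits that on '@' into the
# case name and the XML file to uncomment. The sample summary is an assumption.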
|
[
"Kyrie.Liu@spreadtrum.com"
] |
Kyrie.Liu@spreadtrum.com
|
0a5b7be0c07c9be2e0443f4569ef24288a0b59c0
|
a01a43157460788b9156e09a7985267afcb2438b
|
/Province/Beijing.py
|
a16aa27771a6af43b8392cc39a9e4f7939b6114b
|
[] |
no_license
|
Ginchung/2020Wuhan
|
4f4db7556712363e1173394ee5e1c273b68e3f37
|
b9e9bdc8a326dfcc24a518229ac5c62490308188
|
refs/heads/master
| 2020-12-26T19:53:40.615462
| 2020-02-20T11:01:28
| 2020-02-20T11:01:28
| 237,622,642
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,513
|
py
|
### Project: 2020Wuhan@Github/Ginchung
## File: beijing.py
## Run with 'python beijing.py'
# object 'sdct'
# Function: stores the info from official message
# Key: YYYY-MM-DD-HH
# Web source: http://wjw.beijing.gov.cn/wjwh/ztzl/xxgzbd/gzbdyqtb/
province='beijing'
sdct={}
sdct['2020-01-21-18']='西城区1例、海淀区2例、丰台区1例、通州区1例、大兴区2例、昌平区2例,武汉来京人员1例'
sdct['2020-01-22-18']='西城区2例、朝阳区1例、海淀区2例、丰台区1例、石景山区1例、通州区1例、大兴区2例、昌平区2例,武汉来京人员2例'
sdct['2020-01-24-00']='西城区3例、朝阳区3例、海淀区3例、丰台区2例、石景山区1例、通州区2例、顺义区1例、大兴区2例、昌平区2例,外地来京人员7例'
sdct['2020-01-24-17']='西城区4例、朝阳区5例、海淀区4例、丰台区2例、石景山区1例、通州区2例、顺义区1例、大兴区2例、昌平区3例,外地来京人员10例'
sdct['2020-01-24-20']='西城区4例、朝阳区5例、海淀区6例、丰台区2例、石景山区1例、通州区2例、顺义区1例、大兴区2例、昌平区3例,外地来京人员10例'
sdct['2020-01-25-17']='西城区4例、朝阳区6例、海淀区8例、丰台区2例、石景山区1例、通州区3例、顺义区1例、大兴区2例、昌平区3例,外地来京人员11例'
sdct['2020-01-25-23']='东城区1例、西城区5例、朝阳区8例、海淀区10例、丰台区3例、石景山区1例、通州区4例、顺义区1例、大兴区2例、昌平区5例,外地来京人员11例'
sdct['2020-01-26-08']='东城区1例、西城区5例、朝阳区8例、海淀区13例、丰台区3例、石景山区1例、通州区4例、顺义区1例、大兴区2例、昌平区5例,外地来京人员11例'
sdct['2020-01-26-21']='东城区1例、西城区5例、朝阳区11例、海淀区16例、丰台区4例、石景山区2例、通州区6例、顺义区1例、大兴区3例、昌平区7例,外地来京人员12例'
sdct['2020-01-27-09']='东城区1例、西城区7例、朝阳区11例、海淀区17例、丰台区4例、石景山区2例、通州区6例、顺义区1例、大兴区5例、昌平区7例,外地来京人员11例'
sdct['2020-01-27-20']='东城区2例、西城区7例、朝阳区17例、海淀区17例、丰台区4例、石景山区2例、通州区7例、顺义区1例、大兴区5例、昌平区7例,外地来京人员11例'
sdct['2020-01-28-12']='东城区2例、西城区8例、朝阳区17例、海淀区21例、丰台区7例、石景山区2例、通州区7例、顺义区2例、大兴区7例、昌平区7例,外地来京人员11例'
sdct['2020-01-29-12']='东城区2例、西城区9例、朝阳区19例、海淀区23例、丰台区8例、石景山区2例、门头沟1例、通州区7例、顺义区2例、大兴区10例、昌平区8例,外地来京人员11例'
sdct['2020-01-29-20']='东城区2例、西城区12例、朝阳区21例、海淀区24例、丰台区10例、石景山区2例、门头沟区1例、通州区8例、顺义区2例、大兴区10例、昌平区8例,外地来京人员11例'
sdct['2020-01-30-08']='东城区2例、西城区12例、朝阳区21例、海淀区24例、丰台区11例、石景山区2例、门头沟区1例、通州区8例、顺义区2例、大兴区10例、昌平区9例、怀柔区1例,外地来京人员11例'
sdct['2020-01-30-20']='东城区2例、西城区12例、朝阳区21例、海淀区24例、丰台区11例、石景山区2例、门头沟区1例、通州区10例、顺义区2例、大兴区15例、昌平区9例、怀柔区1例,外地来京人员11例'
sdct['2020-01-31-00']='东城区3例、西城区13例、朝阳区22例、海淀区26例、丰台区11例、石景山区2例、门头沟区1例、通州区12例、顺义区2例、大兴区16例、昌平区12例、怀柔区1例,外地来京人员11例'
sdct['2020-01-31-14']='东城区3例、西城区16例、朝阳区24例、海淀区27例、丰台区11例、石景山区2例、门头沟区1例、通州区12例、顺义区2例、大兴区17例、昌平区12例、怀柔区1例,外地来京人员11例'
sdct['2020-02-01-00']='东城区3例、西城区17例、朝阳区27例、海淀区35例、丰台区12例、石景山区3例、门头沟区1例、通州区13例、顺义区2例、大兴区19例、昌平区12例、怀柔区1例,外地来京人员11例'
sdct['2020-02-01-12']='东城区3例、西城区17例、朝阳区28例、海淀区39例、丰台区16例、石景山区3例、门头沟区1例、房山区2例、通州区13例、顺义区2例、昌平区12例、大兴区20例、怀柔区1例、外地来京人员11例'
sdct['2020-02-02-00']='东城区3例、西城区17例、朝阳区35例、海淀区41例、丰台区16例、石景山区4例、门头沟区1例、房山区2例、通州区13例、顺义区6例、昌平区12例、大兴区21例、怀柔区1例、外地来京人员11例'
sdct['2020-02-03-00']='东城区2例、西城区26例、朝阳区31例、海淀区42例、丰台区17例、石景山区5例、门头沟区3例、房山区4例、通州区13例、顺义区5例、昌平区13例、大兴区28例、怀柔区3例、延庆区1例,外地来京人员19例'
sdct['2020-02-04-00']='东城区2例、西城区28例、朝阳区36例、海淀区45例、丰台区18例、石景山区6例、门头沟区3例、房山区5例、通州区13例、顺义区5例、昌平区14例、大兴区29例、怀柔区3例、延庆区1例,外地来京人员20例'
sdct['2020-02-05-00']='东城区6例、西城区29例、朝阳区43例、海淀区45例、丰台区20例、石景山区7例、门头沟区3例、房山区11例、通州区13例、顺义区6例、昌平区15例、大兴区29例、怀柔区4例、延庆区1例,外地来京人员21例'
sdct['2020-02-06-00']=''
city=[]
latest='2020-02-05-00'
for i in sdct[latest].replace(',','、').split('、'):
city.append(i[:2])
print('city of %s: '%province,city,'\n')
print('number of infected cities now: ',len(city))
Table={}
for k,v in zip(sdct.keys(),sdct.values()):
if len(v)<5:
continue
s=['0']*len(city)
v=v.replace(',','、').replace('武汉','外地')
for i in v.split('、'):
tmp=''
for t in i:
if t.isdigit():
tmp+=t
#tmp=int(tmp)
s[city.index(i[:2])]=tmp
Table[k]=s
print(s)
### Output
print(province,',',','.join(city))
for date,out in zip(Table.keys(),Table.values()):
print(date,',',','.join(out))
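# Optional sketch (an addition, not in the original script): the same rows printed
# above can be written to a CSV file instead; the file name 'beijing.csv' is an
# illustrative assumption.
#
# with open('beijing.csv', 'w') as f:
#     f.write(province + ',' + ','.join(city) + '\n')
#     for date, out in zip(Table.keys(), Table.values()):
#         f.write(date + ',' + ','.join(out) + '\n')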
|
[
"ljcone@qq.com"
] |
ljcone@qq.com
|
4201aaf82a13c985bc5ed36fc69b99f462bf3731
|
7edbf1eb8a991e91192ab8ecf28d801080b2e230
|
/english/models.py
|
9072c4bef8292fd3cc1343d173be5d2012aa759f
|
[
"MIT"
] |
permissive
|
johncadigan/myenglishcloud
|
5b63d0d079ded93fbb539127a011ee17c3cb17d9
|
c698243866ce3edf864ad0e0c9a126aee57a54c0
|
refs/heads/master
| 2021-01-10T09:55:30.506142
| 2015-12-21T00:06:22
| 2015-12-21T00:06:22
| 48,341,436
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52,603
|
py
|
# -*- coding: utf-8 -*-
import hashlib
import random
import string
import transaction
import json
import os
import datetime
import Image
import errno
import uuid
from datetime import date
import re
from random import shuffle, randint
from cryptacular.bcrypt import BCRYPTPasswordManager
from slugify import slugify
import glob
from pyramid.threadlocal import get_current_request
from pyramid.util import DottedNameResolver
from pyramid.security import (Everyone,
Allow,
Deny
)
from sqlalchemy import (Column,
ForeignKey,
event,
Index,
Table,
types,
Unicode,
select,
func,
case)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import synonym
from sqlalchemy.sql.expression import func
from sqlalchemy_utils import URLType
#from velruse.store.sqlstore import SQLBase
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
##### Helper Functs
QUIZ_DIRECTORY = 'english/static/uploads/quizzes/'
PICTURE_DIRECTORY = 'english/static/uploads/pictures/'
PICTURE_SIZES = [(256, 256), (128, 128), (64,64)]
PICTURE_SUBDIRECTORIES = ["original"] + ["{0}x{1}".format(x[0], x[1]) for x in PICTURE_SIZES]
PICTURE_DIRECTORIES = [os.path.join(PICTURE_DIRECTORY, s) for s in PICTURE_SUBDIRECTORIES]
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
#for x in PICTURE_DIRECTORIES: make_sure_path_exists(x)
"""USER MODELS"""
auth_group_table = Table('auth_auth_groups', Base.metadata,
Column('auth_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('group_id', types.Integer(), \
ForeignKey('auth_groups.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (auth_id,group_id)
Index('auth_group', auth_group_table.c.auth_id, auth_group_table.c.group_id)
class AuthGroup(Base):
""" Table name: auth_groups
::
id = Column(types.Integer(), primary_key=True)
name = Column(Unicode(80), unique=True, nullable=False)
description = Column(Unicode(255), default=u'')
"""
__tablename__ = 'auth_groups'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(Unicode(80), unique=True, nullable=False)
description = Column(Unicode(255), default=u'')
users = relationship('AuthID', secondary=auth_group_table, \
backref='auth_groups')
def __repr__(self):
return u'%s' % self.name
def __unicode__(self):
return self.name
user_finished_content = Table('user_finished_content', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_finished_content', user_finished_content.c.user_id, user_finished_content.c.content_id)
user_added_content_vocab = Table('user_added_content_vocab', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_added_content_vocab', user_added_content_vocab.c.user_id, user_added_content_vocab.c.content_id)
user_voted_content_difficulty = Table('user_voted_content_difficulty', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_voted_content_difficulty', user_voted_content_difficulty.c.user_id, user_voted_content_difficulty.c.content_id)
user_voted_content_quality = Table('user_voted_content_quality', Base.metadata,
Column('user_id', types.Integer(), \
ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (authid_id,content_id)
Index('user_voted_content_quality', user_voted_content_quality.c.user_id, user_voted_content_quality.c.content_id)
class AuthID(Base):
""" Table name: auth_id
::
id = Column(types.Integer(), primary_key=True)
display_name = Column(Unicode(80), default=u'')
active = Column(types.Enum(u'Y',u'N',u'D', name=u"active"), default=u'Y')
created = Column(types.DateTime(), default=func.now())
"""
__tablename__ = 'auth_id'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
display_name = Column(Unicode(80), default=u'')
active = Column(types.Enum(u'Y',u'N',u'D', name=u"active"), default=u'Y')
created = Column(types.DateTime(), default=func.now())
groups = relationship('AuthGroup', secondary=auth_group_table, \
backref='auth_users')
users = relationship('AuthUser')
preferred_language = Column(types.Integer, ForeignKey('languages.id'))
added_vocab = relationship('Content', secondary=user_added_content_vocab, \
backref='vocab_adders')
finished_content = relationship('Content', secondary=user_finished_content, \
backref='finishers')
rated_content_difficulty = relationship('Content', secondary=user_voted_content_difficulty, \
backref='difficulty_raters')
rated_content_quality = relationship('Content', secondary=user_voted_content_quality, \
backref='quality_raters')
flashcards = relationship('Flashcard')
"""
Fix this to use association_proxy
groups = association_proxy('auth_group_table', 'authgroup')
"""
last_login = relationship('AuthUserLog', \
order_by='AuthUserLog.id.desc()')
login_log = relationship('AuthUserLog', \
order_by='AuthUserLog.id')
def in_group(self, group):
"""
Returns True or False if the user is or isn't in the group.
"""
return group in [g.name for g in self.groups]
def sorted_flashcards(self):
flashcards = {'all' : len(self.flashcards)}
i = 0
flashcards.setdefault('toAdd', [])
flashcards.setdefault('toPractice', [])
flashcards.setdefault('overdue', [])
flashcards.setdefault('today', [])
flashcards.setdefault('due', [])
flashcards.setdefault('tomorrow', [])
flashcards.setdefault('next_week', [])
flashcards.setdefault('this_week', [])
for flashcard in self.flashcards:
if flashcard.due.toordinal()-datetime.datetime.now().toordinal() < 0:
flashcards['overdue'].append(flashcard)
flashcards['due'].append(flashcard)
if flashcard.level == 'Show':
flashcards['toAdd'].append(flashcard)
else:
flashcards['toPractice'].append(flashcard)
elif flashcard.due.toordinal()-datetime.datetime.now().toordinal() == 0:
flashcards['today'].append(flashcard)
flashcards['due'].append(flashcard)
if flashcard.level == 'Show':
flashcards['toAdd'].append(flashcard)
else:
flashcards['toPractice'].append(flashcard)
elif 0 < flashcard.due.toordinal()-datetime.datetime.now().toordinal() <= 1:
flashcards['tomorrow'].append(flashcard)
elif flashcard.due.toordinal()-datetime.datetime.now().toordinal() <= 6:
flashcards['this_week'].append(flashcard)
elif 6 < flashcard.due.toordinal()-datetime.datetime.now().toordinal() <= 13:
flashcards['next_week'].append(flashcard)
flashcards['due#'] = len(flashcards['due'])
flashcards['toAdd#'] = len(flashcards['toAdd'])
flashcards['toPractice#'] = len(flashcards['toPractice'])
return flashcards
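    # Added summary comment: sorted_flashcards() returns a dict keyed by bucket --
    # 'overdue', 'today', 'due', 'tomorrow', 'this_week', 'next_week', plus
    # 'toAdd'/'toPractice' (due cards split by whether their level is still 'Show')
    # and the counts 'all', 'due#', 'toAdd#', 'toPractice#'.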
@classmethod
def get_by_id(cls, id):
"""
Returns AuthID object or None by id
.. code-block:: python
from apex.models import AuthID
user = AuthID.get_by_id(1)
"""
return DBSession.query(cls).filter(cls.id==id).first()
@classmethod
def get_by_display_name(cls, display_name):
"""
Returns AuthUser object or None by login
.. code-block:: python
from apex.models import AuthUser
user = AuthUser.get_by_login('login')
"""
return DBSession.query(cls).filter(cls.display_name==display_name).first()
def get_profile(self, request=None):
"""
Returns AuthUser.profile object, creates record if it doesn't exist.
.. code-block:: python
from apex.models import AuthUser
user = AuthUser.get_by_id(1)
profile = user.get_profile(request)
in **development.ini**
.. code-block:: python
apex.auth_profile =
"""
if not request:
request = get_current_request()
auth_profile = request.registry.settings.get('apex.auth_profile')
if auth_profile:
resolver = DottedNameResolver(auth_profile.split('.')[0])
profile_cls = resolver.resolve(auth_profile)
return get_or_create(DBSession, profile_cls, auth_id=self.id)
@property
def group_list(self):
group_list = []
if self.groups:
for group in self.groups:
group_list.append(group.name)
return ','.join( map( str, group_list ) )
class AuthUser(Base):
""" Table name: auth_users
::
id = Column(types.Integer(), primary_key=True)
login = Column(Unicode(80), default=u'', index=True)
_password = Column('password', Unicode(80), default=u'')
email = Column(Unicode(80), default=u'', index=True)
active = Column(types.Enum(u'Y',u'N',u'D'), default=u'Y')
"""
__tablename__ = 'auth_users'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
auth_id = Column(types.Integer, ForeignKey(AuthID.id), index=True)
provider = Column(Unicode(80), default=u'local', index=True)
login = Column(Unicode(80), default=u'', index=True)
salt = Column(Unicode(24))
_password = Column('password', Unicode(80), default=u'')
email = Column(Unicode(80), default=u'', index=True)
created = Column(types.DateTime(), default=func.now())
active = Column(types.Enum(u'Y',u'N',u'D', name=u"active"), default=u'Y')
def _set_password(self, password):
self.salt = self.get_salt(24)
password = password + self.salt
self._password = BCRYPTPasswordManager().encode(password, rounds=12)
def _get_password(self):
return self._password
password = synonym('_password', descriptor=property(_get_password, \
_set_password))
def get_salt(self, length):
m = hashlib.sha256()
word = ''
for i in xrange(length):
word += random.choice(string.ascii_letters)
m.update(word)
return unicode(m.hexdigest()[:length])
@classmethod
def get_by_id(cls, id):
"""
Returns AuthUser object or None by id
.. code-block:: python
from apex.models import AuthID
user = AuthID.get_by_id(1)
"""
return DBSession.query(cls).filter(cls.id==id).first()
@classmethod
def get_by_login(cls, login):
"""
Returns AuthUser object or None by login
.. code-block:: python
from apex.models import AuthUser
user = AuthUser.get_by_login('login')
"""
return DBSession.query(cls).filter(cls.login==login).first()
@classmethod
def get_by_email(cls, email):
"""
Returns AuthUser object or None by email
.. code-block:: python
from apex.models import AuthUser
user = AuthUser.get_by_email('email@address.com')
"""
return DBSession.query(cls).filter(cls.email==email).first()
@classmethod
def check_password(cls, **kwargs):
if kwargs.has_key('id'):
user = cls.get_by_id(kwargs['id'])
if kwargs.has_key('login'):
user = cls.get_by_login(kwargs['login'])
if not user:
return False
try:
if BCRYPTPasswordManager().check(user.password,
'%s%s' % (kwargs['password'], user.salt)):
return True
except TypeError:
pass
request = get_current_request()
# fallback_auth = request.registry.settings.get('apex.fallback_auth')
# if fallback_auth:
# resolver = DottedNameResolver(fallback_auth.split('.', 1)[0])
#fallback = resolver.resolve(fallback_auth)
#return fallback().check(DBSession, request, user, \
#kwargs['password'])
return False
class AuthUserLog(Base):
"""
event:
L - Login
R - Register
P - Password
F - Forgot
"""
__tablename__ = 'auth_user_log'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
auth_id = Column(types.Integer, ForeignKey(AuthID.id), index=True)
user_id = Column(types.Integer, ForeignKey(AuthUser.id), index=True)
time = Column(types.DateTime(), default=func.now())
ip_addr = Column(Unicode(39), nullable=False)
event = Column(types.Enum(u'L',u'R',u'P',u'F', name=u"event"), default=u'L')
class Country(Base):
__tablename__= 'countries'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
name = Column(Unicode(50), nullable=False)
image = Column(Unicode(50), nullable=False)
language_profile_pairs = Table('language_profile_pairs', Base.metadata,
Column('language_id', types.Integer(), \
ForeignKey('languages.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('profile_id', types.Integer(), \
ForeignKey('profiles.id', onupdate='CASCADE', ondelete='CASCADE'))
)
Index('language_profile', language_profile_pairs.c.language_id, language_profile_pairs.c.profile_id)
class Profile(Base):
__tablename__= 'profiles'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
owner = Column(types.Integer, ForeignKey(AuthID.id), index=True)
picture_id = Column(types.Integer, ForeignKey('pictures.id'))
name = Column(Unicode(50))
date_of_birth = Column(types.Date())
country_id = Column(types.Integer, ForeignKey('countries.id'))
city = Column(Unicode(50))
about_me = Column(Unicode(1000))
languages = relationship('Language', secondary=language_profile_pairs, \
backref='languages')
class Language(Base):
__tablename__= 'languages'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
english_name = Column(Unicode(50), nullable=False)
native_name = Column(Unicode(50), nullable=False)
iso_lang = Column(Unicode(10))
goog_translate = Column(Unicode(10))
profiles = relationship('Profile', secondary=language_profile_pairs, \
backref='profiles')
class Card(Base):
__tablename__='cards'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
lemma_id = Column(ForeignKey('english_lemmas.id', onupdate='CASCADE', ondelete='CASCADE'))
language_id = Column(ForeignKey('languages.id', onupdate='CASCADE', ondelete='CASCADE'))
translations = relationship('Translation')
class Translation(Base):
__tablename__='translations'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
card_id = Column(ForeignKey('cards.id', onupdate='CASCADE', ondelete='CASCADE'))
foreign_lemma_id = Column(ForeignKey('foreign_lemmas.id', onupdate='CASCADE', ondelete='CASCADE'))
count = Column(types.Integer, default=1, index=True)
lemma_content_pairs = Table('lemma_content_pairs', Base.metadata,
Column('english_lemma_id', types.Integer(), \
ForeignKey('english_lemmas.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (auth_id,group_id)
Index('english_lemma_content', lemma_content_pairs.c.english_lemma_id, lemma_content_pairs.c.content_id)
class EnglishLemma(Base):
""" N=Noun, PR=Pronoun, ADJ=Adjective, ADV=Adverb, VB=Verb, PVB=Phrasal Verb, PP=Preposition, CNJ=Conjunction,
"""
__tablename__= 'english_lemmas'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
owner = Column(ForeignKey('auth_id.id'))
form_id = Column(ForeignKey('english_forms.id'))
form = relationship('EnglishForm')
example_sentence = Column(Unicode(100), nullable=False)
pos = Column(types.Enum(u'Noun',u'Pronoun',u'Adjective', u'Adverb', u'Verb', u'Phrasal Verb', u'Preposition', u'Conjunction', u'Collocation', u'Slang'), nullable=False)
picture_id = Column(types.Integer, ForeignKey('pictures.id'))
picture = relationship("Picture")
content_ids = relationship('Content', secondary=lemma_content_pairs, \
backref='content_ids')
class EnglishLemmaCategory(Base):
__tablename__= 'english_lemma_categories'
__mapper_args__ = {'batch': False # allows extension to fire for each
# instance before going to the next.
}
parent = None
id = Column(types.Integer(), primary_key=True)
name = Column(types.Unicode(100))
lemma_id = Column(ForeignKey('english_lemmas.id'))
level = Column("lvl", types.Integer, nullable=False)
left = Column("lft", types.Integer, nullable=False)
right = Column("rgt", types.Integer, nullable=False)
@event.listens_for(EnglishLemmaCategory, "before_insert")
def before_insert(mapper, connection, instance):
print 'making adjustments before insertion'
#If the new term has no parent, connect to root
if instance.parent == None:
category = mapper.mapped_table
        # Attach to the 'ALL' root node; read its columns by name so the lookup
        # does not depend on column declaration order.
        row = connection.execute(select([category]).where(category.c.name == 'ALL')).first()
        parent = EnglishLemmaCategory()
        parent.name = row[category.c.name]
        parent.level = row[category.c.lvl]
        parent.left = row[category.c.lft]
        parent.right = row[category.c.rgt]
instance.parent = parent
category = mapper.mapped_table
#Find right most sibling's right value
right_most_sibling = connection.scalar(
select([category.c.rgt]).
where(category.c.name == instance.parent.name)
)
#Update all values greater than rightmost sibiling
connection.execute(
category.update(
category.c.rgt >= right_most_sibling).values(
#Update if left bound in greater than rightmost sibling
lft=case(
[(category.c.lft > right_most_sibling,
category.c.lft + 2)],
else_=category.c.lft
),
#Update if right bound is greater than right most sibling
rgt=case(
[(category.c.rgt >= right_most_sibling,
category.c.rgt + 2)],
else_=category.c.rgt
)
)
)
instance.left = right_most_sibling
instance.right = right_most_sibling + 1
instance.level = instance.parent.level + 1
@event.listens_for(EnglishLemmaCategory, "after_delete")
def after_delete(mapper, connection, target):
category = mapper.mapped_table
#Delete leaf
if target.right-target.left == 1:
print 'updating after deletion of leaf'
#Update all values greater than right side
connection.execute(
category.update(
category.c.rgt > target.left).values(
#Update if left bound in greater than right side
lft=case(
[(category.c.lft > target.left,
category.c.lft - 2)],
else_=category.c.lft
),
#Update if right bound is greater than right
rgt=case(
[(category.c.rgt >= target.left,
category.c.rgt - 2)],
else_=category.c.rgt
)
)
)
#Delete parent
else:
print 'updating after deletion of parent'
category = mapper.mapped_table
#Promote all children
connection.execute(
category.update(
category.c.lft.between(target.left, target.right)).values(
#Update if left bound in greater than right side
lft=case(
[(category.c.lft > target.left,
category.c.lft - 1)],
else_=category.c.lft
),
#Update if right bound is greater than right
rgt=case(
[(category.c.rgt > target.left,
category.c.rgt - 1)],
else_=category.c.rgt
),
lvl=case([(category.c.lvl > target.level,
category.c.lvl - 1)],
else_=category.c.lvl
)
)
)
#Update all values greater than right side
connection.execute(
category.update(
category.c.rgt > target.right).values(
#Update if left bound in greater than right side
lft=case(
[(category.c.lft > target.left,
category.c.lft - 2)],
else_=category.c.lft
),
#Update if right bound is greater than right
rgt=case(
[(category.c.rgt >= target.left,
category.c.rgt - 2)],
else_=category.c.rgt
)
)
)
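# Added worked example for the nested-set bookkeeping above (not in the original
# file). Suppose the tree is only the root: ALL(lft=1, rgt=2, lvl=0). Inserting a
# category under ALL shifts every rgt >= ALL.rgt (and every lft beyond it) by +2
# and gives the new node lft=2, rgt=3, lvl=1, so the tree becomes
#   ALL(1, 4) -> A(2, 3)                 after one insert, then
#   ALL(1, 6) -> A(2, 3), B(4, 5)        after a second insert under ALL.
# Deleting a leaf reverses the shift (-2 on bounds to its right); deleting a
# non-leaf first promotes its children (narrowing their bounds and decrementing
# lvl) and then closes the two-wide gap.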
class EnglishForm(Base):
"""
"""
__tablename__= 'english_forms'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
form = Column(Unicode(50), nullable=False)
class FormInfo(Base):
"""
"""
__tablename__= 'form_infos'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
form_id = Column(types.Integer, ForeignKey('english_forms.id'))
definitions = Column(Unicode(1000))
freq = Column(types.Integer)
class ForeignLemma(Base):
""" N=Noun, PR=Pronoun, ADJ=Adjective, ADV=Adverb, VB=Verb, PVB=Phrasal Verb, PP=Preposition, CNJ=Conjunction,
"""
__tablename__= 'foreign_lemmas'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
language_id = Column(types.Integer, ForeignKey('languages.id'))
form = Column(Unicode(50), nullable=False)
class Flashcard(Base):
__tablename__= 'flashcards'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
card_id = Column(ForeignKey('cards.id', onupdate='CASCADE', ondelete='CASCADE'))
owner = Column(ForeignKey('auth_id.id'))
level = Column(types.Enum('Show', '4Source','8Source', '4Target', '8Target', 'Flashcard1','Flashcard2','Flashcard3','Flashcard4','Flashcard5','Flashcard6','Flashcard7','Flashcard8'), default='Show')
due = Column(types.Date(), default=func.now())
interval = Column(types.Integer(), default=10)
ease = Column(types.Integer(), default=2500)
correct = Column(types.Integer(), default=0)
incorrect = Column(types.Integer(), default=0)
class FlashcardHistory(Base):
__tablename__= 'flashcardhistories'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
flashcard_id = Column(ForeignKey('flashcards.id'), index=True)
time = Column(types.DateTime(), default=func.now())
level = Column(types.Enum('Show', '4Source','8Source', '4Target', '8Target', 'Flashcard1','Flashcard2','Flashcard3','Flashcard4','Flashcard5','Flashcard6','Flashcard7','Flashcard8'))
response_time= Column(types.Integer())
response = Column(Unicode(50))
correct = Column(types.Boolean())
"""CONTENT MODELS"""
tag_content_pairs = Table('tag_content_pairs', Base.metadata,
Column('tag_id', types.Integer(), \
ForeignKey('tags.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('content_id', types.Integer(), \
ForeignKey('contents.id', onupdate='CASCADE', ondelete='CASCADE'))
)
# need to create Unique index on (auth_id,group_id)
Index('tag_content', tag_content_pairs.c.tag_id, tag_content_pairs.c.content_id)
class Tag(Base):
__tablename__ = 'tags'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(Unicode(100), unique=True, nullable=False)
contents = relationship('Content', secondary=tag_content_pairs, \
backref='contents')
class Content(Base):
__tablename__= 'contents'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
type = Column(types.Enum(u'lesson', u'reading'))
released = Column(types.Date(), default=func.now())
title = Column(Unicode(80))
description = Column(Unicode(350))
picture_id = Column(types.Integer(), ForeignKey('pictures.id'))
picture = relationship("Picture")
url = Column(URLType)
views = Column(types.Integer(), default=0)
owner = Column(types.Integer, ForeignKey('auth_users.id'), index = True)
quiz_id = Column(types.Integer, ForeignKey("quizzes.id"), index = True)
quiz = relationship('Quiz', uselist=False)
finished_by = relationship('AuthID', secondary=user_finished_content)
difficulty_rated_by = relationship('AuthID', secondary=user_voted_content_difficulty)
quality_rated_by = relationship('AuthID', secondary=user_voted_content_quality)
vocab_added_by = relationship('AuthID', secondary=user_added_content_vocab)
tags = relationship('Tag', secondary=tag_content_pairs, \
backref='tags')
comments = relationship('Comment')
difficulty_votes = relationship('DifficultyVote')
quality_votes = relationship('QualityVote')
vocabulary = relationship('EnglishLemma', secondary=lemma_content_pairs, \
backref='vocabulary')
@classmethod
def get_by_title(cls, title):
""" Returns Content object or None by title content = Content.get_by_title('title')"""
return DBSession.query(cls).filter(cls.title==title).first()
@classmethod
def get_by_url(cls, url):
""" Returns Content object or None by title content = Content.get_by_url('url')"""
return DBSession.query(cls).filter(cls.url==url).first()
class DifficultyVote(Base):
__tablename__ = 'difficulty_votes'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
score = Column(types.Integer())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
content_id = Column(types.Integer, ForeignKey('contents.id'), default = None)
class QualityVote(Base):
__tablename__ = 'quality_votes'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
score = Column(types.Integer())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
content_id = Column(types.Integer, ForeignKey('contents.id'), default = None)
class Comment(Base):
__tablename__ = 'comments'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
comment_type = Column(types.Enum(u'C',u'Q'), default=u'C')
time = Column(types.DateTime(), default=func.now())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
content_id = Column(types.Integer, ForeignKey('contents.id'), default = None)
text = Column(Unicode(1000), nullable = False)
replies = relationship('CommentReply')
class CommentReply(Base):
__tablename__ = 'comment_replies'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
time = Column(types.DateTime(), default=func.now())
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
parent_id = Column(types.Integer, ForeignKey('comments.id'), default = None)
text = Column(Unicode(1000), nullable = False)
class Lesson(Base):
""" Table name: lessons
video = Column(types.VARCHAR(200))
quiz_id = Column(types.Integer, ForeignKey('quizzes.id'), nullable= False)
"""
__tablename__ = 'lessons'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
content_id = Column(types.Integer(), ForeignKey('contents.id'))
content = relationship("Content")
video = Column(types.VARCHAR(200))
class Reading(Base):
""" Table name: readings
text = Column(types.UnicodeText())
quiz_id = Column(types.Integer, ForeignKey('quizzes.id'), nullable= False)
"""
__tablename__ = 'readings'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
content_id = Column(types.Integer(), ForeignKey('contents.id'))
text = Column(types.UnicodeText())
sources = relationship('Source')
class Source(Base):
__tablename__ = 'sources'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
reading_id = Column(types.Integer(), ForeignKey('readings.id'))
author = Column(types.Unicode(60))
title = Column(types.Unicode(100))
url = Column(types.Unicode(200))
source = Column(types.Unicode(60))
    date = Column(types.Date, default=func.now())
class Quiz(Base):
__tablename__ = 'quizzes'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
title = Column(Unicode(100), nullable=False, default=u"This quiz is coming soon!")
tagline = Column(Unicode(100), nullable=False, default=u'Test your Knowledge!')
content = relationship('Content')
questions = relationship('Question')
def to_json(self):
if len(self.questions) > 0:
questions = self.questions[0].to_json()
for question in self.questions[1:]:
questions += "," + question.to_json
quiz = """var quizJSON = {{"info": {{"name": "{title}","main": "<p>{tagline}</p>", "results": "<h5>Learn More!</h5><p>We have many more lessons for you.</p>", "level1": "You know this lesson very well!", "level2": "You know this lesson well.", "level3": "You might want to watch this lesson again.", "level4": "You should watch this lesson again.","level5":"You should definitely watch this lesson again" }}, "questions": [{questions}]}};""".format(**{'title' : self.title, 'tagline' : self.tagline, 'questions' : questions})
file_path = os.path.join(QUIZ_DIRECTORY, '{0}.js'.format(self.id))
temp_file_path = os.path.join('/tmp', '{0}.js'.format(self.id))
output_file = open(temp_file_path, 'wb')
output_file.write(quiz)
output_file.close()
os.rename(temp_file_path, file_path)
def json_id(self):
questions = DBSession.query(Question).filter(self.id==Question.quiz_id).all()
if len(questions) > 0:
return self.id
else:
return 0
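# Added note: Quiz.to_json() above renders the questions into a JavaScript
# 'var quizJSON = {...};' snippet and writes it to
# english/static/uploads/quizzes/<quiz id>.js (via QUIZ_DIRECTORY), staging the
# file in /tmp and then renaming it into place; presumably a front-end quiz widget
# loads that file by id, though that consumer is not shown in this module.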
@event.listens_for(Quiz, "after_insert")
def after_insert(mapper, connection, target):
target.to_json()
@event.listens_for(Quiz, "after_update")
def after_update(mapper, connection, target):
print "\n\n\nUPDATING QUIZ\n\n\n", str(target)
target.to_json()
class Question(Base):
__tablename__ = 'questions'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
quiz_id = Column(types.Integer, ForeignKey('quizzes.id'))
prompt = Column(Unicode(100), unique=True, nullable=False)
answers = relationship('Answer')
correct_message = Column(Unicode(100), nullable=False, default=u'That was correct!')
incorrect_message = Column(Unicode(100), nullable=False, default=u'That was incorrect...')
def to_json(self):
correct = randint(0, 9)
incorrect = randint(0, 3)
icheadline = ['Incorrect!', 'Too bad..', 'You were wrong...', 'Sorry...'][incorrect]
cheadline = ['Correct!', 'Good job!', 'Right on!', 'Way to go!', 'Keep it up!', 'Awesome!', 'Wonderful!', "You're right!", 'Yup', 'Good answer'][correct]
dic = {"prompt" : self.prompt,
"cexplanation" : self.correct_message,
"cheadline" : cheadline,
"icexplanation" : self.incorrect_message,
"icheadline":icheadline,
}
for i, answer in enumerate(self.answers):
dic["a{0}t".format(i+1)] = answer.response
if answer.correct:
dic["a{0}v".format(i+1)]='true'
else:
dic["a{0}v".format(i+1)]='false'
if len(self.answers) == 4:
question = """{{"q": "{prompt}", "a": [{{"option": "{a1t}", "correct": {a1v}}}, {{"option": "{a2t}", "correct": {a2v}}}, {{"option": "{a3t}", "correct": {a3v}}}, {{"option": "{a4t}", "correct": {a4v}}}], "correct": "<p><span>{cheadline}</span>{cexplanation}</p>", "incorrect": "<p><span>{icheadline}</span>{icexplanation}</p>"}}""".format(**dic)
elif len(self.answers) == 2:
question = """{{"q": "{prompt}", "a": [{{"option": "{a1t}", "correct": {a1v}}}, {{"option": "{a2t}", "correct": {a2v}}}], "correct": "<p><span>{cheadline}</span>{cexplanation}</p>", "incorrect": "<p><span>{icheadline}</span>{icexplanation}</p>"}}""".format(**dic)
return question
@event.listens_for(Question, "after_update")
def after_update(mapper, connection, target):
print "\n\n\nUPDATING QUIZ\n\n\n", str(target.quiz_id)
    # Look the quiz up through the session; a raw Connection has no .query().
    quiz = DBSession.query(Quiz).filter(Quiz.id == target.quiz_id).first()
    if quiz is not None:
        quiz.to_json()
class Answer(Base):
__tablename__ = 'answers'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer, primary_key=True)
question_id = Column(types.Integer, ForeignKey('questions.id'), default = None)
response = Column(Unicode(100), unique=True, nullable=False)
correct = Column(types.Boolean)
class Picture(Base):
"""Table which stores pictures"""
__tablename__ = 'pictures'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(URLType)
owner = Column(types.Integer, ForeignKey(AuthID.id), index = True)
@classmethod
def add_file(cls, input_file, name):
pic = Image.open(input_file)
for i, size in enumerate([pic.size]+PICTURE_SIZES):
pic.thumbnail(size, Image.ANTIALIAS)
file_path = os.path.join(PICTURE_DIRECTORIES[i], '{0}.jpeg'.format(name))
pic.save(file_path, 'jpeg')
@classmethod
def from_file(cls, name, image):
if name == None: name = str(uuid.uuid4())
same_name =len(glob.glob(os.path.join(PICTURE_DIRECTORIES[0], '{0}[0-9]*.jpeg'.format(name))))
name+=str(same_name)
input_file = image.file
cls.add_file(input_file, name)
return Picture(name=name)
@classmethod
def update_with_file(cls, pid, name, image):
if name == None: name = str(uuid.uuid4())
same_name =len(glob.glob(os.path.join(PICTURE_DIRECTORIES[0], '{0}[0-9]*.jpeg'.format(name))))
name+=str(same_name)
session = DBSession()
session.query(cls).filter(cls.id==pid).update(values={'name' : name.strip()})
input_file = image.file
cls.add_file(input_file, name)
session.flush()
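# Added note: for a picture named "cat", add_file() above writes four JPEGs --
#   english/static/uploads/pictures/original/cat.jpeg   (source size)
#   english/static/uploads/pictures/256x256/cat.jpeg
#   english/static/uploads/pictures/128x128/cat.jpeg
#   english/static/uploads/pictures/64x64/cat.jpeg
# following PICTURE_DIRECTORIES defined at the top of this module; the name "cat"
# is only an illustration.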
class PotentialPicture(Base):
"""Table which stores pictures"""
__tablename__ = 'potential_pictures'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
name = Column(types.VARCHAR(75))
""" Usage Data"""
class UserPoint(Base):
__tablename__ = 'user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
source = Column(types.Unicode(255), default=u'')
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
time = Column(types.DateTime(), default=func.now())
class TotalUserPoint(Base):
__tablename__ = 'total_user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
class MonthlyUserPoint(Base):
__tablename__ = 'monthly_user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
month = Column(types.Integer())
class WeeklyUserPoint(Base):
__tablename__ = 'weekly_user_points'
__table_args__ = {"sqlite_autoincrement": True}
id = Column(types.Integer(), primary_key=True)
user_id = Column(types.Integer, ForeignKey(AuthID.id))
amount = Column(types.Integer(), default = 0)
week = Column(types.Integer())
def populate(settings):
## Add logistical data
session = DBSession()
default_groups = [(u'users',u'User Group'), (u'teachers',u'Teacher Group'), (u'admin',u'Admin Group')]
for name, description in default_groups:
group = AuthGroup(name=name, description=description)
session.add(group)
session.flush()
transaction.commit()
session.close()
session = DBSession()
languages={'ab':{'name':"Abkhaz",'nativename':"аҧсуа"},'aa':{'name':"Afar",'nativename':"Afaraf"},'af':{'name':"Afrikaans",'nativename':"Afrikaans"},'ak':{'name':"Akan",'nativename':"Akan"},'sq':{'name':"Albanian",'nativename':"Shqip"},'am':{'name':"Amharic",'nativename':"አማርኛ"},'ar':{'name':"Arabic",'nativename':"العربية"},'an':{'name':"Aragonese",'nativename':"Aragonés"},'hy':{'name':"Armenian",'nativename':"Հայերեն"},'as':{'name':"Assamese",'nativename':"অসমীয়া"},'av':{'name':"Avaric",'nativename':"авар мацӀ, магӀарул мацӀ"},'ae':{'name':"Avestan",'nativename':"avesta"},'ay':{'name':"Aymara",'nativename':"aymar aru"},'az':{'name':"Azerbaijani",'nativename':"azərbaycan dili"},'bm':{'name':"Bambara",'nativename':"bamanankan"},'ba':{'name':"Bashkir",'nativename':"башҡорт теле"},'eu':{'name':"Basque",'nativename':"euskara, euskera"},'be':{'name':"Belarusian",'nativename':"Беларуская"},'bn':{'name':"Bengali",'nativename':"বাংলা"},'bh':{'name':"Bihari",'nativename':"भोजपुरी"},'bi':{'name':"Bislama",'nativename':"Bislama"},'bs':{'name':"Bosnian",'nativename':"bosanski jezik"},'br':{'name':"Breton",'nativename':"brezhoneg"},'bg':{'name':"Bulgarian",'nativename':"български език"},'my':{'name':"Burmese",'nativename':"ဗမာစာ"},'ca':{'name':"Catalan; Valencian",'nativename':"Català"},'ch':{'name':"Chamorro",'nativename':"Chamoru"},'ce':{'name':"Chechen",'nativename':"нохчийн мотт"},'ny':{'name':"Chichewa; Chewa; Nyanja",'nativename':"chiCheŵa, chinyanja"},'zh':{'name':"Chinese",'nativename':"中文 (Zhōngwén), 汉语, 漢語"},'cv':{'name':"Chuvash",'nativename':"чӑваш чӗлхи"},'kw':{'name':"Cornish",'nativename':"Kernewek"},'co':{'name':"Corsican",'nativename':"corsu, lingua corsa"},'cr':{'name':"Cree",'nativename':"ᓀᐦᐃᔭᐍᐏᐣ"},'hr':{'name':"Croatian",'nativename':"hrvatski"},'cs':{'name':"Czech",'nativename':"česky, čeština"},'da':{'name':"Danish",'nativename':"dansk"},'dv':{'name':"Divehi; Dhivehi; Maldivian;",'nativename':"ދިވެހި"},'nl':{'name':"Dutch",'nativename':"Nederlands, Vlaams"},'en':{'name':"English",'nativename':"English"},'eo':{'name':"Esperanto",'nativename':"Esperanto"},'et':{'name':"Estonian",'nativename':"eesti, eesti keel"},'ee':{'name':"Ewe",'nativename':"Eʋegbe"},'fo':{'name':"Faroese",'nativename':"føroyskt"},'fj':{'name':"Fijian",'nativename':"vosa Vakaviti"},'fi':{'name':"Finnish",'nativename':"suomi, suomen kieli"},'fr':{'name':"French",'nativename':"français, langue française"},'ff':{'name':"Fula; Fulah; Pulaar; Pular",'nativename':"Fulfulde, Pulaar, Pular"},'gl':{'name':"Galician",'nativename':"Galego"},'ka':{'name':"Georgian",'nativename':"ქართული"},'de':{'name':"German",'nativename':"Deutsch"},'el':{'name':"Greek, Modern",'nativename':"Ελληνικά"},'gn':{'name':"Guaraní",'nativename':"Avañeẽ"},'gu':{'name':"Gujarati",'nativename':"ગુજરાતી"},'ht':{'name':"Haitian; Haitian Creole",'nativename':"Kreyòl ayisyen"},'ha':{'name':"Hausa",'nativename':"Hausa, هَوُسَ"},'he':{'name':"Hebrew (modern)",'nativename':"עברית"},'hz':{'name':"Herero",'nativename':"Otjiherero"},'hi':{'name':"Hindi",'nativename':"हिन्दी, हिंदी"},'ho':{'name':"Hiri Motu",'nativename':"Hiri Motu"},'hu':{'name':"Hungarian",'nativename':"Magyar"},'ia':{'name':"Interlingua",'nativename':"Interlingua"},'id':{'name':"Indonesian",'nativename':"Bahasa Indonesia"},'ie':{'name':"Interlingue",'nativename':"Originally called Occidental; then Interlingue after WWII"},'ga':{'name':"Irish",'nativename':"Gaeilge"},'ig':{'name':"Igbo",'nativename':"Asụsụ Igbo"},'ik':{'name':"Inupiaq",'nativename':"Iñupiaq, 
Iñupiatun"},'io':{'name':"Ido",'nativename':"Ido"},'is':{'name':"Icelandic",'nativename':"Íslenska"},'it':{'name':"Italian",'nativename':"Italiano"},'iu':{'name':"Inuktitut",'nativename':"ᐃᓄᒃᑎᑐᑦ"},'ja':{'name':"Japanese",'nativename':"日本語 (にほんご/にっぽんご)"},'jv':{'name':"Javanese",'nativename':"basa Jawa"},'kl':{'name':"Kalaallisut, Greenlandic",'nativename':"kalaallisut, kalaallit oqaasii"},'kn':{'name':"Kannada",'nativename':"ಕನ್ನಡ"},'kr':{'name':"Kanuri",'nativename':"Kanuri"},'ks':{'name':"Kashmiri",'nativename':"कश्मीरी, كشميري"},'kk':{'name':"Kazakh",'nativename':"Қазақ тілі"},'km':{'name':"Khmer",'nativename':"ភាសាខ្មែរ"},'ki':{'name':"Kikuyu, Gikuyu",'nativename':"Gĩkũyũ"},'rw':{'name':"Kinyarwanda",'nativename':"Ikinyarwanda"},'ky':{'name':"Kirghiz, Kyrgyz",'nativename':"кыргыз тили"},'kv':{'name':"Komi",'nativename':"коми кыв"},'kg':{'name':"Kongo",'nativename':"KiKongo"},'ko':{'name':"Korean",'nativename':"한국어 (韓國語), 조선말 (朝鮮語)"},'ku':{'name':"Kurdish",'nativename':"Kurdî, كوردی"},'kj':{'name':"Kwanyama, Kuanyama",'nativename':"Kuanyama"},'la':{'name':"Latin",'nativename':"latine, lingua latina"},'lb':{'name':"Luxembourgish, Letzeburgesch",'nativename':"Lëtzebuergesch"},'lg':{'name':"Luganda",'nativename':"Luganda"},'li':{'name':"Limburgish, Limburgan, Limburger",'nativename':"Limburgs"},'ln':{'name':"Lingala",'nativename':"Lingála"},'lo':{'name':"Lao",'nativename':"ພາສາລາວ"},'lt':{'name':"Lithuanian",'nativename':"lietuvių kalba"},'lu':{'name':"Luba-Katanga",'nativename':""},'lv':{'name':"Latvian",'nativename':"latviešu valoda"},'gv':{'name':"Manx",'nativename':"Gaelg, Gailck"},'mk':{'name':"Macedonian",'nativename':"македонски јазик"},'mg':{'name':"Malagasy",'nativename':"Malagasy fiteny"},'ms':{'name':"Malay",'nativename':"bahasa Melayu, بهاس ملايو"},'ml':{'name':"Malayalam",'nativename':"മലയാളം"},'mt':{'name':"Maltese",'nativename':"Malti"},'mi':{'name':"Māori",'nativename':"te reo Māori"},'mr':{'name':"Marathi (Marāṭhī)",'nativename':"मराठी"},'mh':{'name':"Marshallese",'nativename':"Kajin M̧ajeļ"},'mn':{'name':"Mongolian",'nativename':"монгол"},'na':{'name':"Nauru",'nativename':"Ekakairũ Naoero"},'nv':{'name':"Navajo, Navaho",'nativename':"Diné bizaad, Dinékʼehǰí"},'nb':{'name':"Norwegian Bokmål",'nativename':"Norsk bokmål"},'nd':{'name':"North Ndebele",'nativename':"isiNdebele"},'ne':{'name':"Nepali",'nativename':"नेपाली"},'ng':{'name':"Ndonga",'nativename':"Owambo"},'nn':{'name':"Norwegian Nynorsk",'nativename':"Norsk nynorsk"},'no':{'name':"Norwegian",'nativename':"Norsk"},'ii':{'name':"Nuosu",'nativename':"ꆈꌠ꒿ Nuosuhxop"},'nr':{'name':"South Ndebele",'nativename':"isiNdebele"},'oc':{'name':"Occitan",'nativename':"Occitan"},'oj':{'name':"Ojibwe, Ojibwa",'nativename':"ᐊᓂᔑᓈᐯᒧᐎᓐ"},'cu':{'name':"Old Church Slavonic, Church Slavic, Church Slavonic, Old Bulgarian, Old Slavonic",'nativename':"ѩзыкъ словѣньскъ"},'om':{'name':"Oromo",'nativename':"Afaan Oromoo"},'or':{'name':"Oriya",'nativename':"ଓଡ଼ିଆ"},'os':{'name':"Ossetian, Ossetic",'nativename':"ирон æвзаг"},'pa':{'name':"Panjabi, Punjabi",'nativename':"ਪੰਜਾਬੀ, پنجابی"},'pi':{'name':"Pāli",'nativename':"पाऴि"},'fa':{'name':"Persian",'nativename':"فارسی"},'pl':{'name':"Polish",'nativename':"polski"},'ps':{'name':"Pashto, Pushto",'nativename':"پښتو"},'pt':{'name':"Portuguese",'nativename':"Português"},'qu':{'name':"Quechua",'nativename':"Runa Simi, Kichwa"},'rm':{'name':"Romansh",'nativename':"rumantsch grischun"},'rn':{'name':"Kirundi",'nativename':"kiRundi"},'ro':{'name':"Romanian, Moldavian, 
Moldovan",'nativename':"română"},'ru':{'name':"Russian",'nativename':"русский язык"},'sa':{'name':"Sanskrit (Saṁskṛta)",'nativename':"संस्कृतम्"},'sc':{'name':"Sardinian",'nativename':"sardu"},'sd':{'name':"Sindhi",'nativename':"सिन्धी, سنڌي، سندھی"},'se':{'name':"Northern Sami",'nativename':"Davvisámegiella"},'sm':{'name':"Samoan",'nativename':"gagana faa Samoa"},'sg':{'name':"Sango",'nativename':"yângâ tî sängö"},'sr':{'name':"Serbian",'nativename':"српски језик"},'gd':{'name':"Scottish Gaelic; Gaelic",'nativename':"Gàidhlig"},'sn':{'name':"Shona",'nativename':"chiShona"},'si':{'name':"Sinhala, Sinhalese",'nativename':"සිංහල"},'sk':{'name':"Slovak",'nativename':"slovenčina"},'sl':{'name':"Slovene",'nativename':"slovenščina"},'so':{'name':"Somali",'nativename':"Soomaaliga, af Soomaali"},'st':{'name':"Southern Sotho",'nativename':"Sesotho"},'es':{'name':"Spanish; Castilian",'nativename':"español, castellano"},'su':{'name':"Sundanese",'nativename':"Basa Sunda"},'sw':{'name':"Swahili",'nativename':"Kiswahili"},'ss':{'name':"Swati",'nativename':"SiSwati"},'sv':{'name':"Swedish",'nativename':"svenska"},'ta':{'name':"Tamil",'nativename':"தமிழ்"},'te':{'name':"Telugu",'nativename':"తెలుగు"},'tg':{'name':"Tajik",'nativename':"тоҷикӣ, toğikī, تاجیکی"},'th':{'name':"Thai",'nativename':"ไทย"},'ti':{'name':"Tigrinya",'nativename':"ትግርኛ"},'bo':{'name':"Tibetan Standard, Tibetan, Central",'nativename':"བོད་ཡིག"},'tk':{'name':"Turkmen",'nativename':"Türkmen, Түркмен"},'tl':{'name':"Tagalog",'nativename':"Wikang Tagalog, ᜏᜒᜃᜅ᜔ ᜆᜄᜎᜓᜄ᜔"},'tn':{'name':"Tswana",'nativename':"Setswana"},'to':{'name':"Tonga (Tonga Islands)",'nativename':"faka Tonga"},'tr':{'name':"Turkish",'nativename':"Türkçe"},'ts':{'name':"Tsonga",'nativename':"Xitsonga"},'tt':{'name':"Tatar",'nativename':"татарча, tatarça, تاتارچا"},'tw':{'name':"Twi",'nativename':"Twi"},'ty':{'name':"Tahitian",'nativename':"Reo Tahiti"},'ug':{'name':"Uighur, Uyghur",'nativename':"Uyƣurqə, ئۇيغۇرچە"},'uk':{'name':"Ukrainian",'nativename':"українська"},'ur':{'name':"Urdu",'nativename':"اردو"},'uz':{'name':"Uzbek",'nativename':"zbek, Ўзбек, أۇزبېك"},'ve':{'name':"Venda",'nativename':"Tshivenḓa"},'vi':{'name':"Vietnamese",'nativename':"Tiếng Việt"},'vo':{'name':"Volapük",'nativename':"Volapük"},'wa':{'name':"Walloon",'nativename':"Walon"},'cy':{'name':"Welsh",'nativename':"Cymraeg"},'wo':{'name':"Wolof",'nativename':"Wollof"},'fy':{'name':"Western Frisian",'nativename':"Frysk"},'xh':{'name':"Xhosa",'nativename':"isiXhosa"},'yi':{'name':"Yiddish",'nativename':"ייִדיש"},'yo':{'name':"Yoruba",'nativename':"Yorùbá"},'za':{'name':"Zhuang, Chuang",'nativename':"Saɯ cueŋƅ, Saw cuengh"}}
#languages = [("Abkhaz","аҧсуа"),("Afar","Afaraf"),("Afrikaans","Afrikaans"),("Akan","Akan"),("Albanian","Shqip"),("Amharic","አማርኛ"),("Arabic","العربية"),("Aragonese","Aragonés"),("Armenian","Հայերեն"),("Assamese","অসমীয়া"),("Avaric","авар мацӀ"),("Avestan","avesta"),("Aymara","aymar aru"),("Azerbaijani","azərbaycan dili"),("Bambara","bamanankan"),("Bashkir","башҡорт теле"),("Basque","euskara"),("Belarusian","Беларуская"),("Bengali","বাংলা"),("Bihari","भोजपुरी"),("Bislama","Bislama"),("Bosnian","bosanski jezik"),("Breton","brezhoneg"),("Bulgarian","български език"),("Burmese","Burmese"),("Catalan","Català"),("Chamorro","Chamoru"),("Chechen","нохчийн мотт"),("Chichewa","chiCheŵa"),("Chinese","中文"),("Chuvash","чӑваш чӗлхи"),("Cornish","Kernewek"),("Corsican","corsu"),("Cree","ᓀᐦᐃᔭᐍᐏᐣ"),("Croatian","hrvatski"),("Czech","česky"),("Danish","dansk"),("Divehi","ދިވެހި"),("Dutch","Nederlands"),("Dzongkha","རྫོང་ཁ"),("English","English"),("Esperanto","Esperanto"),("Estonian","eesti"),("Ewe","Eʋegbe"),("Faroese","føroyskt"),("Fijian","vosa Vakaviti"),("Finnish","suomi"),("French","français"),("Fula","Fulfulde | Pulaar"),("Gaelic","Gàidhlig"),("Galician","Galego"),("Georgian","ქართული"),("German","Deutsch"),("Greek","Ελληνικά"),("Guaraní","Avañe'ẽ"),("Gujarati","ગુજરાતી"),("Haitian","Kreyòl ayisyen"),("Hausa","هَوُسَ"),("Hebrew","עברית"),("Herero","Otjiherero"),("Hindi","हिन्दी| हिंदी"),("Hiri Motu","Hiri Motu"),("Hungarian","Magyar"),("Icelandic","Íslenska"),("Ido","Ido"),("Igbo","Asụsụ Igbo"),("Indonesian","Bahasa Indonesia"),("Interlingua","Interlingua"),("Interlingue","Interlingue"),("Inuktitut","ᐃᓄᒃᑎᑐᑦ"),("Inupiaq","Iñupiaq"),("Irish","Gaeilge"),("Italian","Italiano"),("Japanese","日本語"),("Javanese","basa Jawa"),("Kalaallisut","kalaallisut"),("Kannada","ಕನ್ನಡ"),("Kanuri","Kanuri"),("Kashmiri","कश्मीरी"),("Kazakh","Қазақ тілі"),("Khmer","ភាសាខ្មែរ"),("Kikuyu","Gĩkũyũ"),("Kinyarwanda","Ikinyarwanda"),("Kirghiz","кыргыз тили"),("Kirundi","kiRundi"),("Komi","коми кыв"),("Kongo","KiKongo"),("Korean","한국어 (韓國語)"),("Kurdish","Kurdî"),("Kwanyama","Kuanyama"),("Lao","ພາສາລາວ"),("Latin","latine"),("Latvian","latviešu valoda"),("Lezgian","Лезги чlал"),("Limburgish","Limburgs"),("Lingala","Lingála"),("Lithuanian","lietuvių kalba"),("Luba-Katanga","Luba-Katanga"),("Luganda","Luganda"),("Luxembourgish","Lëtzebuergesch"),("Macedonian","македонски јазик"),("Malagasy","Malagasy fiteny"),("Malay","bahasa Melayu"),("Malayalam","മലയാളം"),("Maltese","Malti"),("Manx","Gaelg"),("Marathi","मराठी"),("Marshallese","Kajin M̧ajeļ"),("Mongolian","монгол"),("Māori","te reo Māori"),("Nauru","Ekakairũ Naoero"),("Navajo","Diné bizaad"),("Ndonga","Owambo"),("Nepali","नेपाली"),("North Ndebele","isiNdebele"),("Norwegian","Norsk"),("Nuosu","Nuosuhxop"),("Occitan","Occitan"),("Ojibwe","ᐊᓂᔑᓈᐯᒧᐎᓐ"),("Oriya","ଓଡ଼ିଆ"),("Oromo","Afaan Oromoo"),("Ossetian","ирон æвзаг"),("Panjabi","ਪੰਜਾਬੀ| پنجابی"),("Pashto","پښتو"),("Persian","فارسی"),("Polish","polski"),("Portuguese","Português"),("Pāli","पाऴि"),("Quechua","Kichwa"),("Romanian","română"),("Romansh","rumantsch grischun"),("Russian","русский язык"),("Sami (Northern)","Davvisámegiella"),("Samoan","gagana fa'a Samoa"),("Sango","yângâ tî sängö"),("Sanskrit","संस्कृतम्"),("Sardinian","sardu"),("Serbian","српски језик"),("Shona","chiShona"),("Sindhi","सिन्धी"),("Sinhala","සිංහල"),("Slavonic","ѩзыкъ словѣньскъ"),("Slovak","slovenčina"),("Slovene","slovenščina"),("Somali","Soomaaliga"),("South Ndebele","isiNdebele"),("Southern Sotho","Sesotho"),("Spanish","español | 
castellano"),("Sundanese","Basa Sunda"),("Swahili","Kiswahili"),("Swati","SiSwati"),("Swedish","svenska"),("Tagalog","Wikang Tagalog"),("Tahitian","Reo Tahiti"),("Tajik","тоҷикӣ"),("Tamil","தமிழ்"),("Tatar","татарча"),("Telugu","తెలుగు"),("Thai","ไทย"),("Tibetan","བོད་ཡིག"),("Tigrinya","ትግርኛ"),("Tonga","faka Tonga"),("Tsonga","Xitsonga"),("Tswana","Setswana"),("Turkish","Türkçe"),("Turkmen","Türkmen | Түркмен"),("Twi","Twi"),("Uighur","Uyƣurqə"),("Ukrainian","українська"),("Urdu","اردو"),("Uzbek","O'zbek"),("Venda","Tshivenḓa"),("Vietnamese","Tiếng Việt"),("Volapük","Volapük"),("Walloon","Walon"),("Welsh","Cymraeg"),("Western Frisian","Frysk"),("Wolof","Wollof"),("Xhosa","isiXhosa"),("Yiddish","ייִדיש"),("Yoruba","Yorùbá"),("Zhuang","Saɯ cueŋƅ"),("Zulu","isiZulu")]
goog = ['Afrikaans', 'Albanian', 'Arabic', 'Armenian', 'Azerbaijani', 'Basque', 'Belarusian', 'Bengali', 'Bosnian', 'Bulgarian', 'Catalan', 'Cebuano', 'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Esperanto', 'Estonian', 'Filipino', 'Finnish', 'French', 'Galician', 'Georgian', 'German', 'Greek', 'Gujarati', 'Haitian Creole', 'Hausa', 'Hebrew', 'Hindi', 'Hmong', 'Hungarian', 'Icelandic', 'Igbo', 'Indonesian', 'Irish', 'Italian', 'Japanese', 'Javanese', 'Kannada', 'Khmer', 'Korean', 'Lao', 'Latin', 'Latvian', 'Lithuanian', 'Macedonian', 'Malay', 'Maltese', 'Maori', 'Marathi', 'Mongolian', 'Nepali', 'Norwegian', 'Persian', 'Polish', 'Portuguese', 'Punjabi', 'Romanian', 'Russian', 'Serbian', 'Slovak', 'Slovenian', 'Somali', 'Spanish', 'Swahili', 'Swedish', 'Tamil', 'Telugu', 'Thai', 'Turkish', 'Ukrainian', 'Urdu', 'Vietnamese', 'Welsh', 'Yiddish', 'Yoruba', 'Zulu']
language_tuples = []
for key in languages:
language_tuples.append((languages[key]['name'], languages[key]['nativename'], key))
language_tuples = sorted(language_tuples, key=lambda a: a[0])
for l in language_tuples:
goog_trans = None
if goog.count(l[0]) > 0: goog_trans =l[2]
language = Language(english_name=l[0], native_name=l[1], iso_lang=l[2], goog_translate=goog_trans)
session.add(language)
transaction.commit()
session.close()
def initialize_sql(engine, settings):
DBSession.configure(bind=engine)
Base.metadata.bind = engine
Base.metadata.create_all(engine)
# if settings.has_key('apex.velruse_providers'):
# pass
#SQLBase.metadata.bind = engine
#SQLBase.metadata.create_all(engine)
try:
populate(settings)
except IntegrityError:
transaction.abort()
|
[
"johnpaulcadigan@gmail.com"
] |
johnpaulcadigan@gmail.com
|
5bfc5f2cabecf0d946bad1504ba6985fda33a417
|
1b1144757634a9cab972ed5696199910ba762912
|
/mysite/app/user/migrations/0005_auto_20210302_1918.py
|
dd505597b2f7558b46067bafbb80a58d77b59243
|
[] |
no_license
|
bair2503/Python
|
67a44905c499c4cec1d29c090112fecd0e82e1c4
|
1ae168cbf269b781b8fd7d4b2fbcfa828362f3d4
|
refs/heads/main
| 2023-06-24T13:41:59.636586
| 2021-07-28T19:26:09
| 2021-07-28T19:26:09
| 390,099,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Generated by Django 3.1 on 2021-03-02 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0004_auto_20210302_1911'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='address',
field=models.CharField(max_length=200),
),
]
|
[
"bairgatapov93@mail.ru"
] |
bairgatapov93@mail.ru
|
97632d9e7d7bf489360aea53ea24698165011038
|
8a1d0238e997e7c953a21fc397f76a6c145b5e09
|
/configs/gb_SMALL_STIMS_LARGE_MATRIX.bcicfg.py
|
401a3c23afe5d9edc2c565238ca773aaf19936c9
|
[] |
no_license
|
luciopercable/eye_loc
|
8c39cb562bcbb46b6d9d01ac224ad77e91193559
|
a30f9690246b1f9f69ccbdb9c53519e9d4677e94
|
refs/heads/master
| 2020-04-19T11:58:37.406875
| 2018-12-08T17:41:55
| 2018-12-08T17:41:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import itertools
r,c = 6,6
step_vert = 110
step_horiz = 110
names = [a for a in u'abcdefghijklmonpqrstuvwxyz_1234567890!@#$%^&*()+=-~[]{};:\"\|?.,/<>½¾¿±®©§£¥¢÷µ¬']
aim_word = 'neuroscience_158'
rows = [list(a) for a in np.arange(r*c).reshape((c,r)).T]
columns = [list(a) for a in np.arange(r*c).reshape((c,r))]
posr = [55 - step_horiz* (len(rows)/2- a) for a in range(r)]
posc = [55- step_vert* (len(columns)/2- a) for a in range(len(columns))]
pos = [(r, c) for c in posc[::-1] for r in posr ]
config = {
'stimuli_dir':'.\\rescources\\stimuli\\letters_grey_black',
'background':'black',
'rows':rows,
'columns':columns,
'positions':pos,
'names':names,
'size':50,
'window_size':(1680, 1050),
'number_of_inputs':12,
'aims_learn': [0,5,35,30,21],#[29,35],#,9,13,17,21,25,30,34,38],
'aims_play': [names.index(a) for a in aim_word],#[0:2]
'shrink_matrix' : 1,
'textsize' : 0.07
}
# print config['aims_play']
#print rows + columns
print pos
|
[
"kriattiffer@gmail.com"
] |
kriattiffer@gmail.com
|
32fb9188aec819ccd91d7b952306ceb971f26b87
|
2d308f49fd8326173f2a1cf6ba1ab25b0abff302
|
/rxbpn/testing/tobserver.py
|
1f00d63ddba26fd7f0f8138d5487905f7b6dfcc4
|
[
"BSD-3-Clause"
] |
permissive
|
JIAWea/rxbpn
|
6c407dd38524a2e6b2f800cf9b88ebd0287398c6
|
8760d086c802291398c25d7bd8e4e541962b191a
|
refs/heads/main
| 2023-03-16T18:49:05.563516
| 2021-01-29T09:58:20
| 2021-01-29T09:58:20
| 333,379,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
from rxbp.acknowledgement.acksubject import AckSubject
from rxbp.acknowledgement.continueack import continue_ack
from rxbp.observer import Observer as AckObserver
class TASubscribe(AckObserver):
def on_next(self, value):
# if value[0] == 11:
# return AckSubject()
print("Received: {}, type: {}".format(value, type(value)))
return continue_ack
def on_completed(self):
print('Done!')
def on_error(self, exc):
print('Exception: ', exc)
|
[
"1552937000@qq.com"
] |
1552937000@qq.com
|
4b766dd20dadec39d3f2a2f88debe30a8f290e2a
|
4551ef7051f937af33908fdd0768bc7174caba97
|
/dwh/pipeline.py
|
5cca159ca5561ae71560c280bf61377f2d999c4b
|
[] |
no_license
|
thangaiya/SelfService
|
171163aba5d72053beaa55887b563fc7477b5292
|
7e6e2827b27fe81e29b06b546b880d9440912264
|
refs/heads/master
| 2021-04-09T14:51:30.211736
| 2018-03-18T12:27:08
| 2018-03-18T12:27:08
| 125,722,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,433
|
py
|
import re
from csv import DictReader
from collections import *
from itertools import chain
from xml.parsers.expat import ParserCreate
from unidecode import unidecode
from pathlib import Path
from typing import *
from typing.io import *
from typing.re import *
from .utils import dispatch_function, XMLStack, PersistentBuffer
from .configuration import Configurations
def normalize(field):
if isinstance(field, str):
field = unidecode(field)
return field
@dispatch_function
def parse(file: IO, type: Text, **kwargs) -> Hashable:
"""Dispatch `file` to the appropriate parser (specified by `type`)"""
return type.lower()
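# Note: parse() itself only computes the dispatch key (the lowercased file type);
# the @parse.register('csv' / 'txt' / 'xml') handlers below do the actual parsing.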
@parse.register('csv')
def _(file: IO, type: Text, **kwargs) -> Iterator[Dict]:
"""Parse a csv `file`"""
yield from DictReader(file, **kwargs)
@parse.register('txt')
def _(file: IO, type: Text, pattern: Pattern, flags: int = 0) -> Iterator[Dict]:
"""Parse a text `file`"""
yield from (item.groupdict() for item in re.finditer(pattern, file.read(), flags))
@parse.register('xml')
def _(file: IO, type: Text, buffer_size: int = 65536, buffer_text: bool = True) -> Iterator[Tuple]:
"""Parse an xml `file`"""
parser = ParserCreate()
parser.buffer_size = buffer_size
parser.buffer_text = buffer_text
stack = XMLStack()
parser.StartElementHandler = stack.start
parser.EndElementHandler = stack.end
parser.CharacterDataHandler = stack.character
for line in file:
parser.Parse(line, False)
yield from stack.items()
stack.clear()
def project(values: Dict, mappings: Dict) -> Dict:
"""Performs a projection of `values` (row) to `mappings` (schema)"""
return {(alias or field): normalize(values.get(field)) for (field, alias) in mappings.items()}
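# Illustrative example of project() (hypothetical row and schema):
#   project({'a': 1, 'b': 2}, {'a': 'x', 'b': None})  ->  {'x': 1, 'b': 2}
# i.e. a field is renamed when an alias is given and kept under its own name otherwise.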
@dispatch_function
def dispatch(item: Union[Dict, Tuple], outputs: List[Dict]) -> Hashable:
"""Dispatch `item` to the appropriate dispatcher (based on its type)"""
return type(item)
@dispatch.register(OrderedDict)
@dispatch.register(dict)
def _(item: Dict, outputs: List[Dict]) -> Iterator[Tuple[Text, Dict]]:
"""Dispatch `item` (row) to multiple `outputs` (tables)"""
yield from ((output['name'], project(item, output['fields'])) for output in outputs)
@dispatch.register(tuple)
def _(item: Tuple, outputs: List[Dict]) -> Iterator[Tuple[Text, Dict]]:
"""Dispatch `item` (row) to multiple `outputs` (tables)"""
tag, values = item
yield from ((output['name'], project(values, output['fields'])) for output in outputs if tag == output['tag'])
def apply_pipeline(file: Path, config: Dict) -> Iterator[Tuple[Text, Dict]]:
"""Pass `file` through the pipeline (specified by `config`)"""
if re.match(config.get('pattern'), file.name):
with open(file, **config.get('source_args')) as f:
yield from chain.from_iterable(dispatch(item, config.get('outputs')) for item in parse(f, **config.get('parser_args')))
def get_rows(file: Path, configs: List[Dict]) -> Iterator[Tuple[Text, Dict]]:
"""Pass `file` through all the pipelines (specified by `configs`)"""
yield from chain.from_iterable((apply_pipeline(file, config) for config in configs))
def persist(pb: PersistentBuffer, config: Configurations, file):
try:
for table, row in get_rows(file, config.get()):
pb.add(table, row)
except Exception as e:
raise e
finally:
file.unlink()
|
[
"thangaiya@gmail.com"
] |
thangaiya@gmail.com
|
be26a348d616f0dddc10ae8c7ad0db166b68900d
|
3b1bb402b4d11dfd6e3a6430b1275e6ee814ecf8
|
/client/clustering/ClusterProceduralWeaponSingle.py
|
9bf44ac25365898028388d16b3decba73731393a
|
[] |
no_license
|
DanieleGravina/ProceduralWeapon
|
546c6e966381717b2a7c2c2bd4fcdd14d05abb76
|
d8b3289086f857f31349bac7edb12de313de4319
|
refs/heads/master
| 2021-01-10T20:54:38.864377
| 2015-04-03T15:33:12
| 2015-04-03T15:33:12
| 26,865,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,229
|
py
|
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from sklearn.manifold import MDS
from sklearn.cluster import DBSCAN
import numpy as np
from sklearn import metrics
import statistics
import matplotlib.pyplot as plt
from radar_chart_single import draw_radar
from math import *
from Costants import *
limits = [(ROF_MIN/100, ROF_MAX/100), (SPREAD_MIN/100, SPREAD_MAX/100), (AMMO_MIN, AMMO_MAX), (SHOT_COST_MIN, SHOT_COST_MAX), (RANGE_MIN/100, RANGE_MAX/100),
(SPEED_MIN, SPEED_MAX), (DMG_MIN, DMG_MAX), (DMG_RAD_MIN, DMG_RAD_MAX), (-GRAVITY_MIN, -GRAVITY_MAX),
(EXPLOSIVE_MIN, EXPLOSIVE_MAX)]
label =["ROF", "SPREAD", "AMMO", "SHOT_COST", "LIFE_SPAN", "SPEED", "DMG", "DMG_RAD", "GRAVITY", "EXPLOSIVE"]
def printWeapon(pop):
for ind in pop :
print("Weapon "+ " Rof:" + str(ind[0]) + " Spread:" + str(ind[1]) + " MaxAmmo:" + str(ind[2])
+ " ShotCost:" + str(ind[3]) + " Range:" + str(ind[4]) )
print("Projectile "+ " Speed:" + str(ind[5]) + " Damage:" + str(ind[6]) + " DamageRadius:" + str(ind[7])
+ " Gravity:" + str(ind[8]) + " Explosive:" + str(ind[9]))
print("*********************************************************" + "\n")
def writeWeapon(pop, pop_file):
i = 0
for ind in pop :
pop_file.write("(" + str(i) + ")" + "\n")
i += 1
pop_file.write("Weapon "+ " Rof:" + str(ind[0]) + " Spread:" + str(ind[1]) + " MaxAmmo:" + str(ind[2])
+ " ShotCost:" + str(ind[3]) + " Range:" + str(ind[4]) + "\n")
pop_file.write("Projectile "+ " Speed:" + str(ind[5]) + " Damage:" + str(ind[6]) + " DamageRadius:" + str(ind[7])
+ " Gravity:" + str(ind[8]) + " Explosive:" + str(ind[9]) +"\n")
def normalize(data):
for i in range(data.shape[0]):
for j in range(data.shape[1]):
data[i][j] = (data[i][j] - limits[j][0])/(limits[j][1] - limits[j][0])
return data
def postProcess(data):
clone = list(data)
#fireinterval become rate of fire -> (1/fireinterval)
clone[0] = log(1/(ROF_MIN/100)) + log(1/clone[0])
#gravity is inverted
clone[8] = - clone[8]
return clone
class ClusterProceduralWeapon:
def __init__(self, data = None, fitness = None, file = None):
self.data = data
self.fits = fitness
self.file = file
def cluster(self):
try :
os.makedirs("cluster")
os.chdir("cluster")
except :
os.chdir("cluster")
self.file = open("cluster.txt", "w")
cluster_file = self.file
X = np.array(self.data, np.float32)
print(X.shape)
X = normalize(X)
db = DBSCAN(eps=0.05, min_samples=5).fit(X)
labels = db.labels_
labels_unique = np.unique( [labels[i] for i in range(len(labels)) if labels[i] != - 1] )
n_clusters_ = len(labels_unique)
print(labels)
index = []
fitness = []
entropy_mean = []
entropy_stdev = []
dist_mean = []
dist_stdev = []
fits_clustered = [[] for _ in range(n_clusters_)]
clusters = [[] for _ in range(n_clusters_)]
print("number of estimated clusters : %d" % n_clusters_ )
cluster_file.write("number of estimated clusters : %d" % n_clusters_ + "\n")
num_cluster = 0
for k in range(n_clusters_):
my_members = labels == k
for i in range(len(labels)):
if my_members[i]:
index += [i]
fitness += [self.fits[i]]
fits_clustered[k] += [self.fits[i]]
if fitness != []:
entropy_mean += [ statistics.mean( [fitness[i][0] for i in range(len(fitness))] ) ]
entropy_stdev += [ statistics.stdev( [fitness[i][0] for i in range(len(fitness))] ) ]
dist_mean += [ statistics.mean( [fitness[i][1] for i in range(len(fitness))] ) ]
dist_stdev += [ statistics.stdev( [fitness[i][1] for i in range(len(fitness))] ) ]
clusters[k] += [postProcess(self.data[i]) for i in range(len(labels)) if my_members[i]]
cluster_file.write("cluster: " + str(num_cluster) + "°" + "\n")
num_cluster += 1
cluster_file.write("index:"+ "\n")
cluster_file.write(str(index) + "\n")
cluster_file.write("fitness:"+ "\n")
cluster_file.write(str(fitness)+ "\n")
cluster_file.write("mean balance:"+ "\n")
cluster_file.write(str(entropy_mean)+ "\n")
cluster_file.write("std dev balance:"+ "\n")
cluster_file.write(str(entropy_stdev)+ "\n")
cluster_file.write("mean dist from target:"+ "\n")
cluster_file.write(str(dist_mean)+ "\n")
cluster_file.write("std dev dist from target:"+ "\n")
cluster_file.write(str(dist_stdev)+ "\n")
cluster_file.write("members:"+ "\n")
writeWeapon([self.data[i] for i in range(len(labels)) if my_members[i]], cluster_file)
cluster_file.write("==========================================================================="+ "\n")
print(index)
print("members:")
printWeapon([self.data[i] for i in range(len(labels)) if my_members[i]])
print("fitness:"+ "\n")
print(str(fitness)+ "\n")
print("mean entropy:"+ "\n")
print(str(entropy_mean)+ "\n")
print("std dev fitness:"+ "\n")
print(str(entropy_stdev)+ "\n")
print("mean dist:"+ "\n")
print(str(dist_mean)+ "\n")
print("std dev dist:"+ "\n")
print(str(dist_stdev)+ "\n")
print("mean of cluster")
print(np.mean([self.data[i] for i in range(len(labels)) if my_members[i]], axis=0))
print("std of cluster")
print(np.std([self.data[i] for i in range(len(labels)) if my_members[i]], axis=0))
index = []
fitness = []
entropy_mean = []
entropy_stdev = []
dist_mean = []
dist_stdev = []
colors = list('bgrcmykbgrcmykbgrcmykbgrcmyk')
'''
mds = MDS(n_components=2)
pos = mds.fit_transform(X.astype(np.float64))
colors = list('bgrcmykbgrcmykbgrcmykbgrcmyk')
plt.figure(figsize=(16,9))
for i in range(len(pos[:,0])):
if labels[i] != -1 :
plt.plot(pos[i, 0], pos[i, 1], 'o', markerfacecolor=colors[labels[i]], markeredgecolor='k')
else:
plt.plot(pos[i, 0], pos[i, 1], 'x', markerfacecolor=colors[labels[i]], markeredgecolor='k')
plt.savefig("mds.png", bbox_inches='tight')
plt.close()
'''
X_ordered = []
        X = np.array(self.data)
colors_ordered = []
fits_ordered = []
colors_cluster = []
for i in range(n_clusters_):
for j in range(len(labels)):
if labels[j] == i and labels[j] != -1:
X_ordered.append(X[j][:])
fits_ordered.append(self.fits[j])
colors_ordered += [colors[labels[j]]]
colors_cluster += [colors_ordered[len(colors_ordered) - 1]]
labels_ = [labels[i] for i in range(len(labels)) if labels[i] != -1]
'''
width = 0.8
ind = np.arange(len(labels_))
fig = plt.figure(figsize=(9, 9))
fig.subplots_adjust(wspace=0.50, hspace=0.25)
k = [i for i in range(len(labels_))]
for j in range(10):
ax = fig.add_subplot(4, 3, j+1)
plt.ylabel(label[j])
plt.ylim(limits[j][0], limits[j][1])
ax.bar(k, [X_ordered[ind][j] for ind in range(len(labels_))], color=colors_ordered)
'''
#plt.show()
'''
plt.figure(9)
colors_cluster = [colors[labels[i]] for i in range(len(labels))]
width = 0.8
ind = np.arange(len(labels))
k = [i for i in range(len(labels))]
for j in range(9):
plt.subplot(330 + j)
plt.ylabel(label[j])
plt.ylim(limits[j][0], limits[j][1])
plt.bar(k, [X[i][j] for i in range(len(labels))], color=colors_cluster)
#plt.xticks(ind+width/2., list(str(i) for i in range(len(labels)) ) )
#plt.show()
'''
drawRadarChart(self, clusters, n_clusters_, colors_cluster, fits_clustered)
drawBarPlot(self, clusters, n_clusters_, colors_cluster, fits_clustered)
def drawRadarChart(self, clusters, n_clusters_, colors, fits):
weapons = []
num_samples = []
for cluster in clusters:
if(len(cluster) > 0):
weapons += [np.mean(cluster, axis=0)]
num_samples += [len(cluster)]
index = 0
while len(weapons) > 0 :
draw_radar(weapons[:1], colors[index], fits[index], num_samples[0])
weapons = weapons[1:]
num_samples = num_samples[1:]
index += 1
plt.savefig("radar"+ str(index) + ".png", bbox_inches='tight')
plt.close()
def drawBarPlot(self, clusters, n_clusters_, colors_cluster, fitness_cluster):
weapons = []
weapons_std = []
limits[0] = (0, log(1/(ROF_MIN/100))*2)
alphabet = list("ABCDEFGHILMNOPQRSTUVZ")
k = np.arange(n_clusters_)
width = 0.35
for cluster in clusters:
if(len(cluster) > 0):
weapons += [list(np.mean(cluster, axis=0))]
weapons_std += [list(np.std(cluster, axis=0))]
fig = plt.figure(figsize=(16, 9))
fig.subplots_adjust(wspace=0.80, hspace=0.25)
for j in range(10):
ax = fig.add_subplot(4, 3, j+1)
plt.ylabel(label[j])
plt.ylim(limits[j][0], limits[j][1])
ax.bar(k, [weapons[ind][j] for ind in range(n_clusters_)], width, color=colors_cluster)
ax.set_xticks(k + width/2)
ax.set_xticklabels( alphabet[:n_clusters_] )
ax = fig.add_subplot(4, 3, 11)
plt.ylabel("BALANCE")
ax.bar(k, [np.mean(fitness_cluster[i], axis = 0)[0] for i in range(n_clusters_)], width, color=colors_cluster,
yerr = [np.std(fitness_cluster[i], axis = 0)[0] for i in range(n_clusters_)])
ax.set_xticks(k + width/2)
ax.set_xticklabels( alphabet[:n_clusters_] )
ax = fig.add_subplot(4, 3, 12)
plt.ylabel("DISTANCE")
ax.bar(k, [np.mean(fitness_cluster[i], axis = 0)[1] for i in range(n_clusters_)], width, color=colors_cluster,
yerr = [np.std(fitness_cluster[i], axis = 0)[1] for i in range(n_clusters_)])
ax.set_xticks(k + width/2)
ax.set_xticklabels( alphabet[:n_clusters_] )
plt.savefig("cluster.png", bbox_inches='tight', dpi = 200)
plt.close()
def main():
data = []
pop_file = open("population_cluster.txt", "r")
content = pop_file.readlines()
temp = []
fitness = []
for string in content:
if "Weapon" in string or "Projectile" in string:
split_spaces = string.split(" ")
for splitted in split_spaces:
if ":" in splitted:
split_colon = splitted.split(":")
temp += [float(split_colon[1])]
if (len(temp) == 10):
data += [temp]
temp = []
if "fitness" in string :
split_spaces = string.split(" ")
temp_fit = []
for splitted in split_spaces:
if "(" in splitted:
splitted = splitted.replace("(", "")
splitted = splitted.replace(")", "")
splitted = splitted.replace(",", "")
splitted = splitted.replace("\n", "")
fit = float(splitted)
temp_fit += [fit]
if ")" in splitted:
splitted = splitted.replace("(", "")
splitted = splitted.replace(")", "")
splitted = splitted.replace(",", "")
splitted = splitted.replace("\n", "")
dist = float(splitted)
temp_fit += [dist]
fitness += [temp_fit]
fits = np.array( [fitness[i][0] for i in range(len(fitness))] )
dists = np.array( [fitness[i][1] for i in range(len(fitness))] )
#get third quartile
q3 = np.percentile(fits, 75)
print("third quartile " + str(q3))
data_filtered = []
dists_filtered = []
fits_filtered = []
#filter out ind with fit < q3
for i in range(len(fits)):
if fits[i] >= 0 :
data_filtered += [data[i]]
dists_filtered += [dists[i]]
fits_filtered += [fitness[i]]
d3 = np.percentile(dists_filtered, 50)
print("median dist" + str(d3))
c = ClusterProceduralWeapon(data_filtered, fits_filtered)
c.cluster()
main()
|
[
"daniele.gravina@mail.polimi.it"
] |
daniele.gravina@mail.polimi.it
|
cb9d222ef240028c7c2bb5e92e4ffa33f9a97b42
|
1ef667feb6d4653dab8bd2d0474bbce37a568900
|
/perm/ops/user_perm.py
|
ad610982420a34e7be387fd8652d7079f1d1b884
|
[
"MIT"
] |
permissive
|
ni-ning/pauli
|
02f05859b0285914f26a50b9b68344bfa72e7666
|
5fdcba9c0aa3bb3f960546ee078f417a0f772a84
|
refs/heads/master
| 2020-07-30T13:27:00.562980
| 2019-09-10T09:48:08
| 2019-09-10T09:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,502
|
py
|
# coding:utf-8
import six
from ...auth.models import User
from ..models import UserPerm, RoleDesc
from . import perm_base
def get_or_create_user_perm(user_id):
user_perm = UserPerm.objects(user_id=user_id, soft_del=False).first()
if not user_perm:
user = User.objects(id=user_id, soft_del=False).first()
if not user:
return None
user_perm = UserPerm(user_id=user_id)
user_perm.save()
return user_perm
def add_perm(perm, user_id=None, role_id=None):
if isinstance(perm, six.string_types):
perm = perm_base.get_perm_desc_from_string(perm)
if not perm_base.is_valid_perm_desc(perm):
return False, "错误的权限描述"
if user_id:
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
return False, "目标用户不存在"
if any(map(lambda x: perm_base.equ(perm, x), user_perm.perms)):
return False, "重复的权限描述"
user_perm.perms.append(perm)
user_perm.save()
return True, user_perm
elif role_id:
role = RoleDesc.objects(id=role_id).first()
if not role:
return False, "目标角色不存在"
if any(map(lambda x: perm_base.equ(perm, x), role.perms)):
return False, "重复的权限描述"
role.perms.append(perm)
role.save()
return True, role
else:
return False, "用户或者角色id未提供"
def update_perms(perms, user_id=None, role_id=None):
parsed_perms = []
for perm in perms:
if isinstance(perm, six.string_types):
perm = perm_base.get_perm_desc_from_string(perm)
if not perm_base.is_valid_perm_desc(perm):
return False, "错误的权限描述 %s" % perm
parsed_perms.append(perm)
if user_id:
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
return False, "目标用户不存在"
user_perm.perms = parsed_perms
user_perm.save()
return True, user_perm
elif role_id:
role = RoleDesc.objects(id=role_id).first()
if not role:
return False, "目标角色不存在"
        role.perms = parsed_perms
role.save()
return True, role
else:
return False, "用户或者角色id未提供"
def remove_perm(perm, user_id=None, role_id=None):
if isinstance(perm, six.string_types):
perm = perm_base.get_perm_desc_from_string(perm)
if not perm_base.is_valid_perm_desc(perm):
return False, "错误的权限描述"
if user_id:
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
return False, "目标用户不存在"
user_perm.perms = list(filter(lambda x: not perm_base.equ(perm, x),
user_perm.perms))
user_perm.save()
return True, user_perm
elif role_id:
role = RoleDesc.objects(id=role_id, soft_del=False).first()
if not role:
return False, "目标角色不存在"
role.perms = list(filter(lambda x: not perm_base.equ(perm, x),
role.perms))
role.save()
return True, role
else:
raise Exception("Neither user_id and role_id is available.")
def get_user_perm_list(user_id, exclude_role=False):
'''
    Return the user's permission list.
    user_id: str, the user's id
    exclude_role: boolean, whether to exclude permissions that come from the user's roles
'''
user_perm = UserPerm.objects(user_id=user_id, soft_del=False).first()
if not user_perm:
return []
else:
perms = []
perms.extend(user_perm.perms)
if (not exclude_role) and user_perm.roles:
roles = RoleDesc.objects(id__in=user_perm.roles,
soft_del=False)
for role in roles:
perms.extend(role.perms)
return perms
def has_perm(user_id, target_perm_desc,
is_upstream=False, is_owner=False, perm_list=None):
'''
    Check whether the user holds a given permission.
    perm_list: list, optional. If provided, the user's permission list is not
    re-fetched from user_id on every call.
'''
if isinstance(target_perm_desc, six.string_types):
target_perm_desc = perm_base\
.get_perm_desc_from_string(target_perm_desc)
perm_list = perm_list or get_user_perm_list(user_id)
if perm_base.is_perm_allowed(perm_list, target_perm_desc,
is_upstream=is_upstream, is_owner=is_owner):
return True
#for i in perm_list:
# if perm_base.is_perm_matched(i, target_perm_desc,
# is_upstream=is_upstream, is_owner=is_owner):
# return True
return False
def get_user_roles(user_id):
'''
Return the role objects.
'''
user_perm = UserPerm.objects(user_id=user_id, soft_del=False).first()
if user_perm and user_perm.roles:
roles = RoleDesc.objects(id__in=user_perm.roles,
soft_del=False)
return roles
return []
def refresh_user_info_roles(user_id, roles):
user = User.objects(id=user_id).first()
if user:
role_names = []
if roles:
role_names = [role_desc.name for role_desc in\
RoleDesc.objects(id__in=roles,
soft_del=False)]
user.info['role_names'] = role_names
user.save()
return True
else:
return False
def update_user_roles(user_id, role_ids):
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
return False, "用户不存在或已删除"
role_ids = list(set(role_ids))
role_count = RoleDesc.objects(id__in=role_ids,
soft_del=False).count()
if len(role_ids) != role_count:
return False, "存在无效的用户角色"
user_perm.roles = role_ids
user_perm.save()
refresh_user_info_roles(user_perm.user_id, user_perm.roles)
return True, user_perm
def add_role_to_user(role_id, user_id):
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
return False, "用户不存在或已删除"
role = RoleDesc.objects(id=role_id, soft_del=False).first()
if not role:
return False, "角色不存在或已经删除"
if not role_id in user_perm.roles:
user_perm.roles.append(role_id)
user_perm.save()
refresh_user_info_roles(user_perm.user_id, user_perm.roles)
return True, user_perm
def remove_role_from_user(role_id, user_id):
user_perm = get_or_create_user_perm(user_id)
if not user_perm:
return False, "用户不存在或已删除"
user_perm.roles = list(filter(lambda x: x != role_id,
user_perm.roles))
user_perm.save()
refresh_user_info_roles(user_perm.user_id, user_perm.roles)
return True, user_perm
def get_all_roles(soft_del=False):
roles = RoleDesc.objects(soft_del=soft_del)
return list(roles)
def create_role(name=None):
if not name:
return False, "角色名未提供"
role_desc = RoleDesc.objects(name=name, soft_del=False).first()
if role_desc:
return False, "角色名已经存在"
role_desc = RoleDesc(name=name)
role_desc.save()
return get_role_info(role_desc)
def remove_role(role_id):
if not role_id:
return False, "角色id未提供"
role = RoleDesc.objects(id=role_id).first()
if not role:
return False, "角色不存在"
role.soft_del = True
role.save()
return get_role_info(role)
def update_role(role_id, name=None):
if not role_id:
return False, "角色id未提供"
if not name:
return False, "名字未提供"
duplicated_role = RoleDesc.objects(soft_del=False,
name=name,
id__ne=role_id).first()
if duplicated_role:
return False, "同名角色已经存在"
role = RoleDesc.objects(id=role_id, soft_del=False).first()
if not role:
return False, "角色不存在"
role.name = name
role.save()
return get_role_info(role)
def get_role_info(role=None, role_id=None, role_name=None):
if not role:
if role_id:
role = RoleDesc.objects(id=role_id).first()
elif role_name:
role = RoleDesc.objects(name=role_name, soft_del=False).first()
if not role:
return False, "角色不存在"
ret = {'id': str(role.id),
'name': str(role.name),
'perms': role.perms,
'granted_positions': role.granted_positions,
'created': str(role.created),
'soft_del': role.soft_del,
'lut': str(role.lut)}
return True, ret
def get_perm_triples(actions, user_id):
ret = {}
perm_list = get_user_perm_list(user_id)
for action in actions:
ret.setdefault(action, {})
ret[action]['*'] = has_perm(user_id, action, perm_list=perm_list)
ret[action]['+'] = has_perm(user_id, {'action': action, 'effect': 'allow', 'resource': '+'},
is_upstream=True, perm_list=perm_list)
ret[action]['-'] = has_perm(user_id, {'action': action, 'effect': 'allow', 'resource': '-'},
is_owner=True, perm_list=perm_list)
return True, ret
|
[
"socrateslee@users.noreply.github.com"
] |
socrateslee@users.noreply.github.com
|
1b5142f366dc75a64591a6b27ee82c0362541e40
|
23052f3c9dcfecb3cf50e5593960d47d257a5579
|
/praw_blog.py
|
1aa3eaad6f3a15e13d8581447d4fc99a0e2ccc28
|
[] |
no_license
|
sergewh20/PRAW-blog
|
77b4dcc121bbd0a4ce9e1f1fc956644874dd81e5
|
2567a3125a9317cdb20691cf78f4cf29b8eb67ca
|
refs/heads/master
| 2022-11-14T03:25:21.099797
| 2020-07-02T12:40:48
| 2020-07-02T12:40:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
import praw
import pandas as pd
reddit = praw.Reddit(client_id = 'CLIENT_ID',
client_secret = 'CLIENT_SECRET',
                     username = 'USERNAME',
password = 'PASSWORD',
user_agent = 'PRAW Blog')
subreddit_list= ['india','worldnews','announcements','funny','AskReddit',
'gaming','pics','science','movies','todayilearned'
]
author_list = []
id_list = []
link_flair_text_list = []
num_comments_list = []
score_list = []
title_list = []
upvote_ratio_list = []
for subred in subreddit_list:
subreddit = reddit.subreddit(subred)
hot_post = subreddit.hot(limit = 10000)
for sub in hot_post:
author_list.append(sub.author)
id_list.append(sub.id)
link_flair_text_list.append(sub.link_flair_text)
num_comments_list.append(sub.num_comments)
score_list.append(sub.score)
title_list.append(sub.title)
upvote_ratio_list.append(sub.upvote_ratio)
print(subred, 'completed; ', end='')
print('total', len(author_list), 'posts has been scraped')
df = pd.DataFrame({'ID':id_list,
'Author':author_list,
'Title':title_list,
'Count_of_Comments':num_comments_list,
'Upvote_Count':score_list,
'Upvote_Ratio':upvote_ratio_list,
'Flair':link_flair_text_list
})
df.to_csv('reddit_dataset.csv', index = False)
|
[
"noreply@github.com"
] |
noreply@github.com
|
d4434ec75f5e7002b51785f4507c97ae1014eb85
|
690ace6a6fe00db3dd0c799d9d7078d18f641daf
|
/graph1.py
|
3e45f25fb01d024c06bad00934ddfb10502e0025
|
[] |
no_license
|
tom523/server-sample
|
6abdcaa27e98954ac83c7835cf7fe55cd8475cff
|
f77228ac6da5ed45d562b3cc113af0e1cecb90c2
|
refs/heads/master
| 2022-08-02T05:28:45.228869
| 2020-05-22T08:14:32
| 2020-05-22T08:14:32
| 266,049,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self, u, v):
self.graph[u].append(v)
def topologicalSort(self):
indegree_dict = defaultdict(int)
for _, adjancency_list in self.graph.items():
for adjancency in adjancency_list:
indegree_dict[adjancency] += 1
zero_indegree_list = list(set(self.graph.keys()) - set(indegree_dict.keys()))
ret = []
while zero_indegree_list:
v = zero_indegree_list.pop()
ret.append(v)
for adj in self.graph[v]:
indegree_dict[adj] -= 1
if indegree_dict[adj] == 0:
zero_indegree_list.append(adj)
print(ret)
g = Graph(6)
g.addEdge(5, 2)
g.addEdge(5, 0)
g.addEdge(4, 0)
g.addEdge(4, 1)
g.addEdge(2, 3)
g.addEdge(3, 1)
g.topologicalSort()
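# Note: topologicalSort() above is Kahn's algorithm -- repeatedly emit a vertex with
# in-degree 0 and decrement the in-degrees of its neighbours. For the sample edges it
# prints one valid topological ordering of the six vertices (several orderings exist,
# since both 4 and 5 are sources).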
|
[
"358777330@qq.com"
] |
358777330@qq.com
|
08436a0b3e5a7e257274990561662c86d8e96311
|
53d6fc1222eaba9f2c4d9883ab2093612ba6fa87
|
/dicta.py
|
cc20977fbc23d9c8bcc83112b8e0c75312b7ab5b
|
[] |
no_license
|
vinay-iyengar/Bootcamp-Projects
|
0c1fcd266669f117f0710d8229c866d9649d1a43
|
5a414903c117947b0f47568ea0e4d448658c448b
|
refs/heads/master
| 2021-10-12T00:56:27.342506
| 2019-01-31T09:24:09
| 2019-01-31T09:24:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
d={1:"Speckbit", 2:"World", 3:"Quiet"}
for key,val in d.items():
print(d)
print(key, "=>", val)
|
[
"vinay2397@gmail.com"
] |
vinay2397@gmail.com
|
e940cbbc122b704ae013d728430934c12dab1aa9
|
b4026496a66b0577c96e45c6e7b18faeb433f328
|
/scripts/ci/pre_commit/pre_commit_check_order_setup.py
|
e94109c0469bfa3bedba6ec8703410a30a125bd2
|
[
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] |
permissive
|
dferguson992/airflow
|
4beb2f970b77645d56546fb558953fe205d7355b
|
3d52b3ed8e6ed7cd4298edc731d88e9de0406df9
|
refs/heads/master
| 2021-08-19T01:32:45.597782
| 2020-11-16T19:54:29
| 2020-11-16T19:54:29
| 311,678,786
| 1
| 0
|
Apache-2.0
| 2020-11-16T19:54:30
| 2020-11-10T14:10:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,657
|
py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test for an order of dependencies in setup.py
"""
import os
import re
import sys
from os.path import abspath, dirname
from typing import List
errors = []
MY_DIR_PATH = os.path.dirname(__file__)
SOURCE_DIR_PATH = os.path.abspath(os.path.join(MY_DIR_PATH, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, SOURCE_DIR_PATH)
def _check_list_sorted(the_list: List[str], message: str) -> None:
sorted_list = sorted(the_list)
if the_list == sorted_list:
print(f"{message} is ok")
return
i = 0
while sorted_list[i] == the_list[i]:
i += 1
print(f"{message} NOK")
errors.append(
f"ERROR in {message}. First wrongly sorted element" f" {the_list[i]}. Should be {sorted_list[i]}"
)
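# For example, _check_list_sorted(['b', 'a'], "demo") prints "demo NOK" and records
# "ERROR in demo. First wrongly sorted element b. Should be a" in `errors`.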
def setup() -> str:
setup_py_file_path = abspath(os.path.join(dirname(__file__), os.pardir, os.pardir, os.pardir, 'setup.py'))
with open(setup_py_file_path) as setup_file:
setup_context = setup_file.read()
return setup_context
def check_main_dependent_group(setup_context: str) -> None:
"""
    Test that the dependency groups between the markers
    '# Start dependencies group' and '# End dependencies group' in setup.py are ordered
"""
pattern_main_dependent_group = re.compile(
'# Start dependencies group\n(.*)# End dependencies group', re.DOTALL
)
main_dependent_group = pattern_main_dependent_group.findall(setup_context)[0]
pattern_sub_dependent = re.compile(' = \\[.*?\\]\n', re.DOTALL)
main_dependent = pattern_sub_dependent.sub(',', main_dependent_group)
src = main_dependent.strip(',').split(',')
_check_list_sorted(src, "Order of dependencies")
def check_sub_dependent_group(setup_context: str) -> None:
r"""
Test for an order of each dependencies groups declare like
`^dependent_group_name = [.*?]\n` in setup.py
"""
pattern_dependent_group_name = re.compile('^(\\w+) = \\[', re.MULTILINE)
dependent_group_names = pattern_dependent_group_name.findall(setup_context)
pattern_dependent_version = re.compile('[~|><=;].*')
for group_name in dependent_group_names:
pattern_sub_dependent = re.compile(f'{group_name} = \\[(.*?)\\]', re.DOTALL)
sub_dependent = pattern_sub_dependent.findall(setup_context)[0]
pattern_dependent = re.compile('\'(.*?)\'')
dependent = pattern_dependent.findall(sub_dependent)
src = [pattern_dependent_version.sub('', p) for p in dependent]
_check_list_sorted(src, f"Order of sub-dependencies group: {group_name}")
def check_alias_dependent_group(setup_context: str) -> None:
"""
Test for an order of each dependencies groups declare like
`alias_dependent_group = dependent_group_1 + ... + dependent_group_n` in setup.py
"""
pattern = re.compile('^\\w+ = (\\w+ \\+.*)', re.MULTILINE)
dependents = pattern.findall(setup_context)
for dependent in dependents:
src = dependent.split(' + ')
_check_list_sorted(src, f"Order of alias dependencies group: {dependent}")
def check_install_and_setup_requires(setup_context: str) -> None:
"""
Test for an order of dependencies in function do_setup section
install_requires and setup_requires in setup.py
"""
pattern_install_and_setup_requires = re.compile('(setup_requires) ?= ?\\[(.*?)\\]', re.DOTALL)
install_and_setup_requires = pattern_install_and_setup_requires.findall(setup_context)
for dependent_requires in install_and_setup_requires:
pattern_dependent = re.compile('\'(.*?)\'')
dependent = pattern_dependent.findall(dependent_requires[1])
pattern_dependent_version = re.compile('[~|><=;].*')
src = [pattern_dependent_version.sub('', p) for p in dependent]
_check_list_sorted(src, f"Order of dependencies in do_setup section: {dependent_requires[0]}")
def check_extras_require(setup_context: str) -> None:
"""
Test for an order of dependencies in function do_setup section
extras_require in setup.py
"""
pattern_extras_requires = re.compile(r'EXTRAS_REQUIREMENTS: Dict\[str, List\[str\]] = {(.*?)}', re.DOTALL)
extras_requires = pattern_extras_requires.findall(setup_context)[0]
pattern_dependent = re.compile('\'(.*?)\'')
src = pattern_dependent.findall(extras_requires)
_check_list_sorted(src, "Order of dependencies in: extras_require")
def check_provider_requirements(setup_context: str) -> None:
"""
Test for an order of dependencies in function do_setup section
providers_require in setup.py
"""
pattern_extras_providers_packages = re.compile(
r'PROVIDERS_REQUIREMENTS: Dict\[str, Iterable\[str\]\] = {(.*?)}', re.DOTALL
)
extras_requires = pattern_extras_providers_packages.findall(setup_context)[0]
pattern_dependent = re.compile('"(.*?)"')
src = pattern_dependent.findall(extras_requires)
_check_list_sorted(src, "Order of dependencies in: providers_require")
def check_extras_provider_packages(setup_context: str) -> None:
"""
    Test for the order of dependencies in the do_setup section
    extras_provider_packages in setup.py
"""
pattern_extras_requires = re.compile(
r'EXTRAS_PROVIDERS_PACKAGES: Dict\[str, Iterable\[str\]\] = {(.*?)}', re.DOTALL
)
extras_requires = pattern_extras_requires.findall(setup_context)[0]
pattern_dependent = re.compile('"(.*?)":')
src = pattern_dependent.findall(extras_requires)
_check_list_sorted(src, "Order of dependencies in: extras_provider_packages")
def checks_extra_with_providers_exist() -> None:
from setup import EXTRAS_REQUIREMENTS, EXTRAS_PROVIDERS_PACKAGES # noqa # isort:skip
message = 'Check if all extras have providers defined in: EXTRAS_PROVIDERS_PACKAGES'
local_error = False
for key in EXTRAS_REQUIREMENTS.keys(): # noqa
if key not in EXTRAS_PROVIDERS_PACKAGES.keys(): # noqa
if not local_error:
local_error = True
print(f"Extra {key} NOK")
errors.append(
f"ERROR in {message}. The {key} extras is missing there."
" If you do not want to install any providers with this extra set it to []"
)
if not local_error:
print(f"{message} is ok")
if __name__ == '__main__':
setup_context_main = setup()
check_main_dependent_group(setup_context_main)
check_alias_dependent_group(setup_context_main)
check_sub_dependent_group(setup_context_main)
check_install_and_setup_requires(setup_context_main)
check_extras_require(setup_context_main)
check_provider_requirements(setup_context_main)
check_extras_provider_packages(setup_context_main)
checks_extra_with_providers_exist()
print()
print()
for error in errors:
print(error)
print()
if errors:
sys.exit(1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ad880090cfa86821407e0941820ac38bb2b6257a
|
e6dab5aa1754ff13755a1f74a28a201681ab7e1c
|
/.parts/lib/cherrypy/cherrypy/lib/static.py
|
730d86b5c8aca8450f7467f6e5d78d45615cc9e1
|
[] |
no_license
|
ronkagan/Euler_1
|
67679203a9510147320f7c6513eefd391630703e
|
022633cc298475c4f3fd0c6e2bde4f4728713995
|
refs/heads/master
| 2021-01-06T20:45:52.901025
| 2014-09-06T22:34:16
| 2014-09-06T22:34:16
| 23,744,842
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
/home/action/.parts/packages/googleappengine/1.9.4/lib/cherrypy/cherrypy/lib/static.py
|
[
"ron.y.kagan@gmail.com"
] |
ron.y.kagan@gmail.com
|
89bb687edf42e8d1b56379fb9bdefa2543b5cfa9
|
852d549b766134aa7d2d25fbcaceb5f1e9017fc9
|
/exam.py
|
642515798c839d156e41b43ade02fb5cfbf8d56e
|
[] |
no_license
|
python819/pythonfiles
|
3ae570873ebe17e5b70d5e989294fc49143e74d6
|
033a76e11adc25d88f034f9bf82520d8f2a79cca
|
refs/heads/master
| 2020-07-02T17:55:52.582633
| 2019-08-10T10:20:14
| 2019-08-10T10:20:14
| 201,612,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#module 1:--
#1
#for j in range(1,6):
# print('*'*j)
#2
'''
a=input('enter the password')
if len(a)>=6 and len(a)<=12:
if a.isalpha() is True:
print('should contain atleast one special symboland one number')
elif a.isalnum() is True:
print('should contain atleast one special symbol')
elif a.islower() is True :
print('should contain atleast one uppercase')
elif a.isupper() is True :
print('should contain atleast one lowercase')
else:
print('strong password')
else:
print('passowrd shoulb be min of 6 and max of 12 characters' )
'''
#3
a=[]
for j in range(0,5):
    a.append(input('enter the elements'))  # a[j] = ... on an empty list would raise IndexError
a.sort()  # list.sort() sorts in place and returns None, so sort first and then print
print(a)
|
[
"indraneilsai2@gmail.com"
] |
indraneilsai2@gmail.com
|
8aa4afb7f7c82069446a0267272eef69cf23eb38
|
4c78f66b6f852fa4ad0729eebadc3ee96a65ed57
|
/from_article_url.py
|
0e9ca6d389f946421d08ce31c2e7e20fd5e6dd84
|
[] |
no_license
|
proteeti13/first-page-news
|
04c933fc5a8c4dbfa70b7fa1e14a45369dab0bde
|
50ffca23371c205a56d7d8b6a499a3822a175452
|
refs/heads/master
| 2020-07-30T20:40:22.603338
| 2019-09-23T10:45:38
| 2019-09-23T10:45:38
| 210,353,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,904
|
py
|
import requests
from bs4 import BeautifulSoup
import articleDateExtractor
import dateparser
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
url_prothom_alo = "https://www.prothomalo.com/bangladesh/article/1614818/%E2%80%98%E0%A6%AA%E0%A6%9B%E0%A6%A8%E0%A7%8D%E0%A6%A6%E0%A7%87%E0%A6%B0%E2%80%99-%E0%A6%97%E0%A6%BE%E0%A7%9C%E0%A6%BF-%E0%A6%95%E0%A6%BF%E0%A6%A8%E0%A6%A4%E0%A7%87-%E0%A7%AA%E0%A7%AE-%E0%A6%B2%E0%A6%BE%E0%A6%96-%E0%A6%9F%E0%A6%BE%E0%A6%95%E0%A6%BE-%E0%A6%AC%E0%A6%BE%E0%A7%9C%E0%A6%A4%E0%A6%BF-%E0%A6%97%E0%A7%81%E0%A6%A8%E0%A6%9B%E0%A7%87-%E0%A6%AC%E0%A6%BF%E0%A6%AE%E0%A6%BE%E0%A6%A8"
url_cnn = "https://edition.cnn.com/interactive/2019/09/business/samsung-headquarters-south-korea/index.html"
url_bbc = "https://www.bbc.com/news/av/stories-49666419/life-saving-surgery-but-not-by-a-doctor"
url_daily_star = "https://www.thedailystar.net/frontpage/rohingyas-voter-list-election-commission-staffers-fraud-ring-behind-it-1801495"
url_bdnews = "https://bdnews24.com/world/2019/09/18/iran-s-rouhani-blames-us-saudi-for-conflict-in-region"
url_huffpost = "https://www.huffpost.com/entry/migrant-mothers-children-suing-trump-asylum-ban_n_5d819313e4b0957256ada9d6?guccounter=1&guce_referrer=aHR0cHM6Ly93d3cuaHVmZnBvc3QuY29tLw&guce_referrer_sig=AQAAANOwUFQmmgtG832C2zFu5uIzShOo3_RozywzkTKf85PBdUFTHQKAGjHyBDynkdwTJxHck2dYWcFBGL2IzcnmF5qdCPWruhCVMQGJ6w0r-1adq1h7JtIyl6ebGslvov3BUdBonintC93gn1dTVOJkdSpfmxkd4L0zipjURTlwZjhC"
url_nytimes = "https://www.nytimes.com/2019/09/17/climate/trump-california-emissions-waiver.html?action=click&module=Top%20Stories&pgtype=Homepage"
url_list = [
url_prothom_alo,
url_cnn,
url_bbc,
url_daily_star,
url_bdnews,
url_huffpost,
url_nytimes,
]
for url in url_list:
response = requests.get(url, headers=header)
soup = BeautifulSoup(response.text, "lxml")
site_name = soup.find("meta", property="og:site_name")
title = soup.find("meta", property="og:title")
content = soup.find("meta", property="og:description")
url = soup.find("meta", property="og:url")
image = soup.find("meta", property="og:image")
# date = dateparser.parse(soup)
print(
"site-name : ", site_name["content"] if site_name else "No site_name given here"
)
print("title : ", title["content"] if title else "No title given here")
print("content : ", content["content"] if content else "No description given here")
print("image : ", image["content"] if image else "No image given here")
print("url : ", url["content"] if url else "No url given here")
# print("date :", date)
# from newspaper import Article
# url = "https://bdnews24.com/world/2019/09/18/iran-s-rouhani-blames-us-saudi-for-conflict-in-region"
# article = Article(url)
# article.download()
# article.parse()
# print(article.publish_date)
|
[
"proteeti13@gmail.com"
] |
proteeti13@gmail.com
|
d45ddbfb167176a465fa730c943239468eb2aa4a
|
d95699c77bfe9e74e358b277f1a2a72dd471cc73
|
/train101.py
|
124dbb628500e464a0fe8ca85125c5319ed55add
|
[] |
no_license
|
ckfanzhe/HuaLuCup2020
|
61dffb29a7a0f41cf0607b6080c76a1a6cee2d26
|
307b3a741060db07df56d865ac8754663c089a69
|
refs/heads/main
| 2023-06-08T13:54:10.901945
| 2021-06-20T08:53:44
| 2021-06-20T08:53:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
# -*- coding: utf-8 -*-
import argparse
import random
import numpy as np
import torch.backends.cudnn as cudnn
import multiprocessing
import time
import torch
import torch.nn as nn
from torchsummary import summary
import os
from cfg import _metrics, _fit, _modelcheckpoint, _reducelr, _criterion
from data_gen_train import data_flow
from models.model import ResNet50, EfficientB7, ResNet101
def model_fn(args, mode):
model = ResNet101(weights=args.pretrained_weights, input_shape=(args.img_channel, args.input_size, args.input_size), num_classes=args.num_classes)
for param in model.parameters():
param.requires_grad = True
for name, value in model.named_parameters():
print(name, value.requires_grad)
model = nn.DataParallel(model)
model = model.cuda()
return model
def train_model(args, mode):
model = model_fn(args, mode)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
criterion = {'lossL' : nn.CrossEntropyLoss().cuda(), 'lossS' : _criterion.LabelSmoothSoftmaxCE().cuda()}
metrics = {"acc@1" : _metrics.top1_accuracy, "acc@3" : _metrics.topk_accuracy}
checkpoint1 = _modelcheckpoint.SingleModelCheckPoint(filepath=os.path.join('./models/', 'best_resnext101.pth'), monitor='val_acc@1', mode='max', verbose=1, save_best_only=True, save_weights_only=True)
checkpoint2 = _modelcheckpoint.SingleModelCheckPoint(filepath=os.path.join('./models/', 'ep{epoch:05d}-val_acc@1_{val_acc@1:.4f}-val_lossS_{val_lossS:.4f}-val_lossL_{val_lossL:.4f}.pth'), monitor='val_acc@1', mode='max', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = _reducelr.StepLR(optimizer, factor=0.2, patience=8, min_lr=1e-6)
_fit.Fit(
data_flow = data_flow,
model=model,
args=args,
batch_size = args.batch_size,
optimizer=optimizer,
criterion=criterion,
metrics=metrics,
reduce_lr = reduce_lr,
checkpoint = [checkpoint1, checkpoint2],
verbose=1,
workers=int(multiprocessing.cpu_count() * 0.8),
)
print('Training Done!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--data_local', default=r'/notebooks', type=str, help='')
parser.add_argument('--input_size', default=400, type=int, help='')
parser.add_argument('--img_channel', default=3, type=int, help='')
parser.add_argument('--num_classes', default=4, type=int, help='')
parser.add_argument('--batch_size', default=32, type=int, help='')
parser.add_argument('--learning_rate', default=1e-4, type=float, help='')
parser.add_argument('--max_epochs', default=40, type=int, help='')
parser.add_argument('--start_epoch', default=0, type=int, help='')
parser.add_argument('--pretrained_weights', default='./models/zoo/resnext101_32x8d-8ba56ff5.pth', type=str, help='')
parser.add_argument('--seed', default=None, type=int, help='')
args, unknown = parser.parse_known_args()
os.environ["CUDA_VISIBLE_DEVICES"] = '0, 1, 2, 3, 4, 5, 6, 7, 8'
print('CUDA device count : {}'.format(torch.cuda.device_count()))
if not os.path.exists(args.data_local):
raise Exception('FLAGS.data_local_path: %s is not exist' % args.data_local)
    if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
cudnn.deterministic = True
print('You have chosen to seed training with seed {}.'.format(args.seed))
else:
        print('You have chosen to use a random seed.')
train_model(args=args, mode='train')
|
[
"ieluoyiming@163.com"
] |
ieluoyiming@163.com
|
59fbf899cb91638c4c208f659ae96a918d587461
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/nltk/cluster/__init__.py
|
38a9111e2204c7174d3bfbd82559e79570513835
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:59aceae689404a10cc3a170d5442209edea3f051e4f50c800fa557e86d234639
size 4271
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
214e442be29616883451dca6b73800ab366555e6
|
8bccd1376213a9fe56ab7bd69815a309339e8ceb
|
/quickdrop/quickdrop.py
|
be95d836b7decbb1458b091af81941e2e584a249
|
[] |
no_license
|
zevaverbach/quickdrop
|
0d2906fb3deb2fdb24b0f932347238676fab33b1
|
7017c1518f831515f331c715dcd5f5d24a1877f9
|
refs/heads/master
| 2020-04-23T12:44:27.704581
| 2019-02-17T22:20:15
| 2019-02-17T22:20:15
| 171,179,181
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
import os
from pathlib import Path
import sys
import click
import dropbox
import pyperclip
DROPBOX_ACCESS_TOKEN = os.getenv('DROPBOX_ACCESS_TOKEN')
DROPBOX_ROOT_PATH = os.getenv('DROPBOX_ROOT_PATH')
LB = '\n'
@click.command()
@click.argument('filepath', type=click.Path(exists=True))
def cli(filepath):
check_for_env_vars()
dropbox_relative_path = get_relative_path(filepath)
url = share_file(dropbox_relative_path)
copy_to_clipboard(url)
print(f'Okay, {filepath} is now shared, accessible via {LB}{url}.')
print('This url was also copied to your clipboard for your convenience.')
def share_file(filepath):
try:
shared_link = get_client().sharing_create_shared_link(filepath)
except dropbox.exceptions.ApiError as e:
raise click.ClickException('There was a problem with the path.')
else:
return shared_link.url
def get_relative_path(filepath):
DROPBOX_ROOT = Path(DROPBOX_ROOT_PATH).expanduser()
if '/' not in filepath:
filepath = f'/{filepath}'
elif not filepath.startswith('/') and not filepath.startswith('~'):
*path_parts, filename = filepath.split('/')
relevant_path_parts = []
for path_part in path_parts:
if path_part not in DROPBOX_ROOT_PATH:
relevant_path_parts.append(path_part)
        filepath = '/' + os.path.join(*relevant_path_parts, filename)  # passing f'/{filename}' to os.path.join would discard the earlier path parts
filepath_expanded_user = Path(filepath).expanduser()
path = Path(str(filepath_expanded_user).replace(str(DROPBOX_ROOT), ''))
return str(path)
def check_for_valid_access_token():
if not DROPBOX_ACCESS_TOKEN:
raise click.ClickException(
'Please get an access token here and store it in an environment '
'variable called "DROPBOX_ACCESS_TOKEN": '
' https://www.dropbox.com/developers/apps')
try:
dbx = get_client()
dbx.users_get_current_account()
except dropbox.exceptions.AuthError as e:
raise click.ClickException(str(e))
def check_for_env_vars():
check_for_valid_access_token()
check_for_dropbox_root_path()
def check_for_dropbox_root_path():
if not DROPBOX_ROOT_PATH:
raise click.ClickException(
'Please create an environment variable called "DROPBOX_ROOT_PATH" '
'with the path to your computer\'s root Dropbox folder.')
    if not Path(DROPBOX_ROOT_PATH).exists():
raise click.ClickException(f'{DROPBOX_ROOT_PATH} doesn\'t exist!')
def get_client():
return dropbox.Dropbox(DROPBOX_ACCESS_TOKEN)
def copy_to_clipboard(url):
pyperclip.copy(url)
|
[
"zev@averba.ch"
] |
zev@averba.ch
|
b9913ec6af02ea14d2cc7fb4552be713477a0d1d
|
5f55f05a2b115407e0703d1848c6f4681a16546d
|
/make_test_data.py
|
a2f390840aabdeda6cb958ed575cdbde1ebcd96c
|
[] |
no_license
|
yanjj199609017239230/D-python-gy-api-1908A
|
d20f359dbe39a0f213da273ce762c19ee912aa0f
|
0d188710438cd51f5c0afea5818fba115ed0b8af
|
refs/heads/master
| 2020-07-30T06:03:36.655044
| 2019-09-22T08:53:49
| 2019-09-22T08:53:49
| 210,112,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__title__ = ''
#__author__ = 'xuepl'
#__mtime__ = '2019/9/11'
import json
import os
import yaml
from config.conf import FILE_PATH
POST = 1
GET = 0
mode_name = "charge"
test_case = "扣款异常流1金额为空"
method = POST
url = "/acc/charge" #接口地址
data = None
params = None
status_code = 200
headers = {}
expect = "2000"
json_data = '''{
"accountName": "stdg2623",
"changeMoney": 25
}''' #注意数据格式为字典或者为json串
if(isinstance(json_data,str)):
json_data = json.loads(json_data)
d = [{
"test_case":test_case,
"method":method,
"url":url,
"data":data,
"params":params,
"json":json_data,
"status_code":status_code,
"expect":expect,
"headers":headers
}]
with open(os.path.join(FILE_PATH,"test_{}.yaml".format(mode_name)),'a',encoding='utf-8') as f:
yaml.safe_dump(d,f,encoding='utf-8',default_flow_style=False,allow_unicode=True)
f.write("\n")
|
[
"1726550139@qq.com"
] |
1726550139@qq.com
|
ae003c75bee275054a1f41fbe04ff1af3abba836
|
24470bd278c86ce441015c4e1737d240d67f37a0
|
/models.py
|
44e3a92569245472f8563b8d6d85d42c9d2736b2
|
[] |
no_license
|
kevsersrca/graphql-flask
|
d8acaab43004aca9b763e811189c39bc7b6c3e17
|
88d11432dbff1fbcb93061f7093b31e8805bdfd2
|
refs/heads/master
| 2021-01-20T21:11:48.494715
| 2017-08-29T11:56:06
| 2017-08-29T11:56:06
| 101,755,776
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
from sqlalchemy import *
from sqlalchemy.orm import (scoped_session, sessionmaker, relationship,
backref)
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('mysql+pymysql://root:@localhost/tutorial', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
class Department(Base):
__tablename__ = 'department'
id = Column(Integer, primary_key=True)
name = Column(String(255), index=True)
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
hired_on = Column(DateTime, default=func.now())
department_id = Column(Integer, ForeignKey('department.id'))
department = relationship( Department, backref=backref('users', uselist=True, cascade='delete,all'))
|
[
"kev@Kev-MacBook-Pro.local"
] |
kev@Kev-MacBook-Pro.local
|
8bfb6e8d486ecbfa00ac77fffd5fed9b085007f4
|
af7dbe519166b969d8af2a56e0ad2231aae80b44
|
/generate/lib/generate_hash.py
|
d8f5af74f50e99ab55caae92a9a674d18ee697bd
|
[] |
no_license
|
tevix/browser-extensions
|
3e73d8fd5b39e0fd22e65ddb7d8cf59d726d34e1
|
dd09f301096ef95ed8caaa6a939f3e12aaadec34
|
refs/heads/master
| 2021-01-22T03:49:01.688265
| 2019-12-07T17:12:10
| 2019-12-07T17:12:10
| 81,462,065
| 0
| 0
| null | 2017-02-09T15:05:20
| 2017-02-09T15:05:20
| null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
import os
import hashlib
import json
os.chdir(os.path.dirname(os.path.abspath(__file__)))
hashes = {}
for root, dirs, files in os.walk('.'):
for file in files:
path = os.path.join(root, file)
with open(path, 'rb') as cur_file:
hash = hashlib.md5(cur_file.read()).hexdigest()
hashes[path.replace('\\', '/')[2:]] = hash
with open('hash.json', 'w') as hash_file:
json.dump(hashes, hash_file, indent=2)
|
[
"james.colin.brady@gmail.com"
] |
james.colin.brady@gmail.com
|
c8c37c72b598f4e577de6c74001660675ab2e307
|
a25aac80385265247c23c571463753b6b71051bf
|
/pre_traitement.py
|
9967bd322995d35bfa97f911a19153f154d8695e
|
[] |
no_license
|
XavierFarchetto/Hackathon
|
3a6ccfc436a2657d5b7ce3edc84353b7560b6db4
|
42649d01d894bd7118fe03eb7327124677d68883
|
refs/heads/master
| 2020-04-06T14:03:59.457239
| 2018-11-14T15:14:32
| 2018-11-14T15:14:32
| 157,526,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
import os
input_directory = "annotated_job_offers"
output_directory = "jo"
def verify_annotation(current_line):
words = current_line.split("\t")
if len(words) < 2:
word = words[0]
if "\n" in word:
word = word[:-1]
line = "\t".join([word, "O"]) + "\n"
else:
annotation = words[1]
if annotation == "\n" or annotation == "":
annotation = "O"
if "\n" in annotation:
annotation = annotation[:-1]
classification = annotation.split(" ")
if len(classification) > 1:
union = "_".join(classification)
line = "\t".join([words[0], union]) + "\n"
else:
line = "\t".join([words[0], annotation]) + "\n"
return line
def study_tag(line, list):
tag = line.split("\t")[1]
if not(tag in list):
list.append(tag)
return sorted(list)
else:
return list
def verify_file(input_directory, output_directory, file_name, tags_list):
input_file_name = os.path.join(input_directory, file_name)
output_file_name = os.path.join(output_directory, file_name)
tag_list = tags_list
with open(output_file_name, "w") as output:
with open(input_file_name, "r") as input:
line = input.readline()
while line:
line = verify_annotation(line)
output.write(line)
tag_list = study_tag(line[:-1], tag_list)
line = input.readline()
return tag_list
def write_tags_file(list):
with open("tags.txt", "w") as file :
for tag in list:
file.write(tag+"\n")
def verify_directory(input_directory, output_directory, first_file=1, last_file=5428):
file_list = sorted(os.listdir(input_directory))[first_file-1:last_file]
tag_list = []
for counter, file in enumerate(file_list):
tag_list = verify_file(input_directory, output_directory, file, tag_list)
print("Element {}\{} - File {} reviewed".format(str(counter+first_file), last_file, file))
write_tags_file(tag_list)
if __name__ == "__main__":
# 5428 file max
verify_directory(input_directory, output_directory)
|
[
"xavier.farchetto@telecomnancy.eu"
] |
xavier.farchetto@telecomnancy.eu
|
ac30e12fa3d62c3d9eca81ef631868ac0afd7eac
|
ccd1c59f380326eaa7a5ec069e8bf49ceae5589a
|
/07_farangeit_to_celsium.py
|
04c298e4caf8e3aa3f3259ddda4ee5d3fbded1e0
|
[] |
no_license
|
Froststorm/Codeabby_learn
|
bfc7c27797c4fe72b4657556c92e00535d1be7b9
|
9e00624a87db4990721897cb3b01675028d814e5
|
refs/heads/master
| 2021-01-19T19:02:30.937847
| 2017-11-30T19:05:22
| 2017-11-30T19:05:22
| 101,183,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
def toCelsium(a):
    return round(5/9*(a-32))

# Read one line of numbers: the first value is the count, the rest are Fahrenheit temperatures.
listOfNums = input().split()
listOfNums = [int(x) for x in listOfNums]
print(listOfNums[1:], end="\n\n\n")
print(" ".join([str(toCelsium(i)) for i in listOfNums[1:]]))
|
[
"andrey.paladin@gmail.com"
] |
andrey.paladin@gmail.com
|
7276c06a92637c166751509d796fa9da93aaa076
|
78d035d98059909fa8546b65040432880d629e22
|
/gunicorn.conf.py
|
3702d784b774884ff291c145142b734a94780675
|
[] |
no_license
|
traffic-signal-control/TSCC-flask
|
1c1e49043afd35e81ca5535ff1455f588fd91b4b
|
afcc4e5105a9966f1840e0605ce16315b50e80cf
|
refs/heads/master
| 2020-05-04T07:55:48.889744
| 2019-04-17T09:59:14
| 2019-04-17T09:59:14
| 179,037,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
__author__ = 'Wingslet'
debug = True
workers = 5  # number of worker processes handling requests; tune according to site traffic
worker_class = "gevent"  # use the gevent worker for asynchronous request handling and higher throughput
bind = "0.0.0.0:8000"  # listen on all interfaces so Docker containers and the host can reach each other
|
[
"wingsweihua@gmail.com"
] |
wingsweihua@gmail.com
|
208846eb714574f4adfe61167d1f7792766e06ec
|
be1d8fdaf2820d910799180aaee4772cfbf2cfb7
|
/UndirectedUnweightedGraph_adjList.py
|
34b93d151250f18696e046fc09f711f5f33924ac
|
[] |
no_license
|
gerganzh/Python-Mini-Projects
|
04dd612ffb56b64606409b1bef23c27c60ab91f5
|
5c4da489896c608aa43647aedf4f7147921d6018
|
refs/heads/master
| 2020-05-25T12:54:55.604746
| 2019-05-21T09:48:29
| 2019-05-21T09:48:29
| 187,809,132
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,509
|
py
|
#Week 7, Task 1
'''
I implemented an unweighted and undirected graph data structure, where the nodes consist of positive integers. I
decided to use an adjacency-list approach.
'''
import sys
from collections import OrderedDict
class Node: #creating class node
def __init__(self, node):
self.name = node
self.neighbors = [] #the list will be appended by adjacent (connected) nodes
def add_neighbors(self, neighbors): #function to add adjacent nodes(neighbours) in the list
for neighbor in neighbors:
if isinstance(neighbor, Node): #if the object is an instance of Node
if neighbor.name not in self.neighbors: #makes sure that there are no duplicate nodes
self.neighbors.append(neighbor.name) #appending the neighbors list
neighbor.neighbors.append(self.name)
else:
return None
class Graph: #creating the Graph class
def __init__(self):
self.nodes = {} #dictionary that will contain the nodes
def add_nodes(self, nodes): #adding nodes to Graph
for node in nodes:
if isinstance(node, Node): #checks if the object is an instance of Node
self.nodes[node.name] = node.neighbors #
def adjacency_list(self): #used for printing the adjacency list
adj_list = [str(key) + ":" + str(self.nodes[key]) for key in self.nodes.keys()] #creating the list with dictionary inside
for a in adj_list: #to print it on a new row
print(a)
def adjacency_list_dict(self): #to print a dictionary, with the dictionary of neighbours inside (needed for BFS and isPath)
return({str(key): self.nodes[key] for key in self.nodes.keys()})
def isPath(graph, v, w, path=None):
    if path is None: #use None instead of a mutable default so repeated calls start with a fresh path
        path = []
    path = path + [v]
if v == w: #if/when the nodes are the same
print('There is path!')
return('The path is: ' + str(path))
elif v not in graph and w not in graph: #if both nodes are not in the graph
raise TypeError('Both nodes do not exist in this graph. ')
elif v not in graph:
raise TypeError ("The start node does not exist in this graph. ")
elif w not in graph:
raise TypeError("The end node does not exist in this graph. ")
for node in graph[v]: #for every node that is connected to the start node
if node not in path: #if it's not already in path
newpath = isPath(graph, node, w, path) #call the function again with the new node instead of start node
return newpath #and return the path
return ("Can't find path to this node!")
def print_list(graph): #to print the list
print('The Adjacency List for this unweighted, undirected graph is: ')
return graph.adjacency_list()
def print_dict(graph): #to print the dictionary
return(graph.adjacency_list_dict())
def bfs(graph1, start): #BFS Search
    queue = [start] #create the queue
    visited = [] #keep track of the visited nodes
    if start not in graph1:
        raise TypeError('Node not found.')
    while queue: #looping until queue is empty
        node = queue.pop(0) #remove element with index 0 from list and return it
        if node in visited: #skip nodes that were already expanded
            continue
        visited.append(node) #append the visited list with node
        neighbours = graph1[node] #get the adjacent nodes
        for neighbour in neighbours: #for each element in neighbours
            if neighbour not in visited: #only enqueue unvisited nodes so cycles do not loop forever
                queue.append(neighbour) #append the queue with neighbour node
    return bfs_write(visited) #write out the traversal order
def bfs_write(lst): #output the traversed nodes to an external txt file
f = open('file.txt', 'w+')
f.write(str(list(OrderedDict.fromkeys(lst))))
f.close()
########################################## Testing The Code ###########################################################
if __name__ == "__main__":
A = Node('1') #create instances of class Node
B = Node('2')
C = Node('3')
D = Node('4')
E = Node('5')
A.add_neighbors([B, C, E]) #add neighbours (connected nodes)
B.add_neighbors([A, C])
C.add_neighbors([B, A, E, D])
D.add_neighbors([C])
E.add_neighbors([A, C])
graph = Graph() #create the graph itself
graph.add_nodes([A, B, C, D, E])
print_list(graph)#print the adjacency list
    adj_dict = print_dict(graph) #renamed so the built-in dict is not shadowed
    bfs(adj_dict, '1')
    print(isPath(adj_dict, '1', '3'))
|
[
"noreply@github.com"
] |
noreply@github.com
|
657337bf90a24e453740657f6c0d434ef21313c9
|
cf62f7a7f9e13205fe83957fb7bfcf1b097bf481
|
/src/index.py
|
a2ae504efaedb021f53a79f53ead655fd59982c9
|
[
"Apache-2.0"
] |
permissive
|
biothings/mygene.info
|
09bf19f481c066789a4ad02a0d2880f31dae28f6
|
fe1bbdd81bc29b412ca4288d3af38e47c0602ab7
|
refs/heads/master
| 2023-08-22T21:34:43.540840
| 2023-08-08T23:25:15
| 2023-08-08T23:25:18
| 54,933,630
| 89
| 20
|
NOASSERTION
| 2023-07-18T23:53:49
| 2016-03-29T00:36:49
|
Python
|
UTF-8
|
Python
| false
| false
| 757
|
py
|
"""
Mygene Web Server Entry Point
Examples:
>>> python index.py
>>> python index.py --debug
>>> python index.py --port=8000
"""
import os.path
import config
from biothings.web.launcher import main
ADDON_HANDLERS = [
(r"/demo/?(.*)", "tornado.web.StaticFileHandler",
{"path": "docs/demo", "default_filename": "index.html"}),
]
if config.INCLUDE_DOCS:
if not os.path.exists(config.DOCS_STATIC_PATH):
raise IOError('Run "make html" to generate sphinx docs first.')
ADDON_HANDLERS += [
(r"/widget/(.*)", "tornado.web.RedirectHandler", {"url": "/static/widget/{0}"}),
(r"/?(.*)", "tornado.web.StaticFileHandler", {'path': config.DOCS_STATIC_PATH}),
]
if __name__ == '__main__':
main(ADDON_HANDLERS)
|
[
"xzhou@scripps.edu"
] |
xzhou@scripps.edu
|
2fad265d11b5850de7947324b15cf3811b053d58
|
1b25efab9fd81f1c1b9cd484a13d530759809838
|
/backend/dating/api/v1/serializers.py
|
94acc95fb234b127aaf19304903f55ffff0256f5
|
[] |
no_license
|
crowdbotics-apps/test-31906
|
1728e7947b6cbd52dc123310647ec523914aa1aa
|
2f6841d3ac3e4d335712fd11b3ee81166eec2f47
|
refs/heads/master
| 2023-08-30T11:31:54.409975
| 2021-11-10T07:26:53
| 2021-11-10T07:26:53
| 426,524,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
from rest_framework import serializers
from dating.models import Setting, Like, UserPhoto, Match, Dislike, Inbox, Profile
class InboxSerializer(serializers.ModelSerializer):
class Meta:
model = Inbox
fields = "__all__"
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = Like
fields = "__all__"
class DislikeSerializer(serializers.ModelSerializer):
class Meta:
model = Dislike
fields = "__all__"
class UserPhotoSerializer(serializers.ModelSerializer):
class Meta:
model = UserPhoto
fields = "__all__"
class SettingSerializer(serializers.ModelSerializer):
class Meta:
model = Setting
fields = "__all__"
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = "__all__"
class MatchSerializer(serializers.ModelSerializer):
class Meta:
model = Match
fields = "__all__"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
899c5f0098afd90b2bbd71e177e514e42fe973d5
|
36d4c9a57b53f5e14acb512759b49fe44d9990d8
|
/hackerrank/30-days-of-code/day-8.py
|
d6527ddafbd6b3abc73b984d4cbb1c5fe239558e
|
[] |
no_license
|
yosef8234/test
|
4a280fa2b27563c055b54f2ed3dfbc7743dd9289
|
8bb58d12b2837c9f8c7b1877206a365ab9004758
|
refs/heads/master
| 2021-05-07T22:46:06.598921
| 2017-10-16T18:11:26
| 2017-10-16T18:11:26
| 107,286,907
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
# # -*- coding: utf-8 -*-
# Objective
# Today, we're learning about Key-Value pair mappings using a Map or Dictionary data structure. Check out the Tutorial tab for learning materials and an instructional video!
# Task
# Given N names and phone numbers, assemble a phone book that maps friends' names to their respective phone numbers. You will then be given an unknown number of names to query your phone book for; for each name queried, print the associated entry from your phone book (in the form name=phoneNumber) or Not found if there is no entry for name.
# Note: Your phone book should be a Dictionary/Map/HashMap data structure.
# Input Format
# The first line contains an integer, N, denoting the number of entries in the phone book.
# Each of the N subsequent lines describes an entry in the form of 2 space-separated values on a single line. The first value is a friend's name, and the second value is an 8-digit phone number.
# After the N lines of phone book entries, there are an unknown number of lines of queries. Each line (query) contains a name to look up, and you must continue reading lines until there is no more input.
# Note: Names consist of lowercase English letters and are first names only.
# Constraints
# 1 <= N <= 10^5
# 1 <= queries <= 10^5
# Output Format
# On a new line for each query, print Not found if the name has no corresponding entry in the phone book; otherwise, print the full name and phoneNumber in the format name=phoneNumber.
# Sample Input
# 3
# sam 99912222
# tom 11122222
# harry 12299933
# sam
# edward
# harry
# Sample Output
# sam=99912222
# Not found
# harry=12299933
# Explanation
# N=3
# We add the N subsequent (Key,Value) pairs to our map so it looks like this:
# phoneBook={(sam,99912222),(tom,11122222),(harry,12299933)}
# We then process each query and print Key=Value if the queried Key is found in the map, or Not found otherwise.
# Query 0: sam
# Sam is one of the keys in our dictionary, so we print sam=99912222.
# Query 1: edward
# Edward is not one of the keys in our dictionary, so we print Not found.
# Query 2: harry
# Harry is one of the keys in our dictionary, so we print harry=12299933.
n = int(input())
phonebook = dict(input().split() for _ in range(n))
# The number of queries is unknown, so keep reading names until input runs out.
while True:
    try:
        name = input().strip()
    except EOFError:
        break
    if name in phonebook:
        print(name + "=" + phonebook[name])
    else:
        print("Not found")
|
[
"ekoz@protonmail.com"
] |
ekoz@protonmail.com
|
3fc9a5f3265731caf8a21fd55c7b32f7899d4b42
|
035a3eebe2091897b942796781a192f67680b336
|
/objects/queries/dboraashwevchoice.py
|
3f919be71e6bb812c4a2453c10240c2196c7f0dc
|
[] |
no_license
|
gduvalsc/kairos
|
763eb8fa5daeefc5fd46ea066a47400d5be7b7f5
|
2bf863ba3f73fc16ef75842ad390eb55fb1906f1
|
refs/heads/master
| 2021-12-23T18:33:34.382989
| 2021-10-19T16:44:00
| 2021-10-19T16:44:00
| 77,072,517
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
null=None
true=True
false=False
class UserObject(dict):
def __init__(self):
object = {
"type": "query",
"id": "DBORAASHWEVCHOICE",
"collections": ["ORAHAS"],
"request": "select distinct event as label from ORAHAS where session_state = 'WAITING' order by label"
}
super(UserObject, self).__init__(**object)
|
[
"gduvalsc@gmail.com"
] |
gduvalsc@gmail.com
|
981f3b685443c1e8fabdc340684e1a4a52e41de2
|
e15fb687990589783066669784912ea8ac5bacaf
|
/genome_designer/test_data/full_vcf_test_set/generate_full_vcf_test_set.py
|
9dac81496c35a6bb2eaa6bc20477bb1f155f8606
|
[
"MIT"
] |
permissive
|
RubensZimbres/millstone
|
74d32105fa54104d0597b6789fb2871cb4fbd854
|
898936072a716a799462c113286056690a7723d1
|
refs/heads/master
| 2020-03-16T18:57:55.174716
| 2018-03-07T16:40:14
| 2018-03-07T16:40:14
| 132,894,394
| 1
| 2
| null | 2018-05-10T12:01:34
| 2018-05-10T12:01:33
| null |
UTF-8
|
Python
| false
| false
| 5,259
|
py
|
"""
Script for generating the test set.
This document describes how this test set was generated.
1) Select a region of the MG1655 genome to excise.
"""
import copy
import random
from Bio import SeqIO
import vcf
import simNGS_util
# Portion of MG1655 Genbank of size ~5.5 kB
EXCISED_GENBANK = 'mg1655_tolC_through_zupT.gb'
TEMPLATE_VCF = 'template.vcf'
VCF_TEMPLATE_READER = vcf.Reader(TEMPLATE_VCF)
SAMPLE_FASTA_ROOT = 'sample'
DESIGNED_SNP_VCF = 'designed_snps.vcf'
# If we do a SNP every 100 bases, that's 50 SNPs.
# We'll then do 20 designed SNPs and 20 SNPs per sample so we should get
# fairly interesting overlaps.
TOTAL_SNPS = 50
NUM_IN_CDS = 45
NUM_OTHER = TOTAL_SNPS - NUM_IN_CDS
# We'll create this many genomes.
NUM_SAMPLES = 6
def is_position_in_coding_feature(position, cds_features):
"""Checks whether the given position lies inside of a coding feature
in the given genome record.
"""
for feature in cds_features:
if (feature.location.start <= position and
position < feature.location.end):
return True
return False
BASE_OPTIONS = ['A', 'T', 'G', 'C']
def choose_alt(ref):
"""Returns a random base that is not ref.
"""
alt = ref
while alt == ref:
alt = random.choice(BASE_OPTIONS)
return alt
def get_subset_of_snps(all_snps, subset_size):
all_snp_positions = all_snps.keys()
subset = {}
while len(subset) < subset_size:
pos = random.choice(all_snp_positions)
if pos in subset:
continue
subset[pos] = all_snps[pos]
return subset
def create_vcf_for_subset(subset, out_path):
with open(out_path, 'w') as designed_fh:
writer = vcf.Writer(designed_fh, VCF_TEMPLATE_READER,
lineterminator='\n')
for pos, value_dict in subset.iteritems():
writer.write_record(vcf.model._Record(
'Chromosome', # CHROM
pos, # POS
None, # ID
value_dict['ref'], # REF
value_dict['alt'], # ALT
None, # QUAL
None, # FILTER
None, # INFO
None, # FORMAT
None, # sample_indexes
samples=None))
def main():
seq_record = SeqIO.read(EXCISED_GENBANK, 'genbank')
cds_features = [f for f in seq_record.features if f.type == 'CDS']
# Generate all possible SNPs to sample from. Store them in a dictionary
# keyed by position so we can easily deal with lookups and avoiding
# duplicates as needed below.
all_snps = {}
len_seq_record = len(seq_record)
# Select random positions for SNPs, respecting the distribution
# set above by the NUM_IN_CDS vs TOTAL_SNPS constants.
# NOTE: These SNP positions are pythonic. We have to update them when
# writing them out in vcf format below.
num_in_cds = 0
num_other = 0
while num_in_cds < NUM_IN_CDS or num_other < NUM_OTHER:
position = random.randint(0, len_seq_record - 1)
if position in all_snps:
continue
in_cds_feature = is_position_in_coding_feature(position, cds_features)
do_add_position = False
if in_cds_feature and num_in_cds < NUM_IN_CDS:
do_add_position = True
num_in_cds += 1
elif not in_cds_feature and num_other < NUM_OTHER:
do_add_position = True
num_other += 1
if do_add_position:
ref = seq_record.seq[position]
alt = choose_alt(ref)
all_snps[position] = {
'ref': ref,
'alt': [alt]
}
assert len(all_snps) == TOTAL_SNPS, "Didn't get all the SNPs we expected."
# Now select a subset of these SNPS to serve as designed.
designed_snps = get_subset_of_snps(all_snps, 20)
create_vcf_for_subset(designed_snps, DESIGNED_SNP_VCF)
# Now create the samples.
for sample_num in range(NUM_SAMPLES):
sample_name = SAMPLE_FASTA_ROOT + str(sample_num)
sample_record = copy.deepcopy(seq_record)
sample_record.id = sample_name
# Grab a subset of SNPs.
sample_snps = get_subset_of_snps(all_snps, 20)
# Introduce the mutations.
for position, value_dict in sample_snps.iteritems():
sample_record.seq = (
sample_record.seq[:position] +
value_dict['alt'][0] +
sample_record.seq[position + 1:])
assert len(sample_record) == len(seq_record), (
"For now we are only doing mutations.")
# Write out the sample fasta.
sample_output = sample_name + '.fa'
with open(sample_output, 'w') as out_fh:
SeqIO.write(sample_record, out_fh, 'fasta')
# Generate fake reads using simNGS.
simLibrary_fasta = sample_name + '.simLibrary.fa'
print sample_output, simLibrary_fasta
simNGS_util.run_simLibrary(sample_output, simLibrary_fasta)
# Generate reads using simNGS.
output_fq = sample_name + '.simLibrary.fq'
simNGS_util.run_paired_simNGS(simLibrary_fasta, output_fq)
if __name__ == '__main__':
main()
|
[
"gleb.kuznetsov@gmail.com"
] |
gleb.kuznetsov@gmail.com
|
2a456d30c85a8c4ec540c3dfdb5ecdb022605603
|
51d54eecaef308fa2b1bfe6b5b0f15c9921c0e02
|
/Fadi/svmRun/stopwordsfeature.py
|
9696ce1a1efdb5e7c7649c9dec1859f3141a0299
|
[] |
no_license
|
chaitanyamalaviya/11761-Project
|
8e9adb0c4ef8a908023a25274c2f4a032a428797
|
eda750e5296786919ad6882a7db8d4bc2366f05f
|
refs/heads/master
| 2021-05-01T00:31:25.292387
| 2016-12-03T18:07:07
| 2016-12-03T18:07:07
| 73,503,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,328
|
py
|
from __future__ import division
import pickle
import os
import nltk
import nltk.tokenize
from nltk.corpus import stopwords
import logging
import numpy as np
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
STOPWORDS = set(stopwords.words('english'))
def saveObj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def loadObj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def importArticles(corpusFileName):
articles = []
path = os.getcwd()
with open(path + '/' + corpusFileName, "r") as f:
lines = f.readlines()
article = []
for line in lines:
line = line.rstrip()
if line == "~~~~~":
if article:
articles.append(article)
article = []
else:
# Removes the start stop tags for the sentence
line = line[4:]
line = line[:-4]
line = line.rstrip()
article.append(line)
articles.append(article)
return articles
def getFakeGood(labelsFileName):
path = os.getcwd()
with open(path + '/' + labelsFileName, "r") as f:
lines = f.readlines()
labels = []
for line in lines:
line = line.rstrip()
labels.append(int(line))
return labels
def getNumberOfStopwords(article):
sumStop = 0
sumLength = 0
for sentence in article:
tokenizedSentence = nltk.word_tokenize(sentence.lower())
stopwords = len([i for i in tokenizedSentence if i in STOPWORDS])
length = len(tokenizedSentence)
sumStop += stopwords*length
sumLength += length
return float(sumStop)/sumLength
def getFeature(devFileName):
articles = importArticles(devFileName)
featureLength = len(articles)
featureArray = np.zeros([featureLength,1], dtype=float)
i = 0
for article in articles:
stopWords = getNumberOfStopwords(article)
featureArray[i] = stopWords
i += 1
return featureArray
def main():
articlesPickle = []
goodArticles = []
badArticles = []
articles = importArticles('trainingSet.dat')
labels = getFakeGood('trainingSetLabels.dat')
getFeature('trainingSet.dat')
i = 0
for label in labels:
if label == 1:
article = articles[i]
score = getNumberOfStopwords(article)
logging.debug("Average number of stopwords in good article: %s" % score)
goodArticles.append(score)
articlesPickle.append(score)
if label == 0:
article = articles[i]
score = getNumberOfStopwords(article)
logging.debug("Average number of stopwords in bad article: %s" % score)
badArticles.append(score)
articlesPickle.append(score)
i = i + 1
logging.debug("Average number of stopwords in good articles: %f" % (sum(goodArticles)/len(goodArticles)))
logging.debug("Average number of stopwords in bad articles: %f" % (sum(badArticles)/len(badArticles)))
saveObj(articlesPickle, 'feature_stopwords')
if __name__ == "__main__": main()
|
[
"fadibotros@Fadis-MBP.wv.cc.cmu.edu"
] |
fadibotros@Fadis-MBP.wv.cc.cmu.edu
|
fa09d7b28df8eaa91203691c54a1efca37c983c9
|
71804d207ca012e5398117128f5a65eb50c69699
|
/project4/task1.py
|
5a06e063eb7f7589bb88346d783ecffbe41c877e
|
[] |
no_license
|
rupadevan94/web-information-management
|
134cc1ba2119ebeed60b75754a8ca7d964395ace
|
963aafa48747ae35925a7a9cfc00bd3d73969f40
|
refs/heads/master
| 2020-06-28T06:34:14.633542
| 2019-08-02T04:37:36
| 2019-08-02T04:37:36
| 200,165,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
#!/usr/bin/env python3
import sys
import os
import numpy
import numpy.linalg
import scipy.misc
def getOutputPngName(path, rank):
filename, ext = os.path.splitext(path)
return filename + '.' + str(rank) + '.png'
def getOutputNpyName(path, rank):
filename, ext = os.path.splitext(path)
return filename + '.' + str(rank) + '.npy'
if len(sys.argv) < 3:
sys.exit('usage: task1.py <PNG inputFile> <rank>')
inputfile = sys.argv[1]
rank = int(sys.argv[2])
outputpng = getOutputPngName(inputfile, rank)
outputnpy = getOutputNpyName(inputfile, rank)
#
# TODO: The current code just prints out what it is supposed to do.
# Replace the print statement with your code.
#
print("This program should read %s file, perform rank %d approximation, and save the results in %s and %s files." % (inputfile, rank, outputpng, outputnpy))
|
[
"rupadevan94@gmail.com"
] |
rupadevan94@gmail.com
|
8cdd0bd9d537ad94f769df4f3a1faf52e3fb8895
|
5760ff9bca037a2e85dde8ad4d583139ab8e128a
|
/migrations/versions/20150624090637_3606d4a47663_update_answercomment_model.py
|
c4dcdcc74edfefac69c1499b71d92697c7e86322
|
[] |
no_license
|
dianchang/dianchang
|
5b58cbfcf6dfcd9c2c9d55c0612a9327086b8b54
|
3414cd5af0a66facd6ec4eb787e7646d04d8c96c
|
refs/heads/master
| 2016-08-11T11:24:49.322330
| 2015-07-30T05:18:09
| 2015-07-30T05:18:09
| 36,111,229
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
"""Update AnswerComment model.
Revision ID: 3606d4a47663
Revises: 2040a458fc8a
Create Date: 2015-06-24 09:06:37.957787
"""
# revision identifiers, used by Alembic.
revision = '3606d4a47663'
down_revision = '2040a458fc8a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('answer_comment', sa.Column('likes_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('answer_comment', 'likes_count')
### end Alembic commands ###
|
[
"hustlzp@qq.com"
] |
hustlzp@qq.com
|
94a4a250cda1258c1bd6f317825a0d895ccc4900
|
55b0e3b5c59a4b929ca0b12dca6a7c88abc99b1c
|
/scripts/figures/angularMomentum_conservation.py
|
3fd84d5447cf74472ae41518839e1845cd9e9d47
|
[] |
no_license
|
andrewhalle/M31-dynamics
|
d007c387c59b87c7f76b5109011821e73b66317d
|
bf730eb66dc297ab7f35596e2b7b7fd6440a397f
|
refs/heads/master
| 2021-01-20T03:14:08.664161
| 2017-06-19T18:32:33
| 2017-06-19T18:32:33
| 60,396,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
# Generates figure which shows change #
# in total angular momentum of the #
# simulation versus time #
import rebound
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import sys
import os
sys.path.append("../include")
from universal_logs import *
def calculate_angular_momentum(sim):
com = sim.calculate_com()
j = np.array([0, 0, 0])
for p in sim.particles:
r = np.array([p.x - com.x, p.y - com.y, p.z - com.z])
v = np.array([p.vx - com.vx, p.vy - com.vy, p.vz - com.vz])
j = j + np.cross(r, v)
return np.linalg.norm(j)
sim_number = sys.argv[1].zfill(3)
initial = restore("../../logs/suite_u/" + sim_number + "/000000000.logu")
initial_mom = calculate_angular_momentum(initial)
sims = os.listdir("../../logs/suite_u/" + sim_number)
sims.sort()
sims.pop()
data = []
i = 1
while i < len(sims):
sim = restore("../../logs/suite_u/" + sim_number + "/" + sims[i])
data.append([i, (calculate_angular_momentum(sim) - initial_mom) / initial_mom])
i += 1
x = [a[0] for a in data]
y = [a[1] for a in data]
plt.plot(x, y, 'k')
plt.xlabel("Time")
plt.ylabel("Error")
plt.savefig("../../images/conservation/angular_momentum/" + sim_number + ".png")
|
[
"ahalle@berkeley.edu"
] |
ahalle@berkeley.edu
|
b9e393ac52c6010d7045b8feb8aa0c3c7d0a91bd
|
e97ed793c12124203338ba26976e25059903daa8
|
/statements/mkpdf_helper.py
|
b0558d5d1e14c544a95e2f5bbc822beb3123e9da
|
[
"MIT"
] |
permissive
|
alex65536/contest-template
|
26af256b5ed4b82b0101a0227726c03fdd31bde7
|
0a36269f792340a9a73b727159fd9ad220d96025
|
refs/heads/master
| 2021-06-21T06:18:01.482886
| 2021-01-05T01:08:03
| 2021-01-05T01:08:03
| 136,298,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
#!/usr/bin/env python3
import sys
import json
from os import path
if len(sys.argv) < 2:
sys.stderr.write("Usage: {} PROBLEM\n".format(sys.argv[0]))
sys.exit(1)
problem = sys.argv[1]
obj = None
tl = "??? секунд"
ml = "??? мегабайт"
infile = "???.in"
outfile = "???.out"
try:
obj = json.loads(open(path.join("..", "problems",
problem, "problem.json"), 'r').read())
except FileNotFoundError:
pass
if obj:
tl_sec = obj["problem"]["timeLimit"]
tl_sec_int = round(tl_sec)
if abs(tl_sec_int - tl_sec) < 1e-12:
word = "секунд"
if 10 <= tl_sec_int % 100 <= 19:
word = "секунд"
elif tl_sec_int % 10 == 1:
word = "секунда"
elif tl_sec_int % 10 in {2, 3, 4}:
word = "секунды"
tl = format("{} {}".format(tl_sec_int, word))
else:
tl = format("{:.3g} секунды".format(tl_sec))
ml_mb = int(obj["problem"]["memoryLimit"])
word = "мегабайта"
if 10 <= ml_mb % 100 <= 19 or ml_mb % 10 in {0, 5, 6, 7, 8, 9}:
word = "мегабайт"
elif ml_mb % 10 == 1:
word = "мегабайт"
ml = str(ml_mb) + ' ' + word
infile = obj["problem"]["input"]
if not infile:
infile = "стандартный ввод"
outfile = obj["problem"]["output"]
if not outfile:
outfile = "стандартный вывод"
print("\\def\\ProblemTimeLimit{{{}}}".format(tl))
print("\\def\\ProblemMemoryLimit{{{}}}".format(ml))
print("\\def\\ProblemInputFile{{{}}}".format(infile))
print("\\def\\ProblemOutputFile{{{}}}".format(outfile))
print("\\input{{{}.tex}}".format(problem))
print()
|
[
"sh200105@mail.ru"
] |
sh200105@mail.ru
|
005aa7160b9dcb3eb53b5920602371a013ae5a0c
|
807d460fbb00db68c1eb1a1cb490ae74a7806df9
|
/mysite/topic/admin.py
|
b1213bb354395c4a8a166f3cda781cba67c5ab67
|
[] |
no_license
|
modanhan/cpsc471
|
67fa6a68581efae5ebefac96e0e523b63c8e1edf
|
d1eabcc418d1d2c8d71b408a48394edabdeb80d0
|
refs/heads/master
| 2020-03-09T00:18:35.152083
| 2018-04-18T03:15:11
| 2018-04-18T03:15:11
| 128,484,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Topic
from .models import ChallengeTopic, TopicRating
# Register your models here.
admin.site.register(Topic)
admin.site.register(ChallengeTopic)
admin.site.register(TopicRating)
|
[
"modanhan@live.com"
] |
modanhan@live.com
|
cf386bf9e4f886259a355ae0d1237f0389fbdb0b
|
19cb4e993c6d482e02ae7cf3fa521302483754bd
|
/setup.py
|
849ff91c4f2292e4d01ef2aaf5b0a848f2f59186
|
[
"MIT"
] |
permissive
|
brimcfadden/stormed-amqp
|
b98414cbc1be5ae2fa25651cbd7ca21e08fa33e8
|
59e81bfa4632366dc3f20b3dff25df3331480798
|
refs/heads/master
| 2020-12-24T11:06:28.460936
| 2011-07-15T15:25:21
| 2011-07-15T15:25:21
| 2,053,652
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
import distutils.core
try:
# to enable "python setup.py develop"
import setuptools
except ImportError:
pass
distutils.core.setup(
name="stormed-amqp",
version='0.1',
packages = ["stormed", "stormed.method", "stormed.method.codegen"],
author="Paolo Losi",
author_email="paolo.losi@gmail.com",
download_url="http://github.com/downloads/paolo-losi/stormed-amqp/stormed-amqp-0.1.tar.gz",
license="http://www.opensource.org/licenses/mit-license.html",
description="native tornadoweb amqp 0-9-1 client implementation",
)
|
[
"paolo.losi@gmail.com"
] |
paolo.losi@gmail.com
|
d013fddbef0d5f30733064fb694bf1132b7eb341
|
78a3ba49a3aaea55431a41e72ff5297b069037fc
|
/neurokernel/LPU/InputProcessors/GaussianNoiseInputProcessor.py
|
f0c313149a6d3e50b379b499a7bf9c791a0f415b
|
[
"BSD-3-Clause"
] |
permissive
|
mkturkcan/neurodriver
|
edb9f0d79f8eabb0f3bd06c35e2277c0be70e255
|
dc5a10212e32ba1dee97af2cbc1b025917361b32
|
refs/heads/master
| 2021-04-27T00:27:41.903107
| 2018-03-04T19:08:36
| 2018-03-04T19:08:36
| 123,819,113
| 0
| 0
|
BSD-3-Clause
| 2018-03-04T19:00:03
| 2018-03-04T19:00:03
| null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
import numpy as np
from BaseInputProcessor import BaseInputProcessor
class GaussianNoiseInputProcessor(BaseInputProcessor):
def __init__(self, variable, uids, mean, std, start = -1, stop = -1):
super(GaussianNoiseInputProcessor, self).__init__([(variable,uids)],
mode=0)
self.mean = mean
self.std = std
self.start = start
self.stop = stop
self.var = variable
self.num = len(uids)
def update_input(self):
self.variables[self.var]['input'] = self.std*\
np.array(np.random.randn(self.num), dtype = self.dtypes[self.var]) + self.mean
def is_input_available(self):
if self.start>-1. and self.stop>self.start:
return (self.LPU_obj.time >= self.start and
self.LPU_obj.time < self.stop)
else:
return False
def post_run(self):
pass
|
[
"mkt2126@columbia.edu"
] |
mkt2126@columbia.edu
|
79673a33e5eeef00b9b046e1a7b02efaaf6695b1
|
929f5bbde3c215c86649cbd22f8b29a74fe3f3bf
|
/server/LabManager/calendar/routes.py
|
462b3f1c3f31c8c5769302f8efea9c687f95818b
|
[] |
no_license
|
Fayhen/Laborator.io
|
52f1a47f42a1bfa5cdfde2c55d25eacf20e76058
|
e486b2f152e0291a4132ad6fcd5b157c812f9798
|
refs/heads/master
| 2021-06-13T17:35:43.701277
| 2020-03-29T19:56:50
| 2020-03-29T19:56:50
| 179,155,182
| 0
| 0
| null | 2021-06-02T01:18:06
| 2019-04-02T20:40:07
|
Python
|
UTF-8
|
Python
| false
| false
| 81
|
py
|
from flask import Blueprint
calendar = Blueprint("calendar", __name__)
# WIP
|
[
"diego00alfa@gmail.com"
] |
diego00alfa@gmail.com
|