blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d40e6c6ccb0a6eaf2d8ae58103faca2b2a47851c | 5efd154bac2e8bd5d4aedb710df8a6ddce98a13b | /titanic/titanic/build_pipeline/preprocessors.py | 2aeba597a7e60eb45bd969aa96861fb36f6c68d5 | [] | no_license | dbmikus/kaggle-competitions | b2c64e9b0ca7c642957e05d23ab9415a4b2af173 | fbc6ecc7fd43a8d3d78d9b90d0862f963927482d | refs/heads/master | 2020-12-04T07:51:18.402030 | 2020-01-06T03:44:39 | 2020-01-06T03:44:58 | 231,684,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_absolute_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from .. import clean_data
def build_preprocessor():
numerical_transformer = SimpleImputer(strategy="constant")
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
return _build_column_transformer(numerical_transformer, categorical_transformer)
def build_preprocessor_2():
numerical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="constant")),
("scaler", StandardScaler()),
]
)
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
return _build_column_transformer(numerical_transformer, categorical_transformer)
def _build_column_transformer(numerical_transformer, categorical_transformer):
numerical_cols = clean_data.numerical_features()
categorical_cols = clean_data.categorical_features()
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(
transformers=[
("num", numerical_transformer, numerical_cols),
("cat", categorical_transformer, categorical_cols),
]
)
return preprocessor
| [
"dmikus@hioscar.com"
] | dmikus@hioscar.com |
df44354a0e9ba11e598574ff072588cf831c4eab | b1800b94248ed661136176abf4bc017b760a74f8 | /venv/bin/python-config | 9dd06e3d53006f05de765622568a0bdbbc14d0c3 | [] | no_license | kilik42/craigslistBot | 96f7b26bd5523428ea8793c46cc663f279817dd8 | 56e5bff138f0f2f9855965666c5a23c9255d6e5e | refs/heads/master | 2021-01-14T03:53:22.349664 | 2020-03-07T23:01:20 | 2020-03-07T23:01:20 | 242,591,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | #!/Users/marvinevins/Desktop/craigslist_scrape/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"maevins@cps.edu"
] | maevins@cps.edu | |
9be3dcb242b2f80c8e342caa7a134dfd8c9449fa | ba2f9884bd4f378dbbc3ad27fd4ae98ba4d60af6 | /eddietest/test.py | 8f40309517fce1b8af9ed296d3d97909813e10db | [] | no_license | EddieCarlson/Taskbasket | 50a9cf3b0af19a60178bc1883515765d40ef1dc3 | 36d87af79f43981c65bcf9acf38998f8f2dd7741 | refs/heads/master | 2021-01-19T11:02:16.531501 | 2011-01-31T05:30:41 | 2011-01-31T05:30:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py |
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from operator import itemgetter
from messages.models import Message
#from eddietest.messages.models import Kind
from messages.models import Task
stasks = []
ftasks = []
tasks = Task.objects.all()
for t in tasks:
stasks.append(((t.caption), (t.priority), (t.pk)))
sorted(stasks, key=itemgetter(1))
for t in stasks:
ftasks.append(Task.objects.all().get(pk=t[2]))
for t in ftasks:
t
| [
"carlson.eddie@gmail.com"
] | carlson.eddie@gmail.com |
57a622ec59ebe93a1a1c999e5976d3f0c975abfd | 6529bcd79ebe6114372345c5ab1b930b1894a809 | /advanced/tree.py | 1630bcc3342186fe9715adf248f440d9942387ea | [
"MIT"
] | permissive | rocket3989/ACCA2020 | 4202752ed6c4d1f42df5a9fa768672b6292557b4 | 24ae77df2ca428cf761a987fb2bb2f1a35285804 | refs/heads/master | 2021-01-13T18:38:07.170860 | 2020-02-23T05:00:17 | 2020-02-23T05:00:17 | 242,459,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | from collections import defaultdict
class node:
def __init__(self):
self.right = -1
self.left = -1
self.val = -1
def getInd(char, S):
for i, char1 in enumerate(S):
if char == char1:
return i
while True:
nodes = defaultdict(node)
sIn = input().strip()
if sIn == "EXIT": exit()
inOrder, postOrder = sIn.split()
root = postOrder[-1]
nodes[root].val = getInd(root, inOrder)
def add(val, char, curr):
if val > nodes[curr].val:
if nodes[curr].right == -1:
nodes[curr].right = char
return
add(val, char, nodes[curr].right)
else:
if nodes[curr].left == -1:
nodes[curr].left = char
return
add(val, char, nodes[curr].left)
for char in reversed(postOrder[:-1]):
nodes[char].val = getInd(char, inOrder)
add(getInd(char, inOrder), char, root)
def dfs(node):
if node == -1: return
print(node, end='')
dfs(nodes[node].left)
dfs(nodes[node].right)
dfs(root)
print() | [
"rocket3989@gmail.com"
] | rocket3989@gmail.com |
d4ddcf35379ac1b72dda69366f4af64a658d9779 | 3ec575ad10f10c8364438a05c6fbd20d8afaa5f5 | /ordinary_data/views/export/export_meter.py | fd3250645c6a32659e49f3707c6eef227453cb11 | [] | no_license | haubourg/qwat-data-model | e182c6e7e20b545696e57139fb83b87330828eed | a6f504ada51bcee3f9daaeb94c2af0ed2985df22 | refs/heads/master | 2020-06-11T15:27:51.255858 | 2017-04-24T10:09:50 | 2017-04-24T10:09:50 | 75,641,114 | 0 | 2 | null | 2017-01-19T18:18:54 | 2016-12-05T15:53:11 | PLpgSQL | UTF-8 | Python | false | false | 944 | py | #!/usr/bin/env python
import yaml
import sys
from sql_export_view import SqlExportView
if len(sys.argv) > 1:
pg_service = sys.argv[1]
else:
pg_service = "qwat_test"
definition = yaml.load("""
name: qwat_od.vw_export_meter
from: qwat_od.vw_element_meter
exclude_join_fields:
- geometry%
- label_1%
- label_2%
joins:
district:
table: qwat_od.district
fkey: fk_pressurezone
pressurezone:
table: qwat_od.pressurezone
fkey: fk_pressurezone
precision:
table: qwat_vl.precision
fkey: fk_precision
precisionalti:
table: qwat_vl.precisionalti
fkey: fk_precisionalti
object_reference:
table: qwat_vl.object_reference
fkey: fk_object_reference
distributor:
table: qwat_od.distributor
fkey: fk_distributor
status:
table: qwat_vl.status
fkey: fk_status
folder:
table: qwat_od.folder
fkey: fk_folder
""")
print SqlExportView(pg_service, definition).sql()
| [
"denis.rouzaud@gmail.com"
] | denis.rouzaud@gmail.com |
927ac184628e437c055bab2f36900998fbdc2c7d | 5adf788186c67671542a9b8933f14f53d4b2ce19 | /reattach_caps.py | 0c007165f5d9027547deff90d0b92e3d4ae3d4e3 | [] | no_license | Arielbs/beta_solenoids | 6c2bcc14a917c564aeed4827530ae44db65d4ce0 | 73f662da5a800dbe1f09d56ffdc368f7266cbafb | refs/heads/master | 2021-01-23T16:40:53.078386 | 2014-12-11T21:16:12 | 2014-12-11T21:16:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,594 | py | #! /usr/bin/env python
InfoString = '''
Run on expanded pose with src range tag in pdb name like:
.*src13_26__15_28.*.pdb
'''
# '''
### external
### libraries
from multiprocessing import Pool
import numpy as np
import subprocess
import argparse
import glob
import time
import sys
import os
import re
if '-h' not in sys.argv:
import rosetta
# rosetta.init()
rosetta.init(extra_options = " -ex1 -ex2 -no_optH false -use_input_sc ") # "-mute basic -mute core -mute protocols"
from rosetta.protocols import grafting
# from repo
import solenoid_tools
from generate_backbones import fuse
from generate_backbones import get_residue_array
from expand_constraints import constraint_extrapolator
from expand_constraints import pose_has
from expand_constraints import set_all_weights_zero
# '''
# sys.argv = [ sys.argv[0], '-ref_pdb', '1EZG_Relax.pdb', '-ref_cst', '1EZG_Relax_All.cst', '-repeat_tag', 'rep24_1EZG_Relax']
def cap_and_relax_pdb( (RepeatPdb, ReferencePdb, ReferenceCst) ):
RepeatPose = rosetta.pose_from_pdb(RepeatPdb)
TrimmedRepeatPose = grafting.return_region( RepeatPose, 3, RepeatPose.n_residue()-3 )
TrimmedRepeatPose.pdb_info( rosetta.core.pose.PDBInfo( TrimmedRepeatPose ) )
ReferencePose = rosetta.pose_from_pdb( ReferencePdb )
ReferencePose.pdb_info( rosetta.core.pose.PDBInfo( ReferencePose ) )
# rosetta.dump_pdb(TrimmedRepeatPose, 'Trimmed.pdb')
RepeatLength = int(re.sub(r'.*rep(\d+).*pdb', r'\1', RepeatPdb))
SourceRanges = re.sub(r'.*src(\d+_\d+__\d+_\d+).*pdb', r'\1', RepeatPdb)
SourceRanges = SourceRanges.split('__')
SourceRanges = [ [ int(Value) for Value in Range.split('_') ] for Range in SourceRanges ]
SourceStart = SourceRanges[0][0]
SourceEnd = SourceRanges[0][1]
'''
Add N terminal cap
'''
NcapPose = grafting.return_region( ReferencePose, 1, SourceStart+5 )
# rosetta.dump_pdb(NcapPose, 'Ncap.pdb')
NcapLength = NcapPose.n_residue()
NcapOverhangPositions = [ Position for Position in range(NcapLength-3, NcapLength+1) ]
# print NcapOverhangPositions
NcapOverhangArray = get_residue_array( NcapPose, NcapOverhangPositions )
RepStartOverhangPositions = [1,2,3,4]
RepStartOverhangArray = get_residue_array( TrimmedRepeatPose, RepStartOverhangPositions )
# print RepStartOverhangArray
RMSD, rMtx, tVec = solenoid_tools.rmsd_2_np_arrays_rosetta( NcapOverhangArray, RepStartOverhangArray )
rosetta.Pose.apply_transform_Rx_plus_v(TrimmedRepeatPose, rMtx, tVec)
# rosetta.dump_pdb( TrimmedRepeatPose, 'TrimmedShifted.pdb' )
NcapPlusRepeatPose, RMSD, NcapCorrespondingResidues = fuse(NcapPose, TrimmedRepeatPose)
print 'Ncap attachment RMSD %f'%RMSD
# rosetta.dump_pdb( NcapPlusRepeatPose, 'NcapPlusRepeat.pdb' )
NcapPlusRepeatPose.pdb_info( rosetta.core.pose.PDBInfo( NcapPlusRepeatPose ) )
'''
Add C terminal cap
'''
Cshift = SourceEnd-6
CcapPose = grafting.return_region( ReferencePose, Cshift, ReferencePose.n_residue() )
# rosetta.dump_pdb(CcapPose, 'Ccap.pdb')
CcapOverhangPositions = [1,2,3,4]
CcapOverhangArray = get_residue_array( CcapPose, CcapOverhangPositions )
RepEndOverhangPositions = [ Position for Position in range( NcapPlusRepeatPose.n_residue()-3, NcapPlusRepeatPose.n_residue()+1 ) ]
# print 'RepEndOverhangPositions', RepEndOverhangPositions
RepEndOverhangArray = get_residue_array( NcapPlusRepeatPose, RepEndOverhangPositions )
RMSD, rMtx, tVec = solenoid_tools.rmsd_2_np_arrays_rosetta( RepEndOverhangArray, CcapOverhangArray )
rosetta.Pose.apply_transform_Rx_plus_v(CcapPose, rMtx, tVec)
# rosetta.dump_pdb( CcapPose, 'CcapPose.pdb' )
CappedRepeatPose, RMSD, CcapCorrespondingResidues = fuse(NcapPlusRepeatPose, CcapPose)
print 'Ccap attachment RMSD %f'%RMSD
CappedNamePdb = re.sub(r'(.*).pdb$', r'\1_Cap.pdb', RepeatPdb)
assert CappedNamePdb != RepeatPdb, 'regular expression substitution failed!'
rosetta.dump_pdb( CappedRepeatPose, CappedNamePdb )
'''
Generate csts for cap/repeat edges
'''
CstExtrapolator = constraint_extrapolator(ReferenceCst)
ConstraintSet = []
' N cap constraints are easy; no shifts are needed '
# For catching when individual constraints have been considered already
Redundict = {}
for Position in range(1, SourceStart+6):
# print 'Position', Position
# Skip positions w/out constraints
try:
PositionCstDict = CstExtrapolator.Cst[Position]
except KeyError:
continue
for AtomName in PositionCstDict:
for Constraint in PositionCstDict[AtomName]:
# unpack tuple values
AtomResidueCoords, ConstraintParameters, CstLineNumber, CstType = Constraint
# Redundancy check with redundict
try:
Check = Redundict[CstLineNumber]
# if cst considered already, skip it!
continue
except KeyError:
Redundict[CstLineNumber] = 1
if pose_has(CappedRepeatPose, AtomResidueCoords):
ConstraintSet.append(Constraint)
' C cap constraints are harder; need to shift due to pose expansion '
# CstExtrapolator.output_cst(ConstraintSet, 'NcapConstraints.cst')\
Redundict = {}
# print 'CcapCorrespondingResidues', CcapCorrespondingResidues
RepeatCcapPositionStart = CcapCorrespondingResidues[0][0]
# print 'RepeatCcapPositionStart', RepeatCcapPositionStart
ShiftToRepeatPose = RepeatCcapPositionStart - Cshift
# print 'ShiftToRepeatPose', ShiftToRepeatPose
for Position in range( Cshift, ReferencePose.n_residue()+1 ):
# Skip positions w/out constraints
try:
PositionCstDict = CstExtrapolator.Cst[Position]
except KeyError:
continue
for AtomName in PositionCstDict:
for Constraint in PositionCstDict[AtomName]:
# unpack tuple values
AtomResidueCoords, ConstraintParameters, CstLineNumber, CstType = Constraint
# Redundancy check with redundict
try:
Check = Redundict[CstLineNumber]
# if cst considered already, skip it!
continue
except KeyError:
Redundict[CstLineNumber] = 1
ExpandedPoseAtomResidueCoords = []
# iterate through atom residue pairs
for AtomResiduePair in AtomResidueCoords:
# print 'AtomResiduePair', AtomResiduePair
ExpandedPosePosition = (AtomResiduePair[1]) + ShiftToRepeatPose
# print 'ExpandedPosePosition', ExpandedPosePosition
ExpandedPoseAtomResidueCoords.append( ( AtomResiduePair[0], ExpandedPosePosition ) )
ShiftedConstraint = ExpandedPoseAtomResidueCoords, ConstraintParameters, CstLineNumber, CstType
if pose_has(CappedRepeatPose, ExpandedPoseAtomResidueCoords):
ConstraintSet.append(ShiftedConstraint)
CapCstName = re.sub(r'(.*).pdb$', r'\1.cst', CappedNamePdb)
CstExtrapolator.output_cst(ConstraintSet, CapCstName)
'''
idealize peptide bonds with command line subprocess
'''
subprocess.check_output(['idealize_jd2.default.linuxgccrelease', '-s', CappedNamePdb])
IdealizedPdbOldName = re.sub(r'(.*).pdb$', r'\1_0001.pdb', CappedNamePdb)
IdealizedPdbNewName = re.sub(r'(.*).pdb$', r'\1_Ideal.pdb', CappedNamePdb)
subprocess.check_output(['mv', IdealizedPdbOldName, IdealizedPdbNewName])
time.sleep(0.2)
IdealizedCappedPose = rosetta.pose_from_pdb( IdealizedPdbNewName )
# make constraint mover
Constrainer = rosetta.ConstraintSetMover()
# get constraints from file
Constrainer.constraint_file(CapCstName)
Constrainer.apply(IdealizedCappedPose)
''' SET UP WEIGHTS AS decided '''
# RelativeWeight = 0.1
Talaris = rosetta.getScoreFunction()
TalarisPlusCst = rosetta.getScoreFunction()
AtomPairCst = set_all_weights_zero( rosetta.getScoreFunction() )
AtomPairCst.set_weight(rosetta.atom_pair_constraint, 1.0)
# RosettaScore = Talaris(IdealizedCappedPose)
# AtomPairCstScore = AtomPairCst(IdealizedCappedPose)
# Weight = ( RosettaScore * RelativeWeight ) / AtomPairCstScore
Weight = 1.0
TalarisPlusCst.set_weight(rosetta.atom_pair_constraint, Weight)
TalarisPlusCst.set_weight(rosetta.angle_constraint, Weight)
TalarisPlusCst.set_weight(rosetta.dihedral_constraint, Weight)
print 'relaxing %s with %s'%(IdealizedPdbNewName, CapCstName)
print ' Weight %d '%Weight
rosetta.relax_pose(IdealizedCappedPose, TalarisPlusCst, 'tag')
RelaxedPdbName = re.sub(r'(.*)_Ideal.pdb$', r'\1__Relax.pdb', IdealizedPdbNewName)
rosetta.dump_pdb(IdealizedCappedPose, RelaxedPdbName)
rosetta.relax_pose(IdealizedCappedPose, Talaris, 'tag')
RelaxedPdbName = re.sub(r'(.*)_Ideal.pdb$', r'\1__Relax2.pdb', IdealizedPdbNewName)
rosetta.dump_pdb(IdealizedCappedPose, RelaxedPdbName)
def main(argv=None):
if argv is None:
argv = sys.argv
ArgParser = argparse.ArgumentParser(description=' reattach_cap.py ( -help ) %s'%InfoString)
# Required args
ArgParser.add_argument('-ref_pdb', type=str, help=' Reference pdb ', required=True)
ArgParser.add_argument('-ref_cst', type=str, help=' Reference pdb ', required=True)
ArgParser.add_argument('-repeat_tag', type=str, help=' Input pdb tag ', required=True)
ArgParser.add_argument('-thread', type=int, help=" number of threads to run simultaneously ", default=15 ) # with default, there can be only one !
# Optional args
ArgParser.add_argument('-out', type=str, help=' Output directory ', default='./')
Args = ArgParser.parse_args()
ReferencePdb = Args.ref_pdb
ReferenceCst = Args.ref_cst
if Args.out [-1] != '/':
Args.out = Args.out + '/'
GlobString = '*%s*.pdb'%Args.repeat_tag
# print GlobString
Pdbs = glob.glob( GlobString )
Pdbs = [ Pdb for Pdb in Pdbs if not 'Cap' in Pdb ]
print ' Globbed %d pdb(s) with: %s '%( len(Pdbs), GlobString)
for ThreadChunkNumber in range( (len(Pdbs)/Args.thread) + 1):
Start = ThreadChunkNumber*Args.thread
End = Start+Args.thread
# print Start, End
PdbSubset = Pdbs[Start: End]
# pass strings, not poses, for ez pickling
ParallelizableInputTuples = []
for Pdb in PdbSubset:
ParallelizableInputTuples.append((Pdb, ReferencePdb, ReferenceCst))
### Iterative for debug
# for InputTuple in ParallelizableInputTuples:
# # print 'Error in one of pooled multiprocessing threads; iterating sequentially for debugging '
# cap_and_relax_pdb(InputTuple)
### Parallel for running
pool = Pool(processes=len(ParallelizableInputTuples))
pool.map(cap_and_relax_pdb, ParallelizableInputTuples)
if __name__ == "__main__":
sys.exit(main())
| [
"pylesharley@gmail.com"
] | pylesharley@gmail.com |
49d9a8ec5cc5ed5985d21d452dd07bc47e43e1ca | 274bb29d09e4ebc97aaf8376066a1239f9cf179e | /tests/conftest.py | 447bcdbc6c079901764264736de9cc8d71bcd621 | [
"MIT"
] | permissive | Brewgarten/storm-softlayer | 86a5d17994d01260929fedfbc93158936c36e809 | 9f64fb972f26fa367f151d5e25f5077ba1bf5467 | refs/heads/master | 2021-07-11T01:20:28.026350 | 2017-10-02T21:19:01 | 2017-10-02T21:19:01 | 106,289,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | import logging
import os
import pytest
from libcloud.compute.providers import get_driver
log = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [%(levelname)s] [%(name)s(%(filename)s:%(lineno)d)] - %(message)s', level=logging.INFO)
def getSoftLayerDriver():
import storm.drivers.softlayer
cls = get_driver("sl")
return cls.ex_from_config()
@pytest.fixture(scope="module")
def softlayerDriver():
"""
SoftLayer Cloud driver
"""
import storm.drivers.softlayer
if not os.path.exists(os.path.expanduser("~/.softlayer")):
pytest.skip("requires ~/.softlayer file with account information")
return getSoftLayerDriver()
def pytest_generate_tests(metafunc):
if "driver" in metafunc.fixturenames:
softlayerDriverInstance = getSoftLayerDriver()
metafunc.parametrize("driver", [
pytest.mark.skipif(not os.path.exists(os.path.expanduser("~/.softlayer")),
reason="requires ~/.softlayer file with account information")
(softlayerDriverInstance)
])
| [
"martin.kuehnhausen@us.ibm.com"
] | martin.kuehnhausen@us.ibm.com |
3112a7712ea5bdf4b9b3cea414797d8b41d7e58c | f7024a61e980f026a557b80fb7b064a14a631699 | /src/Rover3D_On_TriangleMesh/MechTests/MechTest2/qdots.py | 2a50fca60890d4568f5d3861ac35276c315d1be4 | [] | no_license | siconos/rover | b3c5ad566d071a4fc2f14451b57b3cb9b6eff543 | 60bd2ae0658d166b8124ff3d3bac020c737c8312 | refs/heads/master | 2021-01-21T23:16:30.523546 | 2017-06-23T14:40:38 | 2017-06-23T14:40:38 | 95,217,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | #! /usr/bin/env python
# plotting rover q generalized coordinates.
import matplotlib
matplotlib.use('Qt4Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams
rc('text', usetex=True)
#wheel's qdot
t, x = np.loadtxt('VELOCITY.dat', usecols=(0, 8), unpack=True)
#mass center qdot
#t, x, y, z = np.loadtxt('VELOCITY.dat', usecols=(0, 1, 2, 3), unpack=True)
fig = plt.figure(1)
ax = fig.add_subplot(111)
#wheel's qdot
ax.plot(t, x, label='x')
#ax.plot(t, y, label='y')
#ax.plot(t, z, label='z')
plt.title(r'$v_{FL}$ $velocity$')
plt.xlabel(r'$T [s]$')
plt.ylabel(r'$v_{FL}$ $velocity$ $[\frac{rad}{s}]$')
plt.annotate('torque turn-on', xy=(50, 0), xytext=(40, 0.10),
arrowprops=dict(facecolor='blue', shrink=0.05),
)
#mass center qdot
#ax.plot(t, x, label='x')
#ax.plot(t, y, label='y')
#ax.plot(t, z, label='z')
#plt.title(r'$v_{COM}$ $velocity$')
#plt.xlabel(r'$T [s]$')
#plt.ylabel(r'$v_{COM}$ $velocity$ $[\frac{m}{s}]$')
plt.legend(loc='upper left')
plt.show()
| [
"vincent.acary@inria.fr"
] | vincent.acary@inria.fr |
0d6229d2d38942de75e024f1434dad829f57195e | 1ec6fe8811cb2b21b68eca7d75ac6b3c88e0f8ba | /Week_07/G20200389010080/hotspot_crawler/spiders/TencentHotspot.py | 05a0c40073ef36235158660a17124073320a1c84 | [] | no_license | hopeqpy/Python000-class01 | 5f0aa8f3aaba7da97819ec073fd9d16c0cd902e8 | 73b8f8606c5cce0ea8982aed3705ad4cfc70cc70 | refs/heads/master | 2022-06-26T13:36:58.766271 | 2020-05-07T07:24:24 | 2020-05-07T07:24:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,510 | py | # -*- coding: utf-8 -*-
import json
import re
from json import JSONDecodeError
import scrapy
from ..items import HotspotCrawlerItem
class TencentHotspotSpider(scrapy.Spider):
name = "TencentHotspot"
allowed_domains = ["qq.com"]
allLink = [
"https://pacaio.match.qq.com/irs/rcd?cid=137&token=d0f13d594edfc180f5bf6b845456f3ea&id=&ext=top&page=0&expIds=&callback=__jp1",
"https://pacaio.match.qq.com/irs/rcd?cid=4&token=9513f1a78a663e1d25b46a826f248c3c&ext=&page=0&expIds=&callback=__jp2"]
start_urls = [
"https://pacaio.match.qq.com/irs/rcd?cid=137&token=d0f13d594edfc180f5bf6b845456f3ea&id=&ext=top&page=0&expIds=&callback=__jp1"]
for i in range(0, 11):
url = "https://pacaio.match.qq.com/irs/rcd?cid=137&token=d0f13d594edfc180f5bf6b845456f3ea&ext=top&page=" + str(
i + 1) + "&callback=__jp" + str(i + 4)
allLink.append(url)
def parse(self, response):
for link in self.allLink:
yield scrapy.Request(url=link, callback=self.parse_top_news)
def parse_top_news(self, response):
origin = response.text
trimmed = re.sub(pattern="__jp\\d+", repl="", string=origin).strip('()')
# print(trimmed)
try:
articles = json.loads(trimmed, encoding='utf-8')
except JSONDecodeError:
# print(trimmed)
try:
if trimmed.endswith('])'):
articles = json.loads(trimmed[:-2], encoding='utf-8')
elif trimmed.startswith('(['):
articles = json.loads(trimmed[1:], encoding='utf-8')
else:
articles = trimmed
raise Exception("数据解析错误,跳过当前url", articles)
# 假如运行到这里,就是json解析出问题了,立刻抛出异常结束这个url的抓取
except Exception as e:
self.logger.critical(msg="遇到异常,调试信息如下:\n%s" % e.args)
return
if 'code' not in articles:
# 是今日要闻
for article in articles:
news = HotspotCrawlerItem()
news['title'] = article['title']
news['content_url'] = article['url']
news['newsId'] = article['article_id'].upper()
news['source'] = "腾讯新闻"
yield scrapy.Request(url=news['content_url'], callback=self.parse_news_contents)
else:
# 是热点精选
if articles['code'] == 0:
articles = articles['data']
for article in articles:
news = HotspotCrawlerItem()
news['title'] = article['title']
news['publish_time'] = article['publish_time']
news['source'] = "腾讯新闻"
news['keywords'] = []
# 腾讯新闻中,tag比keywords包含了更多的信息,故选择tags中的内容作为本次的keyword
for each in article['tags'].split('[;|;]'):
news['keywords'].append(each)
news['content_url'] = article['vurl']
news['source'] = article['source']
news['newsId'] = article['id']
yield scrapy.Request(url=news['content_url'], callback=self.parse_news_contents)
else:
self.logger.error(msg="处理请求出错,原因:返回值非零")
return None
def parse_news_contents(self, response):
url = response.url
print("parsing url %s" % url)
if re.match(r"https://new.qq.com/\w+", string=url):
if re.match(r"https?://new.qq.com/notfound.htm\w+", string=url):
return None
if re.match(r"https://new.qq.com/omn/\w+/\w+.html", string=url):
news_id = url.split('/')[-1][:-5]
elif re.match(r"https://new.qq.com/zt/template/\?id=\w+", string=url):
news_id = url.split('/')[-1][4:]
else:
news_id = url.split('/')[-1]
# print("current news id: %s" % news_id)
yield scrapy.Request(
url="https://openapi.inews.qq.com/getQQNewsNormalContent?id={}&refer=mobilewwwqqcom&otype=json&ext_data=all&srcfrom=newsapp&callback=getNewsContentOnlyOutput".format(
news_id), callback=self.parse_news_api_json)
else:
# 不满足以上三条的链接应该不会是一条新闻了
return None
def parse_news_api_json(self, response):
# print(response.url)
content = json.loads(s=response.text, encoding="utf-8")
# 当且仅当返回码为0的时候,才继续解析
if content.get('ret') == 0:
news = HotspotCrawlerItem()
news['newsId'] = content.get('id')
news['title'] = content.get('title')
news['content_url'] = content.get('url')
news['media_url'] = {}
news['media_url']['img_url'] = []
news['media_url']['img_url'].append(content.get('img').get('imgurl'))
news['media_url']['video_url'] = []
cmt_id = content.get('cid')
news['hot_data'] = self.get_hot_statistics(cmt_id)
news['source'] = "腾讯新闻"
news['source_from'] = content.get('src')
news['publish_time'] = content.get('pubtime')
news['keywords'] = content.get('ext_data').get('ext').get('tags')
if isinstance(content.get("ext_data").get("cnt_attr"), dict):
for key, value in content.get("ext_data").get("cnt_attr").items():
if re.search("VIDEO", key.upper()):
if value.get(key) is not None:
vid = value.get(key).get("vid")
news['media_url']['video_url'].append("https://v.qq.com/x/page/" + vid)
for each in value.get(key).get("img"):
news['media_url']['img_url'].append(each.get("imgurl"))
if re.search("IMG", key.upper()):
if value.get(key) is not None:
for each in value.get(key).get("img"):
news['media_url']['img_url'].append(each.get("imgurl"))
content_news = content.get("ext_data").get("cnt_html")
news['content'] = self.del_html_labels(content_news)
news['abstract'] = content.get("ext_data").get("abstract") or content.get("ext_data").get("abstract_pad") or \
news['content'][:100]
news['abstract'] = re.sub(r'\u3000', repl="", string=news['abstract'])
return news
else:
self.logger.critical(msg="返回码为%d,api解析失败" % content.get('ret'))
return None
def get_hot_statistics(self, cmtId):
import requests
# http://coral.qq.com/article/4000875247/comment?commentid=0&reqnum=1&tag=
url = 'http://coral.qq.com/article/{}/comment?commentid=0&reqnum=1&tag='.format(cmtId)
req = requests.get(url=url, headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
})
# print(req.url)
content = req.json()
if content.get('errCode') == 0 and content.get('data'):
comment_num = content.get('data').get('total') or int(
content.get('data').get('targetinfo').get('commentnum'))
participate_count = int(content.get('data').get('targetinfo').get('orgcommentnum')) or comment_num
return {
"comment_num": comment_num,
"participate_count": participate_count
}
else:
# print(content)
return {
"comment_num": str(content.get('errCode')) + ": 无法获取评论数据",
"participate_count": "无法获取参与人数数据",
}
def del_html_labels(self, html_text):
html_text = html_text.replace('\\', '')
html_text = re.sub(r'<!--H2-->\S+<!--/H2-->', repl="", string=html_text)
html_text = re.sub(r'\u3000', repl="", string=html_text)
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_text, "lxml")
return soup.text
| [
"kitezz@junyudeMacBook-Air.local"
] | kitezz@junyudeMacBook-Air.local |
49cdd13d7ed5a5dbe0c370db1b883a88b7635dd4 | 2e45ed5925c2f27e14606f4b1fced2f8924e65f2 | /test.py | 562a47799dfe7ac7918debd0c73a8290233fedd0 | [] | no_license | jenkins-cd-workshop/multiple-stages | c8c3b3560db935d26012a3b2a70ccb727b628eb5 | d6314e847ad5bff8728d38209c25720cb81f503b | refs/heads/master | 2021-01-22T21:54:16.705594 | 2017-05-29T14:00:56 | 2017-05-29T14:00:56 | 92,744,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from unittest import TestCase
class MyTest(TestCase):
def test_answer(self):
answer = 42
self.assertEqual(answer, 42);
| [
"noreply@github.com"
] | jenkins-cd-workshop.noreply@github.com |
2450f5bbe7e32581c7f7d407c655e8a243140def | f109a5d3c71cc591e9ccc6278b382d6ac27a8195 | /RK4/grafica2RK4.py | 71762aabbe0bdbec0c9cecbe946571161474da2f | [] | no_license | KarenChincoya/MetodosNumericos | c9bd648b1c546083e0bd2206f7f55bc411f7b92b | f57e7f7b02c81b1fc6f8ae7e2daa3a09c5438967 | refs/heads/master | 2020-04-09T17:43:12.526595 | 2018-12-05T17:02:18 | 2018-12-05T17:02:18 | 160,489,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | #requiered libraries
import numpy
import matplotlib.pyplot as plt
#Initial parameters
alpha = 0.1 # Prey growing up rate
beta = 0.02 # Predator successful rate
gamma = 0.3 # Predator decrease rate
delta = 0.01 # Pretator hunting successful rate && feeding rate, cuanto alimenta cazar una presa
#Function euler (Para las iteraciones)
def nextValue(u, dt, k1, k2,k3, k4):
suma1 = numpy.add(k1/6,2*k2, 2*k3+k4)
result = u + dt *(suma1) # y1 = y0 + f(x0,y0) * (x1-x0)
print(result)
return result
#Lotka-Volterra equations
def lotka_volterra_function(u):
x = u[0]
y = u[1]
return numpy.array([x*(alpha - beta*y), -y*(gamma - delta*x)])
# Time axis configuration (the x coordinates of the integration)
T = 200.0 # upper time limit
dt = 0.25 # time increment per step
N = int(T/dt) + 1 # number of sample points (intervals + 1)
x0 = 40. # initial prey population
y0 = 9. # initial predator population
t0 = 0.
# Array holding the (prey, predator) solution at every step
solution = numpy.empty((N, 2))
# Initial value
solution[0] = numpy.array([x0, y0])
# Main integration loop: builds four slope evaluations per step and feeds
# them to nextValue().
# NOTE(review): k1e/k2e/k3e are the *squared norms* of the slope vectors
# and are then applied as a scalar slope to the y component only, while x
# advances by plain dt/2 (resp. dt).  This does not match textbook RK4,
# which evaluates f at u + (dt/2)*k using the vector k directly -- confirm
# whether this scheme is intentional before trusting the results.
for n in range(N-1):
    k1 = lotka_volterra_function(solution[n])
    k1e = ((k1[0]**2+k1[1]**2))
    xk2 = solution[n][0]+dt/2
    yk2 = solution[n][1]+ k1e *dt/2
    k2 = lotka_volterra_function(numpy.array([xk2,yk2]))
    k2e = ((k2[0]**2+k2[1]**2))
    xk3 = solution[n][0]+dt/2
    yk3 = solution[n][1] + k2e*dt/2
    k3 = lotka_volterra_function(numpy.array([xk3,yk3]))
    k3e = ((k3[0]**2+k3[1]**2))
    xk4 = solution[n][0]+dt
    yk4 = solution[n][1] + k3e*dt
    k4 = lotka_volterra_function(numpy.array([xk4, yk4]))
    solution[n+1] = nextValue(solution[n], dt, k1, k2, k3, k4)
# Unpack the solution into separate coordinate arrays and echo them to
# stdout.  `time` holds the evenly spaced sample times (not used by the
# phase plot below).
time = numpy.linspace(0.0, T, N)
x_euler = numpy.empty(N)
y_euler = numpy.empty(N)
for i in range(N):
    x_euler[i] = solution[i][0]
    y_euler[i] = solution[i][1]
    print(x_euler[i] ,",", y_euler[i])
# Phase-space plot: prey population (x axis) against predators (y axis).
plt.figure("Presas vs depredadores", figsize=(8,5))
plt.title("Población de presas en función de los depredadores")
plt.plot(solution[:, 0], solution[:, 1])# prey on the x axis, predators on the y axis
plt.xlabel('presas')
plt.ylabel('depredadores')
plt.show()
"karenchincoya@gmail.com"
] | karenchincoya@gmail.com |
979a3054aaeebf98a39f6aee0faa382c5c86d234 | df704d25a8d2dd8e36043cbb153a70e9537b4c66 | /rating_use_query.py | d83f1682822c02d2a871441e3db8d864960ae9ac | [] | no_license | lethelimited/Graph-For-Movie | cd7eb254be82750660556b0b90de91cf47b057e2 | 4d7fc56426a4ffe3c1a0e654c29b8895fa88d79b | refs/heads/master | 2020-03-30T10:49:13.091134 | 2018-10-04T01:03:54 | 2018-10-04T01:03:54 | 151,137,961 | 0 | 0 | null | 2018-10-01T18:28:54 | 2018-10-01T18:28:54 | null | UTF-8 | Python | false | false | 1,789 | py | from py2neo import Graph, Relationship, NodeMatcher, Node
import sys
db = Graph("bolt://localhost:7687", password="123")
# tx = db.begin()
# tx.commit()
def ready_to_load(path='/Users/Lim/Documents/DAnalyticsWorkspace/ml-10M100K/ratings.csv'):
    """Rewrite a MovieLens ratings file in place, turning '::' separators
    into ',' so Neo4j's LOAD CSV can read it.

    Parameters
    ----------
    path : str, optional
        File to convert.  Defaults to the original hard-coded location, so
        existing no-argument callers keep working.
    """
    # The explicit f.close() calls were redundant: `with` already closes
    # the file on exit.
    with open(path, 'r', encoding='UTF-8') as f:
        data = f.readlines()
    converted = [line.replace("::", ",") for line in data]
    with open(path, 'w', encoding='UTF-8') as f:
        f.writelines(converted)
def insert_tags():
    """Create (User)-[:TAG {tag}]->(Movie) relationships from MovieLens'
    tags.dat (rows of UserID::MovieID::Tag::Timestamp).

    Only rows whose User and Movie nodes already exist in the graph are
    linked; everything is committed in one transaction at the end.
    The timestamp column (split_data[3]) is intentionally ignored.
    """
    # Redundant f.close() removed: `with` closes the file on exit.
    with open('/Users/Lim/Documents/DAnalyticsWorkspace/ml-10M100K/tags.dat', 'r', encoding='UTF-8') as f:
        text = f.readlines()
    matcher = NodeMatcher(db)
    tx = db.begin()
    for data in text:
        split_data = data.split("::")
        user_id = int(split_data[0])
        movie_id = int(split_data[1])
        tag = split_data[2]
        user = matcher.match("User", id=user_id).first()
        movie = matcher.match("Movie", id=movie_id).first()
        if user and movie:
            relationship = Relationship(user, "TAG", movie, tag=tag)
            tx.create(relationship)
            # Progress indicator.  Bug fix: the old code wrote
            # '{}\r' + str(user_id), which printed a literal "{}" instead
            # of substituting the id into the placeholder.
            sys.stdout.write('{}\r'.format(user_id))
    tx.commit()
def query_insert_rating():
    """Bulk-load ratings via LOAD CSV from Neo4j's import directory.

    Matches existing Movie nodes, creates User nodes on demand, and
    attaches a RATES relationship carrying the rating, committing every
    10000 rows.

    NOTE(review): line.rating is stored as a string property; wrap it in
    toFloat(...) if numeric comparisons/ordering are needed downstream.
    """
    # import/rating.csv
    query = "USING PERIODIC COMMIT 10000 " \
            "LOAD CSV WITH HEADERS FROM 'file:///ratings.csv' " \
            "as line FIELDTERMINATOR ',' " \
            "MATCH (m:Movie{id:toInteger(line.movie_id)}) " \
            "MERGE (u:User{id:toInteger(line.user_id)}) " \
            "MERGE (u)-[:RATES {rating:line.rating}]->(m)"
    db.run(query)
insert_tags()
| [
"limed.kevin@gmail.com"
] | limed.kevin@gmail.com |
6e21c6403d218298734c0f324988cf29f74bc51e | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/equiLeader_20200827132135.py | 83e3ff7f1c7937960dd74336f7b723351b899079 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | def equi(A):
equi([]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
f564e673cf4088066560d3ab08d2ec371b8f5da5 | 0e93a805e17d7ab73b8e94c66c85500a0ef4e865 | /reddit/store/datastore.py | 64c03ff69896b50eae8ed35b92efa586a2180ecb | [] | no_license | charulagrl/reddit | 506222722ed2e6b88ab9a77485754e743ebf66ad | cd58b4343094da49e37379e05bce85de27cb2fbd | refs/heads/master | 2020-03-18T19:18:21.579766 | 2018-06-04T03:01:49 | 2018-06-04T03:01:49 | 135,146,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | # -*- coding: utf-8 -*-
from reddit.models.upvote import Upvote
from reddit.models.downvote import Downvote
class DataStore(object):
    """Simple in-memory data store for the reddit-clone app.

    Holds topics, users and two vote-tracking model objects; nothing is
    persisted beyond process memory.
    """
    def __init__(self):
        # topic id -> topic model object
        self.topics = {}
        # vote-tracking model instances (reddit.models.upvote/downvote)
        self.upvotes = Upvote()
        self.downvotes = Downvote()
        # user id -> user object
        self.users = {}
        # the user currently considered "logged in", if any
        self.current_user = None
    def get_topic(self, topic_id):
        # Returns the topic as a dict with vote counts merged in, or None.
        # NOTE(review): this mutates topic.__dict__ in place, so the stored
        # model object permanently gains "upvotes"/"downvotes" keys --
        # confirm that side effect is intended.
        topic = self.topics.get(topic_id, None)
        if topic:
            topic = topic.__dict__
            topic["upvotes"] = self.upvotes.get_upvotes(topic['id'])
            topic["downvotes"] = self.downvotes.get_downvotes(topic['id'])
        return topic
    def get_upvotes(self, topic_id):
        # NOTE(review): other methods call self.upvotes.get_upvotes(...);
        # `.get(topic_id, None)` assumes the Upvote model also exposes a
        # dict-style `get` -- verify against the Upvote class.
        upvotes = self.upvotes.get(topic_id, None)
        return upvotes
    def get_downvotes(self, topic_id):
        # NOTE(review): same dict-style `get` assumption as get_upvotes.
        downvotes = self.downvotes.get(topic_id, None)
        return downvotes
    def get_user(self, user_id):
        # Plain dict lookup; returns None for unknown user ids.
        users = self.users.get(user_id, None)
        return users
    def get_all_topics(self):
        # Returns every topic as a dict with vote counts merged in
        # (same in-place __dict__ mutation caveat as get_topic).
        topics = [self.topics[topic].__dict__ for topic in self.topics.keys()]
        for topic in topics:
            topic["upvotes"] = self.upvotes.get_upvotes(topic['id'])
            topic["downvotes"] = self.downvotes.get_downvotes(topic['id'])
        return topics
| [
"charul.agrl@gmail.com"
] | charul.agrl@gmail.com |
c0cee13a06c45bab1b2225770ddb43da0ae3d15b | f21231e5dfe9f6240ea3d6da56b6ccfff6ef99ca | /python/Ball.py | d64bed074ae64bb6dfee323fefcb7b95c9f22c31 | [
"MIT"
] | permissive | anwar0101/Basketball-Game | d2454f17e668ad88dfac690b133a9ea89c4f2ea5 | f03eea17bca7ab23e12ad335eb07dfa27e637b8d | refs/heads/master | 2020-05-04T23:22:53.115531 | 2018-10-16T18:08:54 | 2018-10-16T18:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | import pygame
import numpy as np
from scipy.integrate import ode
class Ball2D(pygame.sprite.Sprite):
    """A 2-D ball sprite whose motion is integrated with scipy's 'dop853'
    ODE solver.

    State vector layout: [x, y, vx, vy].  The y coordinate is flipped when
    drawing because pygame's origin is the top-left corner.
    """

    def __init__(self, imgfile, radius, mass=1.0):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(imgfile)
        self.image = pygame.transform.scale(self.image, (radius*2, radius*2))
        self.state = [0, 0, 0, 0]       # [x, y, vx, vy]
        self.prev_state = [0, 0, 0, 0]  # state before the latest update
        self.mass = mass
        self.t = 0
        self.radius = radius
        self.friction = 0.0001          # linear drag coefficient
        self.g = 9.8                    # gravitational acceleration
        self.solver = ode(self.f)
        self.solver.set_integrator('dop853')
        self.solver.set_f_params(self.friction, self.g)
        self.solver.set_initial_value(self.state, self.t)

    def f(self, t, state, arg1, arg2):
        """ODE right-hand side; arg1 is friction, arg2 is gravity.

        NOTE(review): ``dx += dvx`` / ``dy += dvy`` fold the acceleration
        into the returned position derivative -- confirm this is intended
        rather than plain [vx, vy, dvx, dvy].
        """
        dx = state[2]
        dy = state[3]
        dvx = - state[2] * arg1
        dvy = -arg2 - state[3]*arg1
        dx += dvx
        dy += dvy
        return [dx, dy, dvx, dvy]

    def set_pos(self, pos):
        """Set the (x, y) position, re-seed the solver; returns self."""
        self.state[0:2] = pos
        self.solver.set_initial_value(self.state, self.t)
        return self

    def set_vel(self, vel):
        """Set the (vx, vy) velocity, re-seed the solver; returns self."""
        self.state[2:] = vel
        self.solver.set_initial_value(self.state, self.t)
        return self

    def update(self, dt):
        """Advance the simulation by dt seconds."""
        self.t += dt
        self.prev_state = self.state
        self.state = self.solver.integrate(self.t)

    def move_by(self, delta):
        """Translate the position by ``delta``; returns self.

        Bug fix: the original body referenced the undefined local name
        ``state`` and the nonexistent attribute ``self.pos``, so every
        call raised NameError.
        """
        self.prev_state = self.state
        self.state[0:2] = np.add(self.state[0:2], delta)
        return self

    def draw(self, surface):
        """Blit the ball onto ``surface`` (y flipped into screen space)."""
        rect = self.image.get_rect()
        rect.center = (self.state[0], 640-self.state[1]) # Flipping y
        surface.blit(self.image, rect)
"seantan@singtel.com"
] | seantan@singtel.com |
dc7d94b33b00888d12d6e212621095425ed0c9ec | 353d8d910513e64017a6c0f2a254eda845373730 | /cult/registration/migrations/0001_initial.py | 7eb96cc7419beb66ba45d1bd07603074c08af9f0 | [] | no_license | AshwiniS-ui/cultApp | bb41ee3ab9b9337da85f0b4e1313b7bbdba6cab2 | 44dce09bfd76dec28c3b972bb3d15fffa65fa35a | refs/heads/master | 2023-08-05T03:14:08.951536 | 2021-09-19T09:42:52 | 2021-09-19T09:42:52 | 408,081,563 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # Generated by Django 3.0.3 on 2020-02-22 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='registration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=45)),
('ph_no', models.CharField(max_length=35)),
('gmail', models.CharField(max_length=45)),
('password', models.CharField(max_length=25)),
],
),
]
| [
"ashwinis9632@gmail.com"
] | ashwinis9632@gmail.com |
ea5422ae5d22e6da43e48d0a14ad4febe6d563d7 | 661a8b8b4b7a0295c44268f2b3d5b208e472c093 | /scraper.py | c3e97dad8a39451394d0f69c0555987c35065f19 | [] | no_license | royerguerrero/La-Republica-Web-Scraper | 75be56a1e3551a156b80721733d9bbcefe4d9798 | 29d2e15a5be5388de2e4079880b87fe506ba8cee | refs/heads/main | 2023-01-31T10:57:10.038151 | 2020-12-16T17:49:52 | 2020-12-16T17:49:52 | 322,060,041 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,616 | py | import requests
import os
from datetime import datetime
from lxml import html
# Base URL and XPath selectors matching larepublica.co's article markup.
HOME_URL = 'https://www.larepublica.co/'
XPATH_LINK_TO_ARTICLE = '//text-fill/a/@href'
XPATH_NEWS_TITLE = '//div[@class="mb-auto"]/text-fill/a/text()'
XPATH_NEWS_SUMMARY = '//div[@class="lead"]/p/text()'
XPATH_NEWS_CONTENT = '//div[@class="html-content"]/p/text()'
def parse_notice(link, date):
    """Download one article and write it to '<date>/<title>.text'.

    The file holds the title, a separator line, the summary, and then the
    body paragraphs.  Pages lacking the expected title/summary nodes
    (galleries, videos, ...) are silently skipped.  Non-200 responses are
    reported via the local ValueError handler.
    """
    print(f'[!] Scraping {link[:70]}...')
    try:
        response = requests.get(link)
        if response.status_code == 200:
            notice = response.content.decode('utf-8')
            parsed = html.fromstring(notice)
            try:
                title = parsed.xpath(XPATH_NEWS_TITLE)[0]
                # Sanitize the title before using it as a file name: drop
                # quotes and replace '/'.  Bug fix: a slash in a headline
                # used to make open() fail with FileNotFoundError.
                title = title.replace('\"', '').replace('/', '-')
                summary = parsed.xpath(XPATH_NEWS_SUMMARY)[0]
                content = parsed.xpath(XPATH_NEWS_CONTENT)
            except IndexError:
                return
            with open(f'{date}/{title}.text', 'w', encoding='utf-8') as f:
                f.write(title)
                f.write('\n')
                f.write('--- ' * 10)
                f.write('\n')
                f.write(summary)
                f.write('\n\n')
                for p in content:
                    f.write(p)
                    f.write('\n')
        else:
            raise ValueError(f'Error: {response.status_code} to visit {link}')
    except ValueError as ve:
        print(ve)
def parse_home():
    """Scrape the front page: collect every article link and store each
    article in a folder named after today's date (dd-mm-YYYY)."""
    try:
        response = requests.get(HOME_URL)
        # Guard clause: bail out (via the local handler) on a bad status.
        if response.status_code != 200:
            raise ValueError(f'Error: {response.status_code}')
        document = html.fromstring(response.content.decode('utf-8'))
        article_links = document.xpath(XPATH_LINK_TO_ARTICLE)
        folder = datetime.now().strftime('%d-%m-%Y')
        if not os.path.isdir(folder):
            os.mkdir(folder)
        for article_link in article_links:
            parse_notice(article_link, folder)
    except ValueError as error:
        print(error)
def run():
print("""
_ __ __ _____
| | / /__ / /_ / ___/______________ _____ ___ _____
| | /| / / _ \/ __ \ \__ \/ ___/ ___/ __ `/ __ \/ _ \/ ___/
| |/ |/ / __/ /_/ / ___/ / /__/ / / /_/ / /_/ / __/ /
|__/|__/\___/_.___/ /____/\___/_/ \__,_/ .___/\___/_/
/_/
Web Scraper to La Republica News - @RoyerGuerreroP
""")
parse_home()
if __name__ == '__main__':
run()
| [
"royjuni3431@gmail.com"
] | royjuni3431@gmail.com |
1b6cf0089d6fa968fd06bde42a43eded2f0981a2 | 6bea1b91c980c9e87a109019be3879e456d009ba | /hackerrank/hackerrank/pattern/pattern3.py | c992d70135019a2b64eb6f237ed45e7918086bf5 | [] | no_license | Bawya1098/Hackerrank | 631c886f4fcacbb28c637f3073b316059c579c58 | 7eb02f71a5c3c02115301288c9e71b0cfdd5eb3d | refs/heads/master | 2020-04-22T03:15:16.233413 | 2019-05-02T05:43:45 | 2019-05-02T05:43:45 | 170,079,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | maxLength = 5
# Prints an inverted star triangle, e.g. for maxLength = 5:
# *****
#  ***
#   *
height = int((maxLength + 1) / 2)
for row in range(1, height + 1):
    # row-1 leading spaces, then an odd, shrinking run of stars.
    stars = maxLength - 2 * (row - 1)
    print(" " * (row - 1) + "*" * stars)
| [
"admin@Intern3-MacBook-Pro.local"
] | admin@Intern3-MacBook-Pro.local |
8198cb149c7daeff52703cb382cec19dd300f4fb | e9c4239c8064d882691314fd5b37208f10447173 | /leetcode/101-200题/149maxPointsOnALine.py | 2513ef40fbc59c951045392b37c3fe62af35e376 | [] | no_license | IronE-G-G/algorithm | 6f030dae6865b2f4ff4f6987b9aee06874a386c1 | 6f6d7928207534bc8fb6107fbb0d6866fb3a6e4a | refs/heads/master | 2020-09-21T03:02:20.908940 | 2020-03-22T15:19:41 | 2020-03-22T15:19:41 | 224,658,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | """
149 直线上的最多点数
给定一个二维平面,平面上有 n 个点,求最多有多少个点在同一条直线上。
示例 1:
输入: [[1,1],[2,2],[3,3]]
输出: 3
解释:
^
|
| o
| o
| o
+------------->
0 1 2 3 4
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/max-points-on-a-line
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution(object):
def maxPoints(self, points):
"""
:type points: List[List[int]]
:rtype: int
以每个点为基准,算过这个点的线里面过点最多的线,以斜率的分子分母的最大公约数连成子串为key,(0,-inf,普通)
"""
if len(points) < 3:
return len(points)
res = 0
N = len(points)
for i in range(N):
hashMap = dict()
duplicates = 0
maxsize = 0
for j in range(i + 1, N):
x = points[j][0] - points[i][0]
y = points[j][1] - points[i][1]
if x == 0 and y == 0:
duplicates += 1
continue
if x == 0:
key = '-inf'
elif y == 0:
key = '0'
else:
c = self.gcd(x, y)
x = x // c
y = y // c
key = '%d@%d' % (x, y)
if key not in hashMap:
hashMap[key] = 1
else:
hashMap[key] += 1
maxsize = max(maxsize, hashMap[key])
res = max(res, maxsize + duplicates + 1)
return res
def gcd(self, a, b):
while b != 0:
tmp = a % b
a = b
b = tmp
return a
| [
"linjh95@163.com"
] | linjh95@163.com |
449683a2ea90d6f3f95d067503a36be2a684bfb9 | 8a75eca2c5ced9c9700a87592dfd5f103217fb73 | /mysite/settings.py | d9f0f14584f7231771278f1501f26ef251d87d2e | [] | no_license | myerkes/polls | f463ab3f91ca4790a501346acda1b889fc0c3fb5 | 9a49fdff914bbe9ebb2a1a6984456c9140e83f16 | refs/heads/main | 2023-06-12T05:00:05.878186 | 2021-06-29T18:32:22 | 2021-06-29T18:32:22 | 371,468,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it into an
# environment variable before any real deployment.
SECRET_KEY = 'django-insecure-2#1+8h)msyktfka0e)_&jy3%u@+c%#zqggrs0(a6(3brkxi!%4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '*' accepts any Host header; restrict to real hostnames in
# production to prevent Host-header attacks.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'polls.apps.PollsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
# Project-level templates are looked up in <BASE_DIR>/templates in addition
# to each app's own templates directory (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# SQLite file stored alongside the project; fine for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"mattyerkes23@gmail.com"
] | mattyerkes23@gmail.com |
bfdfbc285c7836f84dfbcfd36e94844e3b963418 | 8da26dd597e7c2dc9ca4b337aac1c77137a3f20a | /fixture/contact.py | 4a16e89fcb83d61bef88e159403d4ac6f6606de7 | [
"Apache-2.0"
] | permissive | spirit-87/python_training | d4432a8063a5cee13208b97e61134b885c2d0047 | f2e2389ba4e96139d666365abecf16a2db89cd6e | refs/heads/master | 2021-01-30T16:40:16.412104 | 2020-04-22T10:03:18 | 2020-04-22T10:03:18 | 243,503,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,097 | py | from selenium.webdriver.support.ui import Select
from model.contact import Contact
import re
class ContactHelper:
    """Page-object helper that drives the addressbook's contact pages
    through a selenium WebDriver (available as self.app.wd)."""

    def __init__(self, app):
        self.app = app

    def open_newcontact_page(self):
        """Open the 'add new' contact form."""
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()

    def create(self, contact):
        """Create a new contact and return to the home page."""
        wd = self.app.wd
        self.open_newcontact_page()
        # fill in contact form
        self.change_contact_info(contact)
        # submit contact creation
        wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
        self.app.return_to_home_page()
        self.contact_cashe = None

    def delete_first_contact(self):
        """Delete the first contact in the list."""
        self.delete_contact_by_index(0)
        self.contact_cashe = None

    def delete_contact_by_index(self, index):
        """Delete the contact at the given list position."""
        wd = self.app.wd
        # select first contact = click first checkbox
        self.open_contacts_page()
        self.select_contact_by_index(index)
        # submit contact deletion
        wd.find_element_by_css_selector("input[value='Delete']").click()
        # accept dialog window
        wd.switch_to_alert().accept()
        self.app.return_to_home_page()
        self.contact_cashe = None

    def delete_contact_by_id(self, id):
        """Delete the contact with the given database id."""
        wd = self.app.wd
        # select first contact = click first checkbox
        self.open_contacts_page()
        self.select_contact_by_id(id)
        # submit contact deletion
        wd.find_element_by_css_selector("input[value='Delete']").click()
        # accept dialog window
        wd.switch_to_alert().accept()
        self.app.return_to_home_page()
        self.contact_cashe = None

    def open_contacts_page(self):
        """Navigate to the contacts (home) page if not already there."""
        wd = self.app.wd
        if not wd.current_url.endswith("/index.php") > 0:
            wd.find_element_by_link_text("home").click()

    def select_first_contact(self):
        self.select_contact_by_index(0)

    def select_contact_by_index(self, index):
        """Tick the row checkbox of the contact at the given position."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def select_contact_by_id(self, id):
        """Tick the row checkbox of the contact with the given id."""
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id ).click()

    def change_contact_info(self, contact):
        """Fill every field of the contact form from a Contact object.
        Fields whose value is None are left untouched."""
        wd = self.app.wd
        # fill in names of new contact
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("middlename", contact.middlename)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("nickname", contact.nickname)
        # fill in job information of new contact
        self.change_field_value("title", contact.title)
        self.change_field_value("company", contact.company)
        self.change_field_value("address", contact.address)
        # fill in phones of new contact
        self.change_field_value("home", contact.phone_home)
        self.change_field_value("mobile", contact.phone_mobile)
        self.change_field_value("work", contact.phone_work)
        self.change_field_value("fax", contact.phone_fax)
        # fill in emails of new contact
        self.change_field_value("email", contact.email1)
        self.change_field_value("email2", contact.email2)
        self.change_field_value("email3", contact.email3)
        # fill in webpage of new contact
        self.change_field_value("homepage", contact.webpage)
        # fill in birth dates of new contact
        self.change_date_value("bday", contact.bday)
        self.change_date_value("bmonth", contact.bmonth)
        self.change_field_value("byear", contact.byear)
        # fill in anniversary dates of new contact
        self.change_date_value("aday", contact.aday)
        self.change_date_value("amonth", contact.amonth)
        self.change_field_value("ayear", contact.ayear)
        # fill in secondary info of new contact
        self.change_field_value("address2", contact.address2)
        self.change_field_value("phone2", contact.phone2)
        self.change_field_value("notes", contact.notes2)
        self.contact_cashe = None

    def edit_first_contact(self, new_contact_data):
        """Edit the first contact in the list.

        Bug fix: the original dropped new_contact_data when delegating,
        so edit_contact_by_index() raised TypeError on every call.
        """
        self.edit_contact_by_index(0, new_contact_data)

    def edit_contact_by_index(self, index, new_contact_data):
        """Open the edit form of the contact at `index` and overwrite it."""
        wd = self.app.wd
        self.select_contact_edit_by_index(index)
        # edit contact
        self.change_contact_info(new_contact_data)
        # submit contact edition
        wd.find_element_by_xpath("//input[@value='Update']").click()
        self.app.return_to_home_page()
        self.contact_cashe = None

    def edit_contact_by_id(self, id, new_contact_data):
        """Open the edit form of the contact with `id` and overwrite it."""
        wd = self.app.wd
        self.select_contact_edit_by_id(id)
        # edit contact
        self.change_contact_info(new_contact_data)
        # submit contact edition
        wd.find_element_by_xpath("//input[@value='Update']").click()
        self.app.return_to_home_page()
        self.contact_cashe = None

    def select_first_contact_edit(self):
        # NOTE(review): this ticks the row checkbox rather than clicking the
        # edit (pencil) icon -- possibly select_contact_edit_by_index(0)
        # was intended; left unchanged pending confirmation.
        self.select_contact_by_index(0)

    def select_contact_edit_by_index(self, index):
        """Click the edit (pencil) icon of the contact at `index`."""
        wd = self.app.wd
        self.app.return_to_home_page()
        # init contact edition of first edit link
        wd.find_elements_by_css_selector("img[src='icons/pencil.png']")[index].click()

    def select_contact_edit_by_id(self, id):
        """Follow the edit link of the contact with the given id."""
        wd = self.app.wd
        self.app.return_to_home_page()
        # init contact edition of first edit link
        wd.find_element_by_css_selector("a[href='edit.php?id=%s']" % id).click()

    def select_contact_view_by_index(self, index):
        """Click the details (view) icon of the contact at `index`."""
        wd = self.app.wd
        self.app.return_to_home_page()
        # init contact view of first edit link
        wd.find_elements_by_css_selector("img[src='icons/status_online.png']")[index].click()

    def change_field_value(self, field_name, text):
        """Clear and type `text` into the named form field (None = skip)."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def change_date_value(self, field_name, text):
        """Pick `text` in the named <select> drop-down (None = skip)."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            Select(wd.find_element_by_name(field_name)).select_by_visible_text(text)

    def count(self):
        """Return the number of contact rows on the home page."""
        wd = self.app.wd
        self.open_contacts_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached result of get_contact_list(); any mutating operation resets it.
    contact_cashe = None

    def get_contact_list(self):
        """Scrape the home-page table into Contact objects (cached)."""
        if self.contact_cashe is None:
            wd = self.app.wd
            self.open_contacts_page()
            self.contact_cashe = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                id = cells[0].find_element_by_tag_name("input").get_attribute("value")
                lastname = cells[1].text
                firstname = cells[2].text
                address = cells[3].text
                all_emails = cells[4].text
                all_phones = cells[5].text
                self.contact_cashe.append(Contact(firstname=self.clear_extra_spaces(firstname), lastname=self.clear_extra_spaces(lastname), id=id,
                                                  address=self.clear_extra_spaces(address),
                                                  all_phones_from_home_page = all_phones,all_emails_from_home_page = all_emails))
        return list(self.contact_cashe)

    def get_contact_info_from_edit_page(self, index):
        """Open the edit form at `index` and build a Contact from its
        field values (phones/emails also merged like the home page)."""
        wd = self.app.wd
        self.select_contact_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        phone_home = wd.find_element_by_name("home").get_attribute("value")
        phone_mobile = wd.find_element_by_name("mobile").get_attribute("value")
        phone_work = wd.find_element_by_name("work").get_attribute("value")
        phone2 = wd.find_element_by_name("phone2").get_attribute("value")
        email1 = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        address = wd.find_element_by_name("address").text
        contact_edit = Contact(firstname=firstname, lastname=lastname, id=id, phone_home=phone_home, phone_mobile=phone_mobile,
                               phone_work=phone_work, phone2=phone2, email1 = email1, email2 = email2, email3 = email3, address = address)
        contact_edit.all_phones_from_home_page = self.merge_phones_like_on_home_page(contact_edit)
        contact_edit.all_emails_from_home_page = self.merge_emails_like_on_home_page(contact_edit)
        return contact_edit

    def get_contact_info_from_view_page(self, index):
        """Open the details view at `index` and parse the phone numbers
        out of its free-form text."""
        wd = self.app.wd
        self.select_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        phone_home = re.search("H: (.*)", text).group(1)
        phone_mobile = re.search("M: (.*)", text).group(1)
        phone_work = re.search("W: (.*)", text).group(1)
        phone2 = re.search("P: (.*)", text).group(1)
        return Contact(phone_home=phone_home, phone_mobile=phone_mobile,
                       phone_work=phone_work, phone2=phone2)

    def clear_extra_spaces(self, s):
        # Collapse the stripped string's spacing the same way the home
        # page renders it.
        return re.sub("  ", " ", s.strip())

    def clear(self, s):
        # Strip the formatting characters ( ) space and - from a value.
        return re.sub("[() -]", "", s)

    def merge_phones_like_on_home_page(self, contact):
        # Drop None entries, strip formatting characters, then keep only
        # non-empty values, joined the way the home page shows them.
        return "\n".join(filter(lambda x: x != "",
                                map(lambda x: self.clear(x),
                                    filter(lambda x: x is not None,
                                           [contact.phone_home, contact.phone_mobile, contact.phone_work, contact.phone2]))))

    def merge_emails_like_on_home_page(self, contact):
        # Drop None entries, strip formatting characters, then keep only
        # non-empty values, joined the way the home page shows them.
        return "\n".join(filter(lambda x: x != "",
                                map(lambda x:self.clear(x),
                                    filter(lambda x: x is not None,
                                           [contact.email1, contact.email2, contact.email3]))))

    def add_contact_to_group(self, contact, group):
        """Add the selected contact to the given group via the home page."""
        wd = self.app.wd
        self.select_contact_by_id(contact.id)
        wd.find_element_by_name("to_group").click()
        Select(wd.find_element_by_name("to_group")).select_by_value(group.id)
        wd.find_element_by_name("add").click()
        self.app.return_to_home_page()

    def remove_contact_from_group(self, contact, group):
        """Remove the contact from the given group's filtered view."""
        wd = self.app.wd
        wd.find_element_by_name("group").click()
        Select(wd.find_element_by_name("group")).select_by_value(group.id)
        wd.find_element_by_id(contact.id).click()
        wd.find_element_by_name("remove").click()
        self.app.return_to_home_page()
| [
"spirit_87@list.ru"
] | spirit_87@list.ru |
1fca69c251833c7b43d945f04d92a07733cdab58 | c7b941b50f04d526f45b05515f41001298a68b11 | /bicycleparameters/rider.py | eadb40254cad07c41da6584886c3978510b709df | [
"BSD-2-Clause"
] | permissive | helloxss/BicycleParameters | cbb26ae833d44c168c00b36485fdec2796936cc6 | 424056406c662c7ace2feef02f59e8babe4cd5d2 | refs/heads/master | 2020-12-25T12:57:21.665589 | 2014-10-10T02:37:43 | 2014-10-10T02:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,346 | py | #!/usr/bin/env python
import os
import numpy as np
from numpy import sin, cos, sqrt
from scipy.optimize import fsolve
import yeadon
from io import remove_uncertainties
from inertia import combine_bike_rider
def yeadon_vec_to_bicycle_vec(vector, measured_bicycle_par,
                              benchmark_bicycle_par):
    """Re-express a point given in the Yeadon frame in the bicycle frame.

    Parameters
    ----------
    vector : np.matrix, shape(3, 1)
        Vector from the Yeadon origin to a point, expressed in the Yeadon
        reference frame.
    measured_bicycle_par : dictionary
        The raw bicycle measurements.
    benchmark_bicycle_par : dictionary
        The Meijaard et al. (2007) benchmark parameters for this bicycle.

    Returns
    -------
    np.matrix, shape(3, 1)
        Vector from the bicycle origin to the same point, expressed in the
        bicycle reference frame.
    """
    # Rotation taking components expressed in Yeadon's frame to components
    # expressed in the bicycle frame:
    # vector_expressed_in_bike = rotation * vector_expressed_in_yeadon
    rotation = np.matrix([[0.0, -1.0, 0.0],
                          [-1.0, 0.0, 0.0],
                          [0.0, 0.0, -1.0]])
    measured = remove_uncertainties(measured_bicycle_par)
    benchmark = remove_uncertainties(benchmark_bicycle_par)
    hbb = measured['hbb']          # bottom bracket height
    lcs = measured['lcs']          # chain stay length
    rR = benchmark['rR']           # rear wheel radius
    lsp = measured['lsp']          # seat post length
    lst = measured['lst']          # seat tube length
    lambdast = measured['lamst']   # seat tube angle
    # Position of the Yeadon origin expressed in the bicycle frame.
    bx = np.sqrt(lcs**2 - (-hbb + rR)**2) + (-lsp - lst) * np.cos(lambdast)
    bz = -hbb + (-lsp - lst) * np.sin(lambdast)
    yeadon_origin_in_bike_frame = np.matrix([[bx],
                                             [0.0],
                                             [bz]])
    return yeadon_origin_in_bike_frame + rotation * vector
def configure_rider(pathToRider, bicycle, bicyclePar, measuredPar, draw):
    """
    Returns the rider parameters, bicycle parameters with a rider and a
    human object that is configured to sit on the bicycle.

    Parameters
    ----------
    pathToRider : string
        Path to the rider's data folder.
    bicycle : string
        The short name of the bicycle.
    bicyclePar : dictionary
        Contains the benchmark bicycle parameters for a bicycle.
    measuredPar : dictionary
        Contains the measured values of the bicycle.
    draw : boolean, optional
        If true, visual python will be used to draw a three dimensional
        image of the rider.

    Returns
    -------
    riderPar : dictionary
        The inertial parameters of the rider with reference to the
        benchmark coordinate system.
    human : yeadon.human
        The human object that represents the rider seated on the
        bicycle.
    bicycleRiderPar : dictionary
        The benchmark parameters of the bicycle with the rider added to
        the rear frame.
    """
    try:
        # get the rider name
        rider = os.path.split(pathToRider)[1]
        # get the paths to the yeadon data files
        pathToYeadon = os.path.join(pathToRider, 'RawData',
                                    rider + 'YeadonMeas.txt')
        pathToCFG = os.path.join(pathToRider, 'RawData',
                                 rider + bicycle + 'YeadonCFG.txt')
        # generate the human that has been configured to sit on the bicycle
        # the human's inertial parameters are expressed in the Yeadon
        # reference frame about the Yeadon origin.
        human = rider_on_bike(bicyclePar, measuredPar,
                              pathToYeadon, pathToCFG, draw)
        # This is the rotation matrix that relates Yeadon's reference frame
        # to the bicycle reference frame.
        rot_mat = np.array([[0.0, -1.0, 0.0],
                            [-1.0, 0.0, 0.0],
                            [0.0, 0.0, -1.0]])
        # This is the human's inertia expressed in the bicycle reference
        # frame about the human's center of mass.
        human_inertia_in_bike_frame = \
            human.inertia_transformed(rotmat=rot_mat)
        human_com_in_bike_frame = \
            yeadon_vec_to_bicycle_vec(human.center_of_mass, measuredPar,
                                      bicyclePar)
        # build a dictionary to store the inertial data
        riderPar = {'IBxx': human_inertia_in_bike_frame[0, 0],
                    'IByy': human_inertia_in_bike_frame[1, 1],
                    'IBzz': human_inertia_in_bike_frame[2, 2],
                    'IBxz': human_inertia_in_bike_frame[2, 0],
                    'mB': human.mass,
                    'xB': human_com_in_bike_frame[0, 0],
                    'yB': human_com_in_bike_frame[1, 0],
                    'zB': human_com_in_bike_frame[2, 0]}
    except Exception:
        # Robustness fix: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt before re-raising.  The error is
        # still reported and re-raised unchanged.
        # no rider was added
        print('Calculations in yeadon failed. No rider added.')
        # raise the error that caused things to fail
        raise
    else:
        bicycleRiderPar = combine_bike_rider(bicyclePar, riderPar)
        return riderPar, human, bicycleRiderPar
def rider_on_bike(benchmarkPar, measuredPar, yeadonMeas, yeadonCFG,
                  drawrider):
    """
    Returns a yeadon human configured to sit on a bicycle.
    Parameters
    ----------
    benchmarkPar : dictionary
        A dictionary containing the benchmark bicycle parameters.
    measuredPar : dictionary
        A dictionary containing the raw geometric measurements of the bicycle.
    yeadonMeas : str
        Path to a text file that holds the 95 yeadon measurements. See
        `yeadon documentation`_.
    yeadonCFG : str
        Path to a text file that holds configuration variables. See `yeadon
        documentation`_. As of now, only 'somersalt' angle can be set as an
        input. The remaining variables are either zero or calculated in this
        method.
    drawrider : bool
        Switch to draw the rider, with vectors pointing to the desired
        position of the hands and feet of the rider (at the handles and
        bottom bracket). Requires python-visual.
    Returns
    -------
    human : yeadon.Human
        Human object is returned with an updated configuration.
        The dictionary, taken from H.CFG, has the following key's values
        updated:
            'PJ1extension'
            'J1J2flexion'
            'CA1extension'
            'CA1adduction'
            'CA1rotation'
            'A1A2extension'
            'somersault'
            'PK1extension'
            'K1K2flexion'
            'CB1extension'
            'CB1abduction'
            'CB1rotation'
            'B1B2extension'
    Notes
    -----
    Requires that the bike object has a raw data text input file that contains
    the measurements necessary to situate a rider on the bike (i.e.
    ``<pathToData>/bicycles/<short name>/RawData/<short name>Measurements.txt``).
    .. _yeadon documentation : http://packages.python.org/yeadon
    """
    # create human using input measurements and configuration files
    human = yeadon.Human(yeadonMeas, yeadonCFG)
    # The relevant human measurments (segment lengths/widths in meters):
    L_j3L = human.meas['Lj3L']
    L_j5L = human.meas['Lj5L']
    L_j6L = human.meas['Lj6L']
    L_s4L = human.meas['Ls4L']
    L_s4w = human.meas['Ls4w']
    L_a2L = human.meas['La2L']
    L_a4L = human.meas['La4L']
    L_a5L = human.meas['La5L']
    somersault = human.CFG['somersault']
    # The relevant bicycle parameters (strip uncertainty objects so plain
    # floats are used in the solver below):
    measuredPar = remove_uncertainties(measuredPar)
    benchmarkPar = remove_uncertainties(benchmarkPar)
    # bottom bracket height
    h_bb = measuredPar['hbb']
    # chain stay length
    l_cs = measuredPar['lcs']
    # rear wheel radius
    r_R = benchmarkPar['rR']
    # front wheel radius
    r_F = benchmarkPar['rF']
    # seat post length
    l_sp = measuredPar['lsp']
    # seat tube length
    l_st = measuredPar['lst']
    # seat tube angle
    lambda_st = measuredPar['lamst']
    # handlebar width
    w_hb = measuredPar['whb']
    # distance from rear wheel hub to hand
    L_hbR = measuredPar['LhbR']
    # distance from front wheel hub to hand
    L_hbF = measuredPar['LhbF']
    # wheelbase
    w = benchmarkPar['w']
    def zero(unknowns):
        """Residual vector for fsolve: ten geometric constraints that place
        the rider's feet on the bottom bracket and hands on the handlebars.

        The ten unknowns are the leg/arm joint angles plus the hand position
        components (alpha_*, beta_*) relative to the wheel hubs.

        For the derivation of these equations see:
        http://nbviewer.ipython.org/github/chrisdembia/yeadon/blob/v1.2.0/examples/bicyclerider/bicycle_example.ipynb
        """
        PJ1extension = unknowns[0]
        J1J2flexion = unknowns[1]
        CA1extension = unknowns[2]
        CA1adduction = unknowns[3]
        CA1rotation = unknowns[4]
        A1A2extension = unknowns[5]
        alpha_y = unknowns[6]
        alpha_z = unknowns[7]
        beta_y = unknowns[8]
        beta_z = unknowns[9]
        phi_J1 = PJ1extension
        phi_J2 = J1J2flexion
        phi_A1 = CA1extension
        theta_A1 = CA1adduction
        psi_A = CA1rotation
        phi_A2 = A1A2extension
        phi_P = somersault
        zero = np.zeros(10)
        zero[0] = (L_j3L*(-sin(phi_J1)*cos(phi_P) - sin(phi_P)*cos(phi_J1))
                   + (-l_sp - l_st)*cos(lambda_st) + (-(-sin(phi_J1)*
                   sin(phi_P) + cos(phi_J1)*cos(phi_P))*sin(phi_J2) +
                   (-sin(phi_J1)*cos(phi_P) - sin(phi_P)*cos(phi_J1))*
                   cos(phi_J2))*(-L_j3L + L_j5L + L_j6L))
        zero[1] = (L_j3L*(-sin(phi_J1)*sin(phi_P) + cos(phi_J1)*cos(phi_P))
                   + (-l_sp - l_st)*sin(lambda_st) + ((-sin(phi_J1)*
                   sin(phi_P) + cos(phi_J1)*cos(phi_P))*cos(phi_J2) -
                   (sin(phi_J1)*cos(phi_P) + sin(phi_P)*cos(phi_J1))*
                   sin(phi_J2))*(-L_j3L + L_j5L + L_j6L))
        zero[2] = -L_hbF + sqrt(alpha_y**2 + alpha_z**2 + 0.25*w_hb**2)
        zero[3] = -L_hbR + sqrt(beta_y**2 + beta_z**2 + 0.25*w_hb**2)
        zero[4] = alpha_y - beta_y - w
        zero[5] = alpha_z - beta_z + r_F - r_R
        zero[6] = (-L_a2L*sin(theta_A1) + L_s4w/2 - 0.5*w_hb + (sin(phi_A2)*
                   sin(psi_A)*cos(theta_A1) + sin(theta_A1)*cos(phi_A2))*
                   (L_a2L - L_a4L - L_a5L))
        zero[7] = (-L_a2L*(-sin(phi_A1)*cos(phi_P)*cos(theta_A1) -
                   sin(phi_P)*cos(phi_A1)*cos(theta_A1)) - L_s4L*sin(phi_P)
                   - beta_y - sqrt(l_cs**2 - (-h_bb + r_R)**2) - (-l_sp -
                   l_st)*cos(lambda_st) + (-(-(sin(phi_A1)*cos(psi_A) +
                   sin(psi_A)*sin(theta_A1)*cos(phi_A1))*sin(phi_P) +
                   (-sin(phi_A1)*sin(psi_A)*sin(theta_A1) + cos(phi_A1)*
                   cos(psi_A))*cos(phi_P))*sin(phi_A2) + (-sin(phi_A1)*
                   cos(phi_P)*cos(theta_A1) - sin(phi_P)*cos(phi_A1)*
                   cos(theta_A1))*cos(phi_A2))*(L_a2L - L_a4L - L_a5L))
        zero[8] = (-L_a2L*(-sin(phi_A1)*sin(phi_P)*cos(theta_A1) +
                   cos(phi_A1)*cos(phi_P)*cos(theta_A1)) + L_s4L*cos(phi_P)
                   - beta_z + h_bb - r_R - (-l_sp - l_st)*sin(lambda_st) +
                   (-((sin(phi_A1)*cos(psi_A) + sin(psi_A)*sin(theta_A1)*
                   cos(phi_A1))*cos(phi_P) + (-sin(phi_A1)*sin(psi_A)*
                   sin(theta_A1) + cos(phi_A1)*cos(psi_A))*sin(phi_P))*
                   sin(phi_A2) + (-sin(phi_A1)*sin(phi_P)*cos(theta_A1) +
                   cos(phi_A1)*cos(phi_P)*cos(theta_A1))*cos(phi_A2))*(L_a2L
                   - L_a4L - L_a5L))
        zero[9] = ((sin(phi_A1)*sin(psi_A) - sin(theta_A1)*cos(phi_A1)*
                   cos(psi_A))*cos(phi_P) + (sin(phi_A1)*sin(theta_A1)*
                   cos(psi_A) + sin(psi_A)*cos(phi_A1))*sin(phi_P))
        return zero
    # Initial guesses for the solver (angles in radians, hand positions in
    # meters); reasonable seated-rider posture values.
    g_PJ1extension = -np.deg2rad(90.0)
    g_J1J2flexion = np.deg2rad(75.0)
    g_CA1extension = -np.deg2rad(15.0)
    g_CA1adduction = np.deg2rad(2.0)
    g_CA1rotation = np.deg2rad(2.0)
    g_A1A2extension = -np.deg2rad(40.0)
    g_alpha_y = L_hbF * np.cos(np.deg2rad(45.0))
    g_alpha_z = L_hbF * np.sin(np.deg2rad(45.0))
    g_beta_y = -L_hbR * np.cos(np.deg2rad(30.0))
    g_beta_z = L_hbR * np.sin(np.deg2rad(30.0))
    guess = [g_PJ1extension, g_J1J2flexion, g_CA1extension, g_CA1adduction,
             g_CA1rotation, g_A1A2extension, g_alpha_y, g_alpha_z, g_beta_y,
             g_beta_z]
    solution = fsolve(zero, guess)
    # Copy the configuration and fill in the solved joint angles; the left
    # limbs (K/B) mirror the right limbs (J/A), with adduction/rotation
    # negated for symmetry.
    cfg_dict = human.CFG.copy()
    cfg_dict['PJ1extension'] = solution[0]
    cfg_dict['J1J2flexion'] = solution[1]
    cfg_dict['CA1extension'] = solution[2]
    cfg_dict['CA1adduction'] = solution[3]
    cfg_dict['CA1rotation'] = solution[4]
    cfg_dict['A1A2extension'] = solution[5]
    cfg_dict['somersault'] = somersault
    cfg_dict['PK1extension'] = cfg_dict['PJ1extension']
    cfg_dict['K1K2flexion'] = cfg_dict['J1J2flexion']
    cfg_dict['CB1extension'] = cfg_dict['CA1extension']
    cfg_dict['CB1abduction'] = -cfg_dict['CA1adduction']
    cfg_dict['CB1rotation'] = -cfg_dict['CA1rotation']
    cfg_dict['B1B2extension'] = cfg_dict['A1A2extension']
    # assign configuration to human and check that the solution worked
    human.set_CFG_dict(cfg_dict)
    # draw rider for fun, but possibly to check results aren't crazy
    if drawrider:
        human.draw()
    return human
| [
"moorepants@gmail.com"
] | moorepants@gmail.com |
28dc472c1553c59b56d20300f3625cc15980d1fd | f0cbc90651160b6eb34d55564f7ffb42bf835530 | /NNCancellation.py | dae249d04d289d46d5b104741ed2a1de107bd33c | [] | no_license | ChaoRong-Zhang/fdnn | 95dfad55a0b98bad8e85cee3660a2396faef507d | 7b880c27faa781d551025f31a3a61f42c8048b15 | refs/heads/master | 2022-04-05T13:30:59.356655 | 2020-01-03T19:46:29 | 2020-01-03T19:46:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,452 | py | import scipy.io
from scipy.signal import savgol_filter
import numpy as np
import fullduplex as fd
from keras.models import Model, Sequential
from keras.layers import Dense, Input, SimpleRNN, Dropout
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import os
# This line disables the use of the GPU for training. The dataset is not large enough to get
# significant gains from GPU training and, in fact, sometimes training can even be slower on
# the GPU than on the CPU. Comment out to enable GPU use.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Define system parameters
params = {
          'samplingFreqMHz': 20,    # Sampling frequency, required for correct scaling of PSD
          'hSILen': 13,             # Self-interference channel length
          'pamaxordercanc': 7,      # Maximum PA non-linearity order
          'trainingRatio': 0.9,     # Ratio of total samples to use for training
          'dataOffset': 14,         # Data offset to take transmitter-receiver misalignment into account
          'nHidden': 17,            # Number of hidden layers in NN
          'nEpochs': 20,            # Number of training epochs for NN training
          'learningRate': 0.004,    # Learning rate for NN training
          'batchSize': 32,          # Batch size for NN training
          }
##### Load and prepare data #####
x, y, noise, measuredNoisePower = fd.loadData('data/fdTestbedData20MHz10dBm', params)
# Get self-interference channel length
chanLen = params['hSILen']
# Create feedforward NN using Keras: one shared hidden layer feeding two
# linear outputs (real and imaginary part of the residual SI signal).
# NOTE: "input" shadows the Python builtin of the same name here.
nHidden = params['nHidden']
nEpochs = params['nEpochs']
input = Input(shape=(2*chanLen,))
hidden1 = Dense(nHidden, activation='relu')(input)
output1 = Dense(1, activation='linear')(hidden1)
output2 = Dense(1, activation='linear')(hidden1)
model = Model(inputs=input, outputs=[output1, output2])
adam = Adam(lr=params['learningRate'])
model.compile(loss = "mse", optimizer = adam)
print("Total number of real parameters to estimate for neural network based canceller: {:d}".format((2*chanLen+1)*nHidden + 2*(nHidden+1)+2*chanLen))
# Split into training and test sets
trainingSamples = int(np.floor(x.size*params['trainingRatio']))
x_train = x[0:trainingSamples]
y_train = y[0:trainingSamples]
x_test = x[trainingSamples:]
y_test = y[trainingSamples:]
##### Training #####
# Step 1: Estimate linear cancellation arameters and perform linear cancellation
hLin = fd.SIestimationLinear(x_train, y_train, params)
yCanc = fd.SIcancellationLinear(x_train, hLin, params)
# Normalize data for NN: the NN learns only the non-linear residual left
# after linear cancellation, scaled to unit variance.
yOrig = y_train
y_train = y_train - yCanc
yVar = np.var(y_train)
y_train = y_train/np.sqrt(yVar)
# Prepare training data for NN: each input row is a sliding window of
# chanLen complex transmit samples, split into real and imaginary halves.
x_train_real = np.reshape(np.array([x_train[i:i+chanLen].real for i in range(x_train.size-chanLen)]), (x_train.size-chanLen, chanLen))
x_train_imag = np.reshape(np.array([x_train[i:i+chanLen].imag for i in range(x_train.size-chanLen)]), (x_train.size-chanLen, chanLen))
x_train = np.zeros((x_train.size-chanLen, 2*chanLen))
x_train[:,0:chanLen] = x_train_real
x_train[:,chanLen:2*chanLen] = x_train_imag
y_train = np.reshape(y_train[chanLen:], (y_train.size-chanLen, 1))
# Prepare test data for NN (same windowing and normalization as training;
# note yVar from the training set is deliberately reused here)
yCanc = fd.SIcancellationLinear(x_test, hLin, params)
yOrig = y_test
y_test = y_test - yCanc
y_test = y_test/np.sqrt(yVar)
x_test_real = np.reshape(np.array([x_test[i:i+chanLen].real for i in range(x_test.size-chanLen)]), (x_test.size-chanLen, chanLen))
x_test_imag = np.reshape(np.array([x_test[i:i+chanLen].imag for i in range(x_test.size-chanLen)]), (x_test.size-chanLen, chanLen))
x_test = np.zeros((x_test.size-chanLen, 2*chanLen))
x_test[:,0:chanLen] = x_test_real
x_test[:,chanLen:2*chanLen] = x_test_imag
y_test = np.reshape(y_test[chanLen:], (y_test.size-chanLen, 1))
##### Training #####
# Step 2: train NN to do non-linear cancellation
history = model.fit(x_train, [y_train.real, y_train.imag], epochs = nEpochs, batch_size = params['batchSize'], verbose=2, validation_data= (x_test, [y_test.real, y_test.imag]))
##### Test #####
# Do inference step: recombine the two real-valued outputs into one
# complex-valued non-linear cancellation signal.
pred = model.predict(x_test)
yCancNonLin = np.squeeze(pred[0] + 1j*pred[1], axis=1)
##### Evaluation #####
# Get correctly shaped test and cancellation data
y_test = yOrig[chanLen:]
yCanc = yCanc[chanLen:]
# Calculate various signal powers; all signals are rescaled so the measured
# noise power lines up with the empirical noise power in dB.
noisePower = 10*np.log10(np.mean(np.abs(noise)**2))
scalingConst = np.power(10,-(measuredNoisePower-noisePower)/10)
noise /= np.sqrt(scalingConst)
y_test /= np.sqrt(scalingConst)
yCanc /= np.sqrt(scalingConst)
yCancNonLin /= np.sqrt(scalingConst)
# Plot PSD and get signal powers
noisePower, yTestPower, yTestLinCancPower, yTestNonLinCancPower = fd.plotPSD(y_test, yCanc, yCancNonLin, noise, params, 'NN', yVar)
# Print cancellation performance
print('')
print('The linear SI cancellation is: {:.2f} dB'.format(yTestPower-yTestLinCancPower))
print('The non-linear SI cancellation is: {:.2f} dB'.format(yTestLinCancPower-yTestNonLinCancPower))
print('The noise floor is: {:.2f} dBm'.format(noisePower))
print('The distance from noise floor is: {:.2f} dB'.format(yTestNonLinCancPower-noisePower))
# Plot learning curve (loss converted to dB of cancellation)
plt.plot(np.arange(1,len(history.history['loss'])+1), -10*np.log10(history.history['loss']), 'bo-')
plt.plot(np.arange(1,len(history.history['loss'])+1), -10*np.log10(history.history['val_loss']), 'ro-')
plt.ylabel('Self-Interference Cancellation (dB)')
plt.xlabel('Training Epoch')
plt.legend(['Training Frame', 'Test Frame'], loc='lower right')
plt.grid(which='major', alpha=0.25)
plt.xlim([ 0, nEpochs+1 ])
plt.xticks(range(1,nEpochs,2))
plt.savefig('figures/NNconv.pdf', bbox_inches='tight')
plt.show()
| [
"a.k.balatsoukas.stimming@tue.nl"
] | a.k.balatsoukas.stimming@tue.nl |
d06b847172ee459aa81f54ed944958591033cca8 | a45c581547c676e0d82e40ed0879f77ab9ba6d40 | /KeyPressModule.py | 3d299d93061d00810e978cf1b72cb31379752db1 | [] | no_license | saidzalehan/AIR-DRONE | b975b03aa221f9cc9458c1828362ac583fc6e44b | a3fbd6f050000a28a2007f7fcf374922d8432359 | refs/heads/main | 2023-06-03T03:59:24.656213 | 2021-06-29T07:23:54 | 2021-06-29T07:23:54 | 381,253,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import pygame # The libraries that need to install to run this program
def init():
    """Initialise pygame and open the small capture window for key events."""
    window_size = (200, 200)
    pygame.init()
    pygame.display.set_mode(window_size)
    pygame.display.set_caption('Emergency Button')
def getKey(keyname):
    """Return True if the pygame key named ``keyname`` (e.g. "LEFT") is
    currently held down, False otherwise."""
    # Drain the event queue so get_pressed() reflects the latest state.
    for _ in pygame.event.get():
        pass
    pressed = pygame.key.get_pressed()
    # Map e.g. "LEFT" -> pygame.K_LEFT.
    key_code = getattr(pygame, 'K_{}'.format(keyname))
    is_down = bool(pressed[key_code])
    pygame.display.update()
    return is_down
def main():
    """Poll the arrow keys once and report any that are pressed."""
    for keyname, message in (("LEFT", "Left key pressed"),
                             ("RIGHT", "Right key Pressed")):
        if getKey(keyname):
            print(message)
if __name__ == '__main__':
    # Open the capture window, then poll the keyboard forever.
    init()
    while True:
        main()
| [
"noreply@github.com"
] | saidzalehan.noreply@github.com |
290790ec3f76145d981ed8c61a532b3b99fedf6f | aaa2b1fde3025bd882081cae68c0fba67de45da5 | /VSB_Partial_Discharge_Detection_Due_March_2018/source/dataexplorer.py | c443e34f690dc888db1c6c00485d0fb7efb3112d | [] | no_license | mnight08/MachineLearningProjects | e8827cf17143dd19517a146282fbae6a78e0c1a6 | b45ce50a2b908951096f5d304e64fe2d7ae29daa | refs/heads/master | 2023-06-16T13:02:48.757398 | 2021-07-10T06:13:26 | 2021-07-10T06:13:26 | 384,750,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,513 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 18 16:14:17 2018
This file will take a data frame and create various visualizations of the
variables and combinations.
@author: vpx365
"""
from datamanager import DataManager
import matplotlib as plt
from scipy import signal
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import functools
class DataExplorer():
    '''
    Create various plots of the signals and signal features.

    Backed by a DataManager ``dm`` that exposes:
      * ``dm.train``      -- DataFrame of raw signals, one column per signal id
                             (column labels are the string form of the id)
      * ``dm.train_meta`` -- DataFrame with columns ``signal_id``,
                             ``id_measurement`` and ``target``
                             (target == 1 means partial discharge on that phase)
    '''
    def __init__(self, dm):
        self.dm = dm

    def plot_spectrogram(self, ids):
        '''Show a spectrogram of the selected training signal column(s).

        NOTE(review): the sampling rate 800000/20*1000 presumably encodes
        800k samples per 20 ms grid cycle -- confirm against the dataset.
        '''
        f, t, Sxx = signal.spectrogram(self.dm.train.iloc[:, ids],
                                       800000/20*1000, mode='magnitude')
        plt.pcolormesh(t, f, Sxx)
        plt.ylabel('Frequency [Hz]')
        plt.xlabel('Time [sec]')
        plt.show()

    def plot_periodogram(self, ids):
        '''Compute the periodogram of the selected signal column(s).

        NOTE(review): the result is computed but neither plotted nor
        returned; plotting code still appears to be missing.
        '''
        f, Pxx_den = signal.periodogram(self.dm.train.iloc[:, ids],
                                        800000/20*1000)

    def plot_signal(self, ids):
        '''Plot one or more raw signals over a single 20 ms (50 Hz) period.'''
        x = np.linspace(0, 1/50, 800000)
        if isinstance(ids, int):
            ids = [ids]
        # Make sure the requested signal columns are present in dm.train.
        self.dm.load_missing_signals(ids)
        # Number of the requested signals flagged as having a failure.
        state = self.dm.train_meta.loc[
            self.dm.train_meta['signal_id'].isin(ids), 'target'].sum()
        plt.plot(x, self.dm.train.loc[:, [str(id) for id in ids]])
        plt.ylabel('Voltage')
        plt.xlabel('Time [sec]')
        plt.title("Signals: " + str(ids) + " Num Failures: " + str(state))
        plt.show()

    def plot_signal_pair(self, pairs):
        '''Plot one signal against another (``pairs`` is an (x_id, y_id) tuple).'''
        x_id, y_id = pairs
        self.dm.load_missing_signals([x_id, y_id])
        x = self.dm.train.loc[:, str(x_id)]
        y = self.dm.train.loc[:, str(y_id)]
        plt.ylabel('Signal ' + str(x_id))
        plt.xlabel('Signal ' + str(y_id))
        plt.title("Signal Pair: " + str(x_id) + " " + str(y_id))
        plt.plot(x, y)
        plt.show()

    def get_index_signals(self, state=0):
        '''Return the columns (string labels) of signals experiencing the
        state: 0 for good, 1 for partial discharge.'''
        return [str(id) for id in
                self.dm.train_meta.groupby('target').groups[state]]

    def get_index_triple(self, state=0):
        '''Return the list of integer ids of triples experiencing the given
        state: 0 for good, 1 for one bad line, 2, 3, etc.'''
        return [id for id in self.dm.train_meta.groupby('target').groups[state]]

    def plot_triple(self, ids):
        '''Plot all three phases of each given three-phase measurement id.'''
        if isinstance(ids, int):
            ids = [ids]
        for id in ids:
            self.plot_signal([3*id, 3*id + 1, 3*id + 2])

    def get_power(self, ids):
        '''Return the root mean square error (not yet implemented).'''
        pass

    def plot_statisitc(self):
        # Placeholder; not yet implemented.
        pass

    def shift_triple(self):
        '''Take a collection of triples, to shift so that phase 0 starts at 0
        and rises, phase 1 is delayed by 2pi/3, and phase 2 by 4pi/3.

        Not yet implemented. Sketch of the approach:
        the original noise-free signal is of the form a*sin(100*pi*t);
        measurements are shifted versions
            s1(t) = a*sin(100*pi*t - shift)
            s2(t) = a*sin(100*pi*(t - 2/3) - shift)
            s3(t) = a*sin(100*pi*(t - 4/3) - shift)
        so the shift could be found by correlating against the base wave
        sin(100*pi*t) and each signal shifted left by that amount.
        '''
        pass

    def get_proportion_of_classes(self):
        '''Return (proportion of signals with partial discharge,
        proportion of signals without).'''
        total = self.dm.train_meta.shape[0]
        # Bug fix: the "partial" count previously filtered target == 0 and
        # took .shape[1] (the COLUMN count of the filtered frame); it must
        # count rows with target == 1.
        partial = self.dm.train_meta[self.dm.train_meta['target'] == 1].shape[0]
        perfect = self.dm.train_meta[self.dm.train_meta['target'] == 0].shape[0]
        return partial/total, perfect/total

    def get_prop_lines_partial(self, num_pdis=0):
        '''Find the proportion of line measurements (three signals) whose
        number of partial-discharge phases equals ``num_pdis`` (0..3).'''
        N = max(self.dm.train_meta['id_measurement'])
        count = 0
        for id in range(0, N + 1):
            p1 = self.dm.train_meta.loc[3*id, 'target']
            p2 = self.dm.train_meta.loc[3*id + 1, 'target']
            p3 = self.dm.train_meta.loc[3*id + 2, 'target']
            # Count the phases experiencing partial discharge and compare
            # with the requested state.
            if p1 + p2 + p3 == num_pdis:
                count = count + 1
        # Bug fix: measurement ids run 0..N, so the loop visits N + 1
        # measurements; dividing by N overstated the proportion.
        return count / (N + 1)

    def visualize_colesium_results(self, coliseum_results,
                                   stages=['Imputer', 'Model'], save=False):
        '''Box-plot test score, fit time and score time grouped by each stage.

        NOTE(review): ``self.make_path`` is not defined anywhere in this
        class, so ``save=True`` will raise AttributeError -- confirm the
        intended attribute or method name.
        '''
        targets = ['test_score', 'fit_time', 'score_time']
        for target in targets:
            for stage in stages:
                ax = self.make_colesium_results_box_plot(coliseum_results,
                                                         stage, target)
                if save:
                    fig = ax.get_figure()
                    fig.savefig(self.make_path + stage + "-" + target + ".png")

    def make_colesium_results_box_plot(self, coliseum_results, stage='Model',
                                       target="test_score"):
        '''Create a box plot of ``target`` for the given coliseum results,
        grouped by ``stage``.'''
        return coliseum_results.boxplot(column=target, figsize=(6, 6), by=stage)
| [
"alejandro.martinez@utrgv.edu"
] | alejandro.martinez@utrgv.edu |
adddceb83c81ca6f6f4b8d0b2c685f9ff4fe8289 | ea18b356682ec77b127a077579286b89383f62a5 | /network/server.py | e1bf70b90eaa32bf5f86b9f055971cd839685609 | [] | no_license | neoniemand/python | d37a6cbb03adfb6f2f3b12387ccd6e184bd94118 | 1733463851c701fafe654d0da793f3977f09c70c | refs/heads/master | 2023-08-29T04:59:03.895000 | 2021-10-26T02:25:32 | 2021-10-26T02:25:32 | 246,759,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 18:26:54 2020
@author: julian
"""
import socket
# Address to bind the server to; here the loopback interface (localhost).
HOST = '127.0.0.1'
# Port number on which to wait for client connections.
PORT = 9999
# Create a socket object: IPv4 address family, TCP stream type.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow immediate address reuse so restarting the server does not fail
# with "address already in use" (WinError 10048 on Windows).
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind() attaches the socket to a network interface and port number.
# HOST may be a hostname, an IP address, or the empty string ""
# (which accepts connections on all interfaces); PORT is 1-65535.
server_socket.bind((HOST, PORT))
# Start listening for client connections.
server_socket.listen()
# accept() blocks until a client connects, then returns a new socket.
#client_socket, addr = server_socket.accept()
# Address of the connected client.
#print('Connected by', addr)
# Main loop: accept one client at a time and echo back a single message.
while True:
    # Block in accept() until a client connects; returns a per-client socket.
    client_socket, addr = server_socket.accept()
    # Address of the connected client.
    print('Connected by', addr)
    # Wait for a message from the client (up to 1024 bytes).
    data = client_socket.recv(1024)
    # Stop the loop when an empty byte string is received.
    if not data:
        break
    # Show the received message.
    print('Received from', addr, data.decode())
    # Echo the received bytes back to the client.
    # NOTE(review): only the socket from the final iteration is closed
    # below; per-connection sockets from earlier iterations are never
    # closed -- confirm whether each one should be closed per iteration.
    client_socket.sendall(data)
# Close the sockets.
client_socket.close()
server_socket.close()
"niemand@naver.com"
] | niemand@naver.com |
be4b8b0f92c24c4a2fd5b424d69dbc1337852880 | 4dbf21735357b99403f092c74fd3462e2b2d40db | /make_wish/djangoPy3Env/bin/pip | b07934d40f0f0b58f4d6ff638bb16ae8d36625e2 | [] | no_license | uamahmood/Wishes | dcb1413cbc694d4456c2f7404f3e45f4614de973 | 6ae5a77242736aeacdfdd8d44b29adc2e1cca17e | refs/heads/master | 2022-12-10T14:41:24.152950 | 2020-08-29T23:13:11 | 2020-08-29T23:13:11 | 291,363,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | #!/Users/UmerMahmood/Desktop/12/make_wish/djangoPy3Env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for pip inside this virtualenv:
# normalizes argv[0] and dispatches to pip's internal CLI entry point.
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip Windows-style script suffixes ("-script.pyw" / ".exe") from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"mahmoodumer@hotmail.com"
] | mahmoodumer@hotmail.com | |
88c6858fc1679d76e8c850158e5ec133528912a6 | 7ef49f5af8d0ae58ff2d45dbe8feb9cdabd1640a | /Python2/RFID1.py | d3aeef9ee7fe914cc9f4f3f818c6adda4365e378 | [] | no_license | achoi2/RFID | d36e67dfd6a8dd20c754018241ad3319d30d92b6 | d436b0540df98d3f7f4bb2c7727591fcae35e67e | refs/heads/master | 2020-04-28T06:47:27.786474 | 2019-03-14T21:02:49 | 2019-03-14T21:02:49 | 175,071,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,001 | py | #!/usr/bin/env python
########################################################################
# Filename : RFID.py
# Description : Use MFRC522 read and write Mifare Card.
# auther : www.freenove.com
# modification: 2018/09/08
########################################################################
import RPi.GPIO as GPIO
import MFRC522
import time
# Create an object of the class MFRC522
mfrc = MFRC522.MFRC522()
def scanCard():
    """Poll the MFRC522 once for a nearby card and return its UID.

    NOTE(review): if no card is detected, ``uid`` is never assigned and the
    final ``return uid`` raises NameError -- callers appear to assume a card
    is (eventually) present; confirm intended behaviour.
    """
    (status,TagType) = mfrc.MFRC522_Request(mfrc.PICC_REQIDL)
    # If a card is found
    if status == mfrc.MI_OK:
        print "Card detected"
        # Get the UID of the card
        (status,uid) = mfrc.MFRC522_Anticoll()
        # If we have the UID, continue
        if status == mfrc.MI_OK:
            print "Card UID: "+ str(map(hex,uid))
            # Select the scanned tag
            if mfrc.MFRC522_SelectTag(uid) == 0:
                print "MFRC522_SelectTag Failed!"
    return uid
def dis_ConmandLine():
    """Print the command prompt without a trailing newline (Python 2
    trailing-comma print)."""
    print "RC522>",
def dis_CardID(cardID):
    """Print the 5-byte card UID as hex followed by '>', without a newline."""
    print "%2X%2X%2X%2X%2X>"%(cardID[0],cardID[1],cardID[2],cardID[3],cardID[4]),
def setup():
    """Print the startup banner; no hardware initialisation happens here."""
    print "Program is starting ... "
    print "Press Ctrl-C to exit."
    pass
def loop():
    """Top-level interactive shell: 'scan' waits for a card and drops into
    the per-card command loop; 'quit' cleans up the GPIO and exits."""
    global mfrc
    while(True):
        dis_ConmandLine()
        inCmd = raw_input()
        print inCmd
        if (inCmd == "scan"):
            print "Scanning ... "
            # Re-create the reader object so each scan starts fresh.
            mfrc = MFRC522.MFRC522()
            isScan = True
            while isScan:
                # Scan for cards
                (status,TagType) = mfrc.MFRC522_Request(mfrc.PICC_REQIDL)
                # If a card is found
                if status == mfrc.MI_OK:
                    print "Card detected"
                    # Get the UID of the card
                    (status,uid) = mfrc.MFRC522_Anticoll()
                    # If we have the UID, continue
                    if status == mfrc.MI_OK:
                        print "Card UID: "+ str(map(hex,uid))
                        # Select the scanned tag
                        if mfrc.MFRC522_SelectTag(uid) == 0:
                            print "MFRC522_SelectTag Failed!"
                        # Enter the per-card shell; a return value < 1
                        # ("halt" or an auth error) ends scanning.
                        if cmdloop(uid) < 1 :
                            isScan = False
        elif inCmd == "quit":
            destroy()
            exit(0)
        else :
            print "\tUnknown command\n"+"\tscan:scan card and dump\n"+"\tquit:exit program\n"
def cmdloop(cardID):
pass
while(True):
dis_ConmandLine()
dis_CardID(cardID)
inCmd = raw_input()
cmd = inCmd.split(" ")
print cmd
if(cmd[0] == "read"):
blockAddr = int(cmd[1])
if((blockAddr<0) or (blockAddr>63)):
print "Invalid Address!"
# This is the default key for authentication
key = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
while(scanCard() != cardID):
print cardID
time.sleep(1)
# Authenticate
status = mfrc.MFRC522_Auth(mfrc.PICC_AUTHENT1A, blockAddr, key, cardID)
# Check if authenticated
if status == mfrc.MI_OK:
mfrc.MFRC522_Readstr(blockAddr)
else:
print "Authentication error"
return 0
elif cmd[0] == "dump":
# This is the default key for authentication
key = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
while(scanCard() != cardID):
print cardID
time.sleep(1)
mfrc.MFRC522_Dump_Str(key,cardID)
elif cmd[0] == "write":
blockAddr = int(cmd[1])
if((blockAddr<0) or (blockAddr>63)):
print "Invalid Address!"
data = [0]*16
if(len(cmd)<2):
data = [0]*16
else:
data = cmd[2][0:17]
data = map(ord,data)
if len(data)<16:
data+=[0]*(16-len(data))
# This is the default key for authentication
key = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
while(scanCard() != cardID):
print cardID
time.sleep(1)
# Authenticate
status = mfrc.MFRC522_Auth(mfrc.PICC_AUTHENT1A, blockAddr, key, cardID)
# Check if authenticated
if status == mfrc.MI_OK:
print "Before writing , The data in block %d is: "%(blockAddr)
mfrc.MFRC522_Readstr(blockAddr)
mfrc.MFRC522_Write(blockAddr, data)
print "After written , The data in block %d is: "%(blockAddr)
mfrc.MFRC522_Readstr(blockAddr)
else:
print "Authentication error"
return 0
elif cmd[0] == "clean":
blockAddr = int(cmd[1])
if((blockAddr<0) or (blockAddr>63)):
print "Invalid Address!"
data = [0]*16
# This is the default key for authentication
key = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
while(scanCard() != cardID):
print cardID
time.sleep(1)
# Authenticate
status = mfrc.MFRC522_Auth(mfrc.PICC_AUTHENT1A, blockAddr, key, cardID)
# Check if authenticated
if status == mfrc.MI_OK:
print "Before cleaning , The data in block %d is: "%(blockAddr)
mfrc.MFRC522_Readstr(blockAddr)
mfrc.MFRC522_Write(blockAddr, data)
print "After cleaned , The data in block %d is: "%(blockAddr)
mfrc.MFRC522_Readstr(blockAddr)
else:
print "Authentication error"
return 0
elif cmd[0] == "halt":
return 0
else :
print "Usage:\r\n" "\tread <blockstart>\r\n" "\tdump\r\n" "\thalt\r\n" "\tclean <blockaddr>\r\n" "\twrite <blockaddr> <data>\r\n"
def destroy():
    """Release all GPIO resources claimed by this process."""
    GPIO.cleanup()
if __name__ == "__main__":
setup()
try:
loop()
except KeyboardInterrupt: # Ctrl+C captured, exit
destroy()
| [
"andrewchoi1@hotmail.com"
] | andrewchoi1@hotmail.com |
5200c915c304e4b215c13c04001bd670f3f2201d | 947c65092c2628d29e9e20d55921316c10ea43cc | /tests/test_all.py | 3a5bf2f4540381a06fd820493bd45916e34010a5 | [
"Apache-2.0"
] | permissive | mmatuson/SchemaSync | 257d44fa10cb1f99953e17274b86bb2c5fa23e33 | 7c4bfecdb60a8ab4c35c48347191a192128556eb | refs/heads/master | 2023-08-13T11:45:31.993533 | 2020-01-27T07:50:24 | 2020-01-27T07:50:24 | 367,914 | 251 | 97 | NOASSERTION | 2019-06-05T08:25:53 | 2009-11-10T19:18:54 | Python | UTF-8 | Python | false | false | 1,635 | py | #!/usr/bin/python
import unittest
from test_sync_database import TestSyncDatabase
from test_sync_tables import TestSyncTables
from test_sync_columns import TestSyncColumns
from test_sync_constraints import TestSyncConstraints
from test_utils import TestVersioned, TestPNames, TestPatchBuffer
from test_regex import TestTableCommentRegex, TestTableAutoIncrementRegex, TestMultiSpaceRegex,TestFileCounterRegex,TestDistantSemiColonRegex
def get_database_url():
    """Prompt for the MySQL connection URL (without a database name) and
    return it with a trailing slash guaranteed.

    NOTE: uses ``raw_input``, so this module targets Python 2.
    """
    database_url = raw_input("\nTests need to be run against the Sakila Database v0.8\n"
                             "Enter the MySQL Database Connection URL without the database name\n"
                             "Example: mysql://user:pass@host:port/\n"
                             "URL: ")
    if not database_url.endswith('/'):
        database_url += '/'
    return database_url
def regressionTest():
    """Build the full regression suite, pointing every test case at the
    user-supplied database URL."""
    cases = (
        TestTableCommentRegex,
        TestTableAutoIncrementRegex,
        TestMultiSpaceRegex,
        TestDistantSemiColonRegex,
        TestFileCounterRegex,
        TestSyncDatabase,
        TestSyncTables,
        TestSyncColumns,
        TestSyncConstraints,
        TestVersioned,
        TestPNames,
        TestPatchBuffer,
    )
    url = get_database_url()
    suite = unittest.TestSuite()
    for case in cases:
        # Every case reads its connection URL from a class attribute.
        case.database_url = url
        suite.addTest(unittest.makeSuite(case))
    return suite
if __name__ == "__main__":
unittest.main(defaultTest="regressionTest") | [
"mitch@mitch-2.local"
] | mitch@mitch-2.local |
734dd55c239e86a08f161f887237cb6e79536fc9 | df7a630449e104acef2826b95b154f423717c628 | /pages/views.py | 82cc73fa7c0ffb41bdd215e96f9d8ebd258a4dc1 | [] | no_license | DanielSup/MI-MPR2 | e6265d01f64f8601279f5b811c2894fc211bd18e | 3359c415881945296e5ad15c542aa6c085e8f482 | refs/heads/master | 2020-12-28T06:44:03.286087 | 2020-02-04T13:46:18 | 2020-02-04T13:46:18 | 238,215,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | import os
from django.http import HttpResponse
from django.shortcuts import render
from forms import MyForm
from speedcheck import loadInputAndProcess
from django.conf import settings
import datetime
def home_view(request, *args, **kwargs):
    """Render the upload form; on a valid POST, save the uploaded file into
    MEDIA_ROOT, run speed-check processing on it, and render the result.

    The POST field 'action' selects the mode: 'watch' renders the processed
    page for the uploaded file, anything else renders a link to the output
    video. On GET (or an invalid form) the empty upload form is rendered.
    """
    today = datetime.datetime.now().date()
    if request.method == 'POST':
        form = MyForm(request.POST, request.FILES)
        if form.is_valid():
            action = request.POST['action']
            title = str(request.FILES['field'])
            content = request.FILES['field'].read()
            # Bug fix: use a context manager so the file handle is closed
            # even if an exception is raised before the explicit close().
            # NOTE(review): the path is built by plain string concatenation;
            # confirm MEDIA_ROOT ends with a separator.
            with open(str(settings.MEDIA_ROOT) + title, "wb") as f:
                f.write(content)
            loadInputAndProcess('media/' + title, action == 'watch')
            if action == 'watch':
                return render(request, "myaction.html", {'value': title})
            return render(request, "showlink.html", {'value': "output.avi"})
    # GET request, or an invalid POST: show a fresh (unbound) form.
    form = MyForm()
    return render(request, "home.html", {"today": today, 'form': form})
"supdanie@fit.cvut.cz"
] | supdanie@fit.cvut.cz |
bc99270cd0e781b4de2801db63b825af6347e5f5 | a4b56daaca218129f9bf3ec13dec3c4b69d0b43c | /code/nmf/beta_nmf.py | b2c82531c6d931c5042b2fafd94b18590ec6d7a5 | [
"Apache-2.0"
] | permissive | Sandy4321/pof | 8e78fdddc29fcd2f5231c31793ef48f105b4dd11 | 9b580a4bf5d37c29a5a2a29417b60b3df5501594 | refs/heads/master | 2021-01-22T12:20:48.197136 | 2014-11-25T01:12:24 | 2014-11-25T01:12:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,567 | py | """
Beta-divergence NMF with multiplicative updates
Translate from MATLAB code by Minje Kim <minje@illinois.edu>
CREATED: 2013-09-16 03:04:33 by Dawen Liang <dliang@columbia.edu>
"""
import numpy as np
# Machine epsilon: added to denominators below to avoid division by zero.
eps = np.spacing(1)
def NMF_beta(X, K, W=None, beta=None, maxiter=500, tol=None, seed=None,
             normalize=False, verbose=False):
    ''' Beta-divergence NMF with multiplicative updates.

    Parameters
    ----------
    X : ndarray, shape (f, t)
        Non-negative matrix to factorize as W.dot(H).
    K : int
        Number of latent components.
    W : ndarray, shape (f, K), optional
        If given, W is held fixed and only H is learned; K must equal
        W.shape[1].
    beta : int
        Divergence to minimize: 0 (Itakura-Saito), 1 (generalized KL)
        or 2 (Euclidean).
    maxiter : int
        Maximum number of update iterations.
    tol : float, optional
        Relative-improvement stopping threshold; if None, run the full
        maxiter iterations.
    seed : int, optional
        Seed for the random initialization (None reseeds from the OS).
    normalize : bool
        If True, rescale the columns of W to unit L2 norm each iteration
        (compensated in H).
    verbose : bool
        Print the objective and improvement every iteration.

    Returns
    -------
    (W, H) : ndarrays of shapes (f, K) and (K, t)
    '''
    f, t = X.shape
    if seed is None:
        np.random.seed()
    else:
        np.random.seed(seed)
    if W is None:
        W = np.random.rand(f, K)
        updateW = True
    else:
        if K != W.shape[1]:
            raise ValueError('K != W.shape[1]')
        updateW = False
    H = np.random.rand(K, t)
    score = -np.inf
    if not beta in [0, 1, 2]:
        raise ValueError('beta has to be 0, 1 or 2')
    elif beta == 2:
        # EUC-NMF
        # (range instead of xrange keeps this Python 3 compatible)
        for i in range(maxiter):
            if updateW:
                X_bar = W.dot(H)
                W = W * X.dot(H.T)
                W = W / (X_bar.dot(H.T) + eps)
            H = H * W.T.dot(X) / (W.T.dot(W).dot(H) + eps)
            if normalize:
                # Bug fix: the rescaled matrices were previously discarded
                # (the call's return value was not assigned).
                (W, H) = _normalize(W, H)
            lastscore = score
            score = _compute_loss(X, W, H, beta)
            improvement = (lastscore - score) / abs(lastscore)
            if verbose:
                print ('Iteration %d: obj = %.2f (%.5f improvement)' %
                       (i, score, improvement))
            # Guard tol against None (float < None raises TypeError on
            # Python 3); tol=None simply runs all maxiter iterations,
            # matching the original behaviour.
            if tol is not None and i >= 10 and improvement < tol:
                break
    elif beta == 1:
        # KL-NMF
        for i in range(maxiter):
            if updateW:
                X_bar = W.dot(H)
                W = W * np.dot(X / (X_bar + eps), H.T)
                W = W / (np.dot(np.ones((f, t)), H.T) + eps)
            X_bar = W.dot(H)
            H = H * W.T.dot(X / (X_bar + eps))
            H = H / (W.T.dot(np.ones((f, t))) + eps)
            if normalize:
                (W, H) = _normalize(W, H)
            lastscore = score
            score = _compute_loss(X, W, H, beta)
            improvement = (lastscore - score) / abs(lastscore)
            if verbose:
                print ('Iteration %d: obj = %.2f (%.5f improvement)' %
                       (i, score, improvement))
            if tol is not None and improvement < tol:
                break
    elif beta == 0:
        # IS-NMF
        for i in range(maxiter):
            if updateW:
                X_bar = W.dot(H)
                W = W * np.dot(X / (X_bar + eps)**2, H.T)
                W = W / (np.dot((X_bar + eps)**(-1), H.T) + eps)
            X_bar = W.dot(H)
            H = H * W.T.dot(X / (X_bar + eps)**2)
            H = H / (W.T.dot((X_bar + eps)**(-1)) + eps)
            if normalize:
                (W, H) = _normalize(W, H)
            lastscore = score
            score = _compute_loss(X, W, H, beta)
            improvement = (lastscore - score) / abs(lastscore)
            if verbose:
                print ('Iteration %d: obj = %.2f (%.5f improvement)' %
                       (i, score, improvement))
            if tol is not None and improvement < tol:
                break
    return (W, H)
def _normalize(W, H):
scale = np.sqrt(np.sum(W**2, axis=0, keepdims=True))
W = W / scale
H = H * scale.T
return (W, H)
def _compute_loss(X, W, H, beta):
loss = np.inf
f, t = X.shape
X_bar = W.dot(H)
if beta == 0:
loss = np.sum(X / X_bar - np.log(X) + np.log(X_bar)) - f * t
elif beta == 1:
loss = np.sum(X * (np.log(X) - np.log(X_bar)) - X + X_bar)
elif beta == 2:
loss = np.sum((X - X_bar)**2)
return loss
| [
"dl2771@columbia.edu"
] | dl2771@columbia.edu |
36630e3c4ae14aff5e3e8187c3761c48caa6972a | 8b6a4ead4fe1c38b8bfc5d1d8ae4ea1ac31ba2ac | /vacancy/urls.py | 2994e196bbf649eb1d84cd930d04ec2a7ee3be3a | [] | no_license | infernowadays/hr_itmo | 99f38d86fd21a8b08a7789ae3f8a0786ac16f26c | d9232afe01d4dc84874adb3e1c76d3864e674928 | refs/heads/master | 2023-06-25T07:42:18.987465 | 2021-04-20T04:00:35 | 2021-04-20T04:00:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from vacancy import views
# Vacancy API routes. All endpoints are API-style views and are exempted
# from CSRF protection (csrf_exempt).
urlpatterns = [
    path('vacancies/', csrf_exempt(views.VacancyListView.as_view())),
    path('vacancies/<int:pk>/', csrf_exempt(views.VacancyDetailView.as_view())),
    path('favouriteVacancies/', csrf_exempt(views.FavouriteVacancyListView.as_view())),
    path('invitations/', csrf_exempt(views.RequestListView.as_view())),
    path('invitations/<int:pk>/', csrf_exempt(views.RespondRequestView.as_view())),
]
| [
"maximus1998g@mail.ru"
] | maximus1998g@mail.ru |
495632157b92ea029f13f8b863a4d624397b5f76 | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /tests/unit/modules/packaging/language/test_maven_artifact.py | 2f01414a47d9c3ec2c3a416f0f1af5274aff458a | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,510 | py | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.ansible.community.plugins.modules import maven_artifact
from ansible_collections.ansible.community.plugins.module_utils import basic
# Apply the patch_ansible_module fixture to every test in this module.
pytestmark = pytest.mark.usefixtures('patch_ansible_module')
maven_metadata_example = b"""<?xml version="1.0" encoding="UTF-8"?>
<metadata>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<versioning>
<latest>4.13-beta-2</latest>
<release>4.13-beta-2</release>
<versions>
<version>3.7</version>
<version>3.8</version>
<version>3.8.1</version>
<version>3.8.2</version>
<version>4.0</version>
<version>4.1</version>
<version>4.2</version>
<version>4.3</version>
<version>4.3.1</version>
<version>4.4</version>
<version>4.5</version>
<version>4.6</version>
<version>4.7</version>
<version>4.8</version>
<version>4.8.1</version>
<version>4.8.2</version>
<version>4.9</version>
<version>4.10</version>
<version>4.11-beta-1</version>
<version>4.11</version>
<version>4.12-beta-1</version>
<version>4.12-beta-2</version>
<version>4.12-beta-3</version>
<version>4.12</version>
<version>4.13-beta-1</version>
<version>4.13-beta-2</version>
</versions>
<lastUpdated>20190202141051</lastUpdated>
</versioning>
</metadata>
"""
# Each tuple: (patch_ansible_module fixture argument, requested maven version
# spec, version expected to be selected from maven_metadata_example above).
@pytest.mark.parametrize('patch_ansible_module, version_by_spec, version_choosed', [
    (None, "(,3.9]", "3.8.2"),
    (None, "3.0", "3.8.2"),
    (None, "[3.7]", "3.7"),
    (None, "[4.10, 4.12]", "4.12"),
    (None, "[4.10, 4.12)", "4.11"),
    (None, "[2.0,)", "4.13-beta-2"),
])
def test_find_version_by_spec(mocker, version_by_spec, version_choosed):
    """Resolve each maven version-range spec against the canned junit metadata
    and check the selected version, with the downloader's HTTP fetch stubbed."""
    _getContent = mocker.patch('ansible_collections.ansible.community.plugins.modules.maven_artifact.MavenDownloader._getContent')
    _getContent.return_value = maven_metadata_example
    artifact = maven_artifact.Artifact("junit", "junit", None, version_by_spec, "jar")
    mvn_downloader = maven_artifact.MavenDownloader(basic.AnsibleModule, "https://repo1.maven.org/maven2")
    assert mvn_downloader.find_version_by_spec(artifact) == version_choosed
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
51f4f498079a910c7fcc1fbaa739bb50a94e60a0 | 5089105e64999f7affe1627e2b2d6ec0e67a4720 | /django-import-export-master/tests/core/admin.py | 67e347dc9efaa974231480e0a7502fdb8a91306f | [
"BSD-2-Clause"
] | permissive | MWTechnology/API_EXCEL | 914f1ddff8685f09d6a3ccd960bbe924cec055e3 | 39ebf98632f72e045bd72205553acd8dbbb7ce64 | refs/heads/master | 2022-07-31T19:43:27.351437 | 2020-05-25T11:04:31 | 2020-05-25T11:04:31 | 266,739,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | from django.contrib import admin
from import_export.admin import ExportActionModelAdmin, ImportExportMixin, ImportMixin
from import_export.resources import ModelResource
from .forms import CustomConfirmImportForm, CustomImportForm
from .models import Author, Book, Category, Child, EBook
class ChildAdmin(ImportMixin, admin.ModelAdmin):
    """Admin for the Child model with import support only (ImportMixin)."""
    pass
class BookResource(ModelResource):
    """django-import-export resource describing how Book rows are imported."""
    class Meta:
        model = Book
    def for_delete(self, row, instance):
        """Mark an imported row for deletion when its 'id_people' column is empty."""
        return self.fields['id_people'].clean(row) == ''
class BookAdmin(ImportExportMixin, admin.ModelAdmin):
    """Book admin with both import and export, backed by BookResource."""
    list_display = ('id_people', 'description')
    resource_class = BookResource
class CategoryAdmin(ExportActionModelAdmin):
    """Category admin exposing export as a bulk admin action."""
    pass
class AuthorAdmin(ImportMixin, admin.ModelAdmin):
    """Author admin with import support only."""
    pass
# class CustomBookAdmin(BookAdmin):
# """BookAdmin with custom import forms"""
#
# def get_import_form(self):
# return CustomImportForm
#
# def get_confirm_import_form(self):
# return CustomConfirmImportForm
#
# def get_form_kwargs(self, form, *args, **kwargs):
# # update kwargs with authors (from CustomImportForm.cleaned_data)
# if isinstance(form, CustomImportForm):
# if form.is_valid():
# author = form.cleaned_data['id_people']
# kwargs.update({'id_people': author.id})
# return kwargs
# Wire each admin class to its model on the default admin site.
admin.site.register(Book, BookAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Child, ChildAdmin)
# admin.site.register(EBook, CustomBookAdmin)
| [
"MWTech@mail.ru"
] | MWTech@mail.ru |
286365df9076f0e9091ead9c909ed81d3dda9940 | f2922e79d4fb292c75cef24b60912f7b403e2db0 | /back_end/back_end/common/Renderer.py | df7489d3a410256fbe20e0cdefd5533b601c1257 | [] | no_license | rixingw/DocuScan | 8a4e2f05d7cb9e0c995fee5736fb8f192baa6de4 | d2a00d312c1bc12add84a5fd6537775b73318bf8 | refs/heads/master | 2020-03-17T06:46:55.122768 | 2018-08-08T17:42:28 | 2018-08-08T17:42:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,832 | py |
from django.conf import settings
import os
import math
import uuid
from PIL import Image
import fpdf
class Renderer(object):
    """Build a printable form layout as a list of drawable "item" dicts.

    Coordinates are millimetres on a US-Letter sized page (215.9 x 279.4 mm).
    Every drawable element is described by the dict produced by item();
    render() assembles the whole document, prefixed by four corner markers.
    """
    MAX_WIDTH = 215.9
    MAX_HEIGHT = 279.4
    MARGIN = 5
    MARKER_WIDTH = 5
    x1 = MARGIN
    y1 = MARGIN
    x2 = MAX_WIDTH - MARGIN
    y2 = MAX_HEIGHT - MARGIN

    def createBoxes(self, cx, cy):
        """Return a tiny zero-length line item used as a corner marker at (cx, cy)."""
        nudge = 0.01
        return self.item("Hello", 'L', cx + nudge, cy, cx + nudge, cy, size=5)

    def item(self, id, type, x1, y1, x2, y2, text='', align='I', italic=0, bold=0, size=12, color=0):
        """Describe one drawable element as a plain dict.

        `type` is 'T' (text), 'L' (line), 'B' (box) or 'I' (image); the other
        keys carry geometry and font styling consumed downstream.
        """
        return dict(
            name=id,
            type=type,
            x1=x1,
            y1=y1,
            x2=x2,
            y2=y2,
            font='Arial',
            size=size,
            bold=bold,
            italic=italic,
            underline=0,
            foreground=0,
            background=color,
            align=align,
            text=text,
            priority=2,
        )

    def header(self, elID, dy, title, uuid):
        """Logo placeholder, centred title, and the form UUID line."""
        logo_size = 30
        logo = self.item('company_logo', 'I', self.x1, self.y1 + dy,
                         self.x1 + logo_size, self.y1 + logo_size + dy)
        heading = self.item(elID, 'T', logo['x2'], 10 + dy, self.x2, 20 + dy,
                            text=title, align='C', italic=0, bold=0)
        uid_line = self.item(elID, 'T', logo['x2'], 10 + dy, self.x2, 40 + dy,
                             align='C', text=uuid)
        return [logo, heading, uid_line]

    def title(self, elID, dy, title):
        """Horizontal rule followed by a bold left-aligned section title."""
        rule = self.item(elID, 'L', self.x1, self.y1 + dy, self.x2, self.y1 + dy, size=0)
        caption = self.item(elID, 'T', self.x1, 2.5 + dy, self.x2, 20 + dy,
                            text=title, align='L', italic=0, bold=1)
        return [rule, caption]

    def textfield(self, elID, dy, title, information):
        """Label, a cyan answer box, then pre-filled information text."""
        label = self.item(elID, 'T', self.x1, self.y1 + dy, self.x2, 20 + dy,
                          text=title, align='L', italic=0, bold=0)
        box = self.item(elID, 'B', self.x1, label['y2'], self.x2, label['y2'] + 10,
                        text='', align='L', italic=0, bold=1, size=0, color=0x00FFFF)
        info = self.item(elID, 'T', self.x1, box['y2'] - 10, self.x2, 20 + box['y2'],
                         text=information, align='L', italic=0, bold=0)
        return [label, box, info]

    def column(self, elID1, elID2, dy, text1, text2):
        """Two side-by-side titled entry boxes sharing one row."""
        mid = self.MAX_WIDTH / 2.0
        left_title = self.item(elID1, 'T', self.x1, self.y1 + dy, mid, 20 + dy,
                               text=text1, align='C', italic=0, bold=0)
        right_title = self.item(elID2, 'T', self.x1 + mid, self.y1 + dy, self.x2, 20 + dy,
                                text=text2, align='C', italic=0, bold=0)
        left_box = self.item(elID1, 'B', self.x1, left_title['y2'], mid - 2.5,
                             10 + left_title['y2'], text='', align='I', italic=0, bold=1, size=0)
        right_box = self.item(elID2, 'B', mid + 2.5, right_title['y2'], self.x2,
                              10 + right_title['y2'], text='', align='I', italic=0, bold=1, size=0)
        return [left_title, right_title, left_box, right_box]

    def max_height(self, items):
        """Vertical span covered by *items*: max(y2) - min(y1)."""
        bottom = float("-inf")
        top = float("inf")
        for entry in items:
            bottom = max(bottom, entry['y2'])
            top = min(top, entry['y1'])
        return bottom - top

    def render(self, data):
        """Turn a list of widget descriptions into the full item list.

        Returns {'uuid': <formUUID taken from a 'header' widget, '' if none>,
        'items': [...]}, with four corner alignment markers prepended.
        """
        corners = [
            (5, 5),
            (self.MAX_WIDTH - self.MARKER_WIDTH, 5),
            (5, self.MAX_HEIGHT - self.MARKER_WIDTH),
            (self.MAX_WIDTH - self.MARKER_WIDTH, self.MAX_HEIGHT - self.MARKER_WIDTH),
        ]
        items = [self.createBoxes(cx, cy) for cx, cy in corners]
        dy = 5
        form_uuid = ''
        for d in data:
            print(d)
            element_id = str(uuid.uuid4())
            widget = d['itemType']
            if widget == 'header':
                form_uuid = d['formUUID']  # remember the form's identity
                produced = self.header(element_id, dy, d['title'], d['formUUID'])
            elif widget == 'titleview':
                produced = self.title(element_id, dy, d['title'])
            elif widget == 'textfield':
                produced = self.textfield(element_id, dy, d['title'], d['information'])
            elif widget == 'phoneview':
                produced = self.column(str(uuid.uuid4()), str(uuid.uuid4()), dy,
                                       d['title1'], d['title2'])
            else:
                continue
            dy += self.max_height(produced)
            items = items + produced
        return {'uuid': form_uuid, 'items': items}
"wur1@wit.edu"
] | wur1@wit.edu |
83e75209f2bacc14d06c126a68a7eeea79b89029 | ff26eafc192f053c0c8fcd49bda26607e52e0d49 | /flask_app/scripts/process_phase.py | da1ec266e9affd0ae25fcbc7104cd33f6b51d84d | [] | no_license | csu-anzai/chaudiere | e8342fb95464d85e2bf9027866d500feb5a4ee57 | e4ff5036fcd660c3abec3afb86fd070da738bd1e | refs/heads/master | 2020-07-19T13:31:11.423917 | 2019-09-04T22:30:12 | 2019-09-04T22:30:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,972 | py | # -*- coding: ISO-8859-1 -*-
"""
# script process_phase.py
## Summary
This script determines the `phase` value of ChaudiereMinute entries.
This script is supposed to be run every 1 or 2 minutes by cron
## CLI Usage :
Idem archive_minute.py
## CRON Config :
1-59/2 * * * * /home/pi/Envs/dev/bin/python /home/pi/Dev/chaudiere/chaudiereapp/scripts/process_phase.py
"""
import os, sys, argparse
from datetime import datetime, timedelta
import logging, logging.config
# Resolve project paths relative to this script so imports work from cron.
currentpath = os.path.abspath(os.path.dirname(__file__)) # /home/pi/Dev/chaudiere/chaudiereapp/script
chaudiereapp = os.path.dirname(currentpath) # /home/pi/Dev/chaudiere/chaudiereapp
projectpath = os.path.dirname(chaudiereapp) # /home/pi/Dev/chaudiere
envpath = os.path.dirname(projectpath) # /home/pi/Dev
envname = os.path.basename(envpath) # Dev
app_path = os.path.join(chaudiereapp, 'app')
# Make the application package importable before the app imports below.
sys.path.append(chaudiereapp)
import send_email_sms
from progress_bar import print_bar
from app import db
from app.models import ChaudiereMinute, timedelta_in_minute
from app.constantes import *
from app import create_app
# Push an application context so the models can be used outside a request.
app = create_app().app_context().push()
# import and get logger
logger_directory = os.path.join(projectpath, 'logger')
sys.path.append(logger_directory)
import logger_config
logger = logging.getLogger(__name__)
# import TEMP_CHAUDIERE_FAILURE from AdminConfig database
from app.models.admin_config import AdminConfig
admin_config = AdminConfig.first(AdminConfig)
if admin_config is not None:
    TEMP_CHAUDIERE_FAILURE = admin_config.temp_chaudiere_failure
    ALERTS_ENABLE = admin_config.alerts_enable
else:
    logger.error("Could not fetch AdminConfig (temp_chaudiere_failure, alerts_enable)")
def temperature_variation(entry, periode):
    """Return the temperature change (float, +/-) over `periode` minutes.

    Returns None when the information is unavailable. When the start of the
    period has no archived sample (missing sensor data), the delta is computed
    against the oldest more recent sample that exists.
    """
    try:
        window_start = entry.dt - timedelta(minutes=periode)
        window_end = entry.dt
        probe_dt = window_start
        offset = 0
        reference = None
        # Walk forward minute by minute until an archived sample is found.
        while reference is None and probe_dt < window_end:
            probe_dt = window_start + timedelta(minutes=offset)
            reference = ChaudiereMinute.get_by_datetime(ChaudiereMinute, probe_dt)
            offset += 1
        return entry.get(TEMP_CHAUDIERE) - reference.get(TEMP_CHAUDIERE)
    except Exception as e:
        # No sample in the whole window (reference is None) or a missing
        # sensor value ends up here: report "no information".
        logger.warning("temperature variation failed ({0})".format(e))
        return None
def find_last_phase():
    """
    Return the dt of the last processed ChaudiereMinute entry (the most recent
    entry whose `phase` is set), or None when no entry exists at all.
    """
    # No ChaudiereMinute entry at all: nothing to resume from.
    if ChaudiereMinute.last(ChaudiereMinute) is None:
        return None
    # First run since the database was created: no phase computed yet,
    # so start from the very first entry.
    try_first = ChaudiereMinute.first(ChaudiereMinute)
    if try_first is not None and try_first.phase is None:
        logger.debug('returning first entry')
        return try_first.dt
    # Otherwise scan backwards from the newest entry until a processed
    # (non-None) phase is found.
    dt = ChaudiereMinute.last(ChaudiereMinute).dt
    phase = None
    while phase is None:
        dt = dt - timedelta(minutes=1)
        entry = ChaudiereMinute.get_by_datetime(ChaudiereMinute, dt)
        if entry is not None:
            # Reuse the row already fetched instead of issuing the same
            # get_by_datetime query a second time (previous behaviour).
            phase = entry.phase
    logger.debug('last phase found is at : '+str(dt))
    return dt
def find_date_end(date):
    """Return the dt of an existing ChaudiereMinute entry at or just after
    *date*, scanning forward one minute at a time up to the newest entry."""
    newest_dt = ChaudiereMinute.last(ChaudiereMinute).dt
    candidate = ChaudiereMinute.get_by_datetime(ChaudiereMinute, date)
    while candidate is None and date < newest_dt:
        date = date + timedelta(minutes=1)
        candidate = ChaudiereMinute.get_by_datetime(ChaudiereMinute, date)
    logger.debug('found ChaudiereMinute at (dt end):'+ str(candidate.dt))
    return candidate.dt
def process_phase(mode='normal', hours=None, date=None):
    """
    Determine the begin/end datetimes of the ChaudiereMinute entries to
    process according to `mode`, then run phase/change/alert processing on
    every minute in that window:
      - 'normal': resume right after the last processed entry;
      - 'rework_from_now': reprocess the last `hours` hours;
      - 'rework_from_date': reprocess `hours` hours ending at `date`.
    Alerts are muted in the two rework modes.
    """
    rework_mode_disable_alert = False
    # BUGFIX: compare strings with == ; identity (`is`) only worked by
    # accident of CPython literal interning and is flagged on Python 3.8+.
    if mode == 'normal':
        begin = find_last_phase() # begin = last processed ChaudiereMinute entry.dt
        end = ChaudiereMinute.last(ChaudiereMinute).dt # end = last existing ChaudiereMinute entry.dt
    elif mode == 'rework_from_now':
        rework_mode_disable_alert = True
        end = ChaudiereMinute.last(ChaudiereMinute).dt
        begin = end - timedelta(hours=hours)
    elif mode == 'rework_from_date':
        rework_mode_disable_alert = True
        end = find_date_end(date)
        begin = end - timedelta(hours=hours)
    else:
        logger.error('wrong arguments')
        return
    if begin is None:
        logger.info('No records')
        return
    if ((begin + timedelta(minutes=1)) >= end):
        logger.info('Strating from '+str(begin)+' ...Waiting more records')
    else:
        logger.info('processing phase From ' + str(begin) + ' To ' + str(end))
        # Progress bar Init (for console mode)
        bar_items = timedelta_in_minute(begin, end)
        bar_item = 0
        # while some entries to process
        while ((begin + timedelta(minutes=1)) <= end):
            entry = ChaudiereMinute.get_by_datetime(ChaudiereMinute, begin)
            # entry should not be missing, test just in case and create missing entry
            if entry is None:
                logger.warning('create missing ChaudiereMinute entry (should not be the case')
                ChaudiereMinute.create(ChaudiereMinute, begin, None, None, None, None, None, None, None, None, None, None)
                entry = ChaudiereMinute.get_by_datetime(ChaudiereMinute, begin)
            # Progress bar Print (for console mode)
            bar_item += 1
            # print_bar(bar_item, bar_items, prefix=str(entry.dt))
            update_phase(entry)
            update_change(entry)
            process_alerts(entry, rework_mode_disable_alert)
            begin = begin + timedelta(minutes=1)
def update_phase(entry):
    """
    Update the `phase` field from the fan (VENT_PRIMAIRE), boiler temperature
    (TEMP_CHAUDIERE) and igniter (ALLUMAGE) sensor values of this entry.
    The conditions below run sequentially, so later assignments can overwrite
    a phase set earlier in the same call.
    """
    # Only when every sensor reading is available
    if entry.get(ALLUMAGE) is not None and\
       entry.get(VENT_PRIMAIRE) is not None and\
       entry.get(TEMP_CHAUDIERE) is not None:
        # Ignition detected (igniter > 0)
        if (entry.get(ALLUMAGE) > 0):
            entry.phase = PHASE_ALLUMAGE
            db.session.commit()
        # Combustion detected (fan > 0 and igniter == 0)
        elif entry.get(VENT_PRIMAIRE) > 0:
            entry.phase = PHASE_COMBUSTION
            db.session.commit()
        # Fire-holding detected (fan == 0 and igniter == 0)
        elif entry.get(VENT_PRIMAIRE) == 0:
            entry.phase = PHASE_MAINTIEN
            db.session.commit()
        # Stop detection (low-temperature condition: temp_chaudiere < TEMP_CHAUDIERE_FAILURE).
        # Despite the low temperature:
        # - igniter running => ALLUMAGE
        # - fan running and igniter ran within the last 20 minutes => COMBUSTION/SURVEILLANCE
        # - fan running and temperature rising [10 min] => COMBUSTION/SURVEILLANCE
        # - non-nominal case: fan running and temperature falling for 30+ minutes => jam risk
        if entry.get(TEMP_CHAUDIERE) < TEMP_CHAUDIERE_FAILURE:
            # Igniter running => ALLUMAGE
            if entry.get(ALLUMAGE) > 0:
                entry.phase = PHASE_ALLUMAGE
                db.session.commit()
            # Fan running and igniter was running less than 20 minutes ago
            elif entry.get(VENT_PRIMAIRE) > 0:
                condition_precs_was_allumage = '((prec is not None) and (prec.phase == '+str(PHASE_ALLUMAGE)+' ))'
                if entry.at_least_one_prec_verify_condition(20, condition_precs_was_allumage):
                    entry.phase = PHASE_SURVEILLANCE
                    db.session.commit()
                # Fan running and temperature rising over the last 10 minutes
                delta_temp = temperature_variation(entry, 10)
                if delta_temp is not None and delta_temp > 0.2:
                    entry.phase = PHASE_SURVEILLANCE
                    db.session.commit()
                # Non-nominal case: fan running and temperature falling for 30+ minutes.
                # NOTE(review): "falling" would be `delta_temp < -1.0`; as written,
                # any 30-min variation below +1.0 triggers the jam phase -- confirm.
                delta_temp = temperature_variation(entry, 30)
                if delta_temp is not None and delta_temp < 1.0:
                    entry.phase = PHASE_RISQUE_BOURAGE
                    db.session.commit()
        # In every other case (default) => ARRET
        # NOTE(review): this `else` pairs with `if ... < TEMP_CHAUDIERE_FAILURE`
        # above, so ARRET is set whenever the temperature is at/above the
        # threshold; the comment suggests it was meant to pair with the inner
        # if/elif chain instead -- confirm the intended indentation.
        else:
            entry.phase = PHASE_ARRET
            db.session.commit()
    # Default case: no sensor information available, phase is UNDEFINED
    else:
        entry.phase = PHASE_UNDEFINED
        db.session.commit()
def update_change(entry):
    """
    Update the "change" flag: True when this entry's phase differs from the
    previous minute's phase, False otherwise (including when there is no
    previous entry at all).
    """
    # Fetch the previous entry once instead of issuing the prec() query twice.
    prec = entry.prec()
    if prec is not None and prec.phase != entry.phase:
        entry.change = True
    else:
        entry.change = False
    db.session.commit()
def process_alerts(entry, rework_mode_disable_alert):
    """Send an email/SMS alert when this entry just switched to the ARRET phase.

    The alert fires only when `change` is True, the current phase is ARRET and
    none of the 10 previous entries was already in ARRET (this handles the
    ARRET -> UNDEFINED -> ARRET sequence where `change` is True again).
    Whenever the alert condition holds, entry.event is set to EVENT_ALERT,
    even if the actual sending was suppressed (rework mode, non-Prod
    environment, or alerts disabled in the admin config).
    """
    not_already_alert = '((prec is not None) and (prec.phase != '+str(PHASE_ARRET)+' ))'
    should_alert = (entry.change is True
                    and entry.phase == PHASE_ARRET
                    and entry.all_prec_verify_condition(10, not_already_alert))
    if should_alert:
        if rework_mode_disable_alert is not False:
            logger.info('Not Sending email/sms alert (already sent previously) for entry :'+str(entry.dt))
        elif envname != 'Prod':
            logger.info('Not Sending email/sms alert (not Prod env) for entry :'+str(entry.dt))
        elif ALERTS_ENABLE == True:
            logger.info('Sending email/sms alert for entry :'+str(entry.dt))
            send_email_sms.Send_Mail_Chaudiere_Alert(entry.dt)
            send_email_sms.Send_SMS_Chaudiere_Alert(entry.dt)
        else:
            logger.info('Not Sending email/sms alert (disable by Admin Config) for entry :'+str(entry.dt))
        entry.event = EVENT_ALERT
        db.session.commit()
def test_alerts():
    """Manually fire the alert email and SMS for the most recent entry
    (triggered by the --test_alerts CLI flag)."""
    logger.debug('test_alerts()')
    entry = ChaudiereMinute.last(ChaudiereMinute)
    send_email_sms.Send_Mail_Chaudiere_Alert(entry.dt)
    send_email_sms.Send_SMS_Chaudiere_Alert(entry.dt)
if __name__ == '__main__':
    # PARSE ARGS
    parser = argparse.ArgumentParser(description = "process phase calculation", epilog = "" )
    #group = parser.add_mutually_exclusive_group()
    parser.add_argument('--test_alerts', action='store_true', default=False, dest='test_alerts', help='test email and sms alerts')
    parser.add_argument('--rework_from_now', action='store_true', default=False, dest='rework_from_now', help='rework N hours from now')
    parser.add_argument('--rework_from_date', action='store_true', default=False, dest='rework_from_date', help='rework N hours from given END date')
    parser.add_argument('--hours', type=int, default=None, help='number of hour to rework')
    parser.add_argument('--date', default=None, help='end date to rework YYYY/MM/DD/HH')
    args = parser.parse_args()
    print (args)
    if args.test_alerts:
        print('sending test alerts')
        test_alerts()
    elif args.rework_from_now:
        if not args.hours:
            print('Argument error : --hours must be set')
            exit()
        print('mode=rework_from_now '+str(args.hours))
        process_phase(mode='rework_from_now', hours=args.hours)
    elif args.rework_from_date:
        #python process_phase.py --rework_from_date --hours 10 --date 2018/05/9/10
        if not args.hours:
            print('Argument error : --hours must be set')
            exit()
        if not args.date:
            print('Argument error : --date must be set')
            exit()
        date = args.date.split('/')
        print (date)
        try:
            ts_end = datetime(int(date[0]), int(date[1]), int(date[2]), int(date[3]), 0, 0)
            print (ts_end)
            print (str(ts_end))
        # BUGFIX: non-numeric parts raise ValueError (from int()), and
        # out-of-range values raise ValueError from datetime(); previously
        # only IndexError (too few parts) was caught, so a date such as
        # "2018/xx/9/10" crashed with a traceback instead of this message.
        except (IndexError, ValueError):
            print('Argument error : --date must be YYYY/MM/DD/HH')
            exit()
        print('mode=rework_from_date date='+str(ts_end)+' hours='+str(args.hours))
        process_phase(mode='rework_from_date', date=ts_end, hours=args.hours)
    else:
        print('mode=normal')
        process_phase(mode='normal')
"matthieujouve@gmail.com"
] | matthieujouve@gmail.com |
cfed8b87698352893ca84ff62078e002f03646b4 | 2b6645da81eadf45e8df1573befaa8e84a0f52ea | /Aggregator/feed.py | c30cb24c8ceb2cd82aed3fc0d20b2afd7158071c | [] | no_license | jamesfebin/NewsReaderApp | da4817a262361ca57cf95a06f6c3ad7c136d859f | 0ede5e9d3f6af80560a4638fc56700c9d8764148 | refs/heads/master | 2022-11-24T03:27:55.710326 | 2016-08-23T14:31:30 | 2016-08-23T14:31:30 | 65,388,244 | 0 | 1 | null | 2022-11-22T00:31:43 | 2016-08-10T14:15:01 | Python | UTF-8 | Python | false | false | 2,735 | py | import feedparser
import time
import MySQLdb
import re
from BeautifulSoup import BeautifulSoup
import HTMLParser
import datetime
import urllib, cStringIO
import MySQLdb.cursors
import requests
import json
# NOTE(security): database credentials are hardcoded here; move them to
# environment variables or a config file before publishing this code.
server='myfirstdb.cgl42w3ennqs.us-west-1.rds.amazonaws.com'
database='myfirstdb'
username='febin'
password='febin123'
client = MySQLdb.connect(server,username,password,database,cursorclass=MySQLdb.cursors.DictCursor)
cursor = client.cursor()
# Force utf8 on the connection so titles/summaries round-trip correctly.
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
def writeToDatabase(data):
    """Store one aggregated article in the `news` table.

    Deduplicates on `link`; stamps `published` with the ingest time before
    inserting. Best-effort: any error is printed and swallowed.
    """
    try:
        cursor.execute("SELECT * FROM news WHERE link=%s",(data['link'],))
        data['published'] = time.time()
        is_new = (cursor.rowcount == 0)
        if is_new:
            row = (data['title'], data['link'], data['summary'],
                   data['published'], data['image'], data['category'])
            cursor.execute("INSERT INTO news (title,link,summary,published,image,category) VALUES (%s,%s,%s,%s,%s,%s)", row)
            client.commit()
    except Exception as e:
        print(e)
def cleanhtml(raw_html):
    """Strip HTML/XML tags from *raw_html* and return the remaining text."""
    tag_pattern = re.compile('<.*?>')
    return tag_pattern.sub('', raw_html)
# (category, RSS url) pairs of the feeds to aggregate on each run.
sources = [{'category':'business','url':'http://www.business-standard.com/rss/home_page_top_stories.rss'},{'category':'tech','url':'http://feeds.feedburner.com/TechCrunch/'},{'category':'entertainment','url':'http://feeds.feedburner.com/thr/news'}]
for source in sources:
    # Pull and parse the remote feed for this category.
    feedData = feedparser.parse(source['url'])
    for entry in feedData.entries:
        try:
            link = ''
            published = 0
            summary = ''
            title = ''
            image = ''
            if 'link' in entry:
                link = entry.link
            # Prefer an image declared in the RSS enclosures, if any.
            if 'enclosures' in entry:
                for enclosure in entry.enclosures:
                    if "image" in enclosure['type'] and 'href' in enclosure:
                        image = enclosure['href']
                        break
            if 'title' in entry:
                title = entry.title.encode('ascii','ignore')
            if 'summary' in entry:
                summary = entry.summary.encode('ascii','ignore')
            elif 'description' in entry:
                summary = entry.description.encode('ascii','ignore')
            # Otherwise fall back to the first <img> found in the summary HTML.
            if summary != '':
                soup = BeautifulSoup(summary)
                for urlTag in soup.findAll("img"):
                    image = urlTag['src']
                    break
            data = {'link': link, 'summary': summary, 'title': title,
                    'image': image, 'category': source['category']}
            writeToDatabase(data)
        except Exception as e:
            # Best-effort per entry: a malformed item must not stop the run.
            print(e)
client.close()
| [
"jamesfebin@gmail.com"
] | jamesfebin@gmail.com |
27e85e7a355675b6b4ac3e2e40d629554bc2caf4 | 7bab49987d21cdbad08536a7fa5627757cca546d | /AS1115/__ref/AS1115-master/AS1115-master/Python/AS1115.py | 9450766e835ba6a03ff6bea7c8b02eb88be9c276 | [] | no_license | leehands83/raspberrypizero | 7321df6f044b9a127076d8fd6cdd5b154da7e63b | e5f2dade2115800260646c1abf12d8c5e0e9a1ac | refs/heads/main | 2023-03-23T08:17:24.200788 | 2021-03-01T03:08:59 | 2021-03-01T03:08:59 | 342,430,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# AS1115
# This code is designed to work with the AS1115_I2CL_3CE I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/LED-Display?sku=AS1115_I2CL_3CE#tabs-0-product_tabset-2
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# --- One-time AS1115 display configuration over I2C (device address 0x00) ---
# AS1115 address, 0x00(00)
# Select Shutdown register, 0x0C(12)
# 0x01(01) Normal Operation, Reset Feature Register to Default Settings
bus.write_byte_data(0x00, 0x0C, 0x01)
# AS1115 address, 0x00(00)
# Select Intensity Control Register, 0x0A(10)
# 0x80(128) Duty cycle = 1/16
bus.write_byte_data(0x00, 0x0A, 0x80)
# AS1115 address, 0x00(00)
# Select Feature Register, 0x0E(14)
# 0x04(04) Enable HEX decoding
bus.write_byte_data(0x00, 0x0E, 0x04)
# AS1115 address, 0x00(00)
# Select Scan-limit Register, 0x0B(11)
# 0x02(02) Display digits 0-2
bus.write_byte_data(0x00, 0x0B, 0x02)
# AS1115 address, 0x00(00)
# Select Decode-enable Register, 0x09(09)
# 0x07(07) Set all digits to font decode
bus.write_byte_data(0x00, 0x09, 0x07)
# Let the configuration settle before driving the digits below.
time.sleep(0.3)
for data in range(0, 16):
for digit in range(3):
# Write data on the digits
bus.write_byte_data(0x00, digit+1, data)
# Output to screen
print "Display on 7-Segment : ",hex(data)
time.sleep(0.8) | [
"leehands83@icloud.com"
] | leehands83@icloud.com |
f69aeef790b0710eee14082b761cf37a520d9511 | 52a2260be6a8ff7baab861f1e79d9095b37bf347 | /gentripstat2.py | dfe8bd0df29920bdef4476d35b01933536d3eec0 | [] | no_license | jittat/hcr-scripts | af9b5ba2067fc00b5a1be0fb7ba66d6abd75d157 | 75c0a70abc36ea963096924e0b14a65398de7ab4 | refs/heads/master | 2016-09-11T02:44:12.754535 | 2014-10-08T02:01:38 | 2014-10-08T02:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | import sys
import config
from distance import map_distance
from mapgraph import MapGraph
from mapgraph import map_to_frame_point
MAP_FRAME = config.MAP_FRAME
SVG_SIZE = config.SVG_SIZE
SVG_SCALE = config.SVG_SCALE
def generate_svg(map_graph, trips, frame, scale):
header = "<html><body>"
svg_element_open = '<svg width="%d" height="%d" style="border: 1px solid gray">' % (SVG_SIZE[0], SVG_SIZE[1])
svg_element_close = "</svg>"
footer = "</body></html>"
print header
print svg_element_open
for n in map_graph.nodes.values():
print n.render(frame, scale)
for e in map_graph.edges.values():
print e.render(frame, scale, map_graph.nodes)
total_trips = len(trips)
switched_trips = 0
for t in trips:
x1,y1,x2,y2 = t
direct_distance = map_distance(x1,y1,x2,y2)
rail_distance = map_graph.network_distance(x1,y1,x2,y2)
if rail_distance < direct_distance:
rate = ((direct_distance / rail_distance)**2) / 20.0
if rate > 1:
rate = 1
cx1,cy1 = map_to_frame_point(x1,y1,frame,scale)
cx2,cy2 = map_to_frame_point(x2,y2,frame,scale)
#print '<circle cx="%f" cy="%f" r="10" fill="%s" fill-opacity="0.02"></circle>' % (cx1,cy1,color)
#print '<circle cx="%f" cy="%f" r="10" fill="%s" fill-opacity="0.02"></circle>' % (cx2,cy2,color)
print '<line x1="%f" y1="%f" x2="%f" y2="%f" stroke="black" stroke-opacity="%f"/>' % (cx1,cy1,cx2,cy2,rate/5.)
print svg_element_close
print '<br>'
print '% usage: ',float(switched_trips*100)/total_trips
print footer
def read_network_config(filename):
    """Parse a network config file.

    Each non-blank line is "network <file>" or "trip <file>"; returns a dict
    with 'networks' and 'trips' lists of file names. Unknown lines are ignored.
    """
    nconfig = { 'networks': [],
                'trips': [] }
    # Use a context manager so the file handle is closed (it was leaked before).
    with open(filename) as f:
        for l in f:
            items = l.strip().split()
            if len(items)==0:
                continue
            if items[0] == 'network':
                nconfig['networks'].append(items[1])
            elif items[0] == 'trip':
                nconfig['trips'].append(items[1])
    return nconfig
def build_map(network_config):
    """Assemble a MapGraph from every network file listed in the config."""
    graph = MapGraph()
    for network_file in network_config['networks']:
        graph.append_from_file(network_file)
    return graph
def read_trip(filename):
    """Read a trip CSV file, skipping the header line; each remaining row is
    returned as a list of floats (x1, y1, x2, y2)."""
    # Use a context manager so the file handle is closed (it was leaked before).
    with open(filename) as f:
        rows = f.readlines()[1:]  # first line is a header
    return [[float(x) for x in row.strip().split(',')] for row in rows]
def main():
    """Entry point: build the map, compute all-pairs shortest paths, then
    render trips taken either from the config's 'trip' files or, when none
    are listed, from the file named by the second CLI argument."""
    network_config = read_network_config(sys.argv[1])
    map_graph = build_map(network_config)
    map_graph.compute_apsp()
    if len(network_config['trips']) == 0:
        # No trip files in the config: fall back to the CLI argument.
        trips = read_trip(sys.argv[2])
    else:
        # BUGFIX: `trips` was never assigned when the config listed trip
        # files, so generate_svg() crashed with NameError. Load and
        # concatenate every configured trip file instead.
        trips = []
        for trip_file in network_config['trips']:
            trips.extend(read_trip(trip_file))
    generate_svg(map_graph, trips, MAP_FRAME, SVG_SCALE)
if __name__ == '__main__':
    main()
"jittat@gmail.com"
] | jittat@gmail.com |
0c113ce6b913465b8c49211d4864ea8c81210f3f | ca29a816efe420df9490b6056a77ff078cd2aa0f | /distribution.py | b7c897c3c2b1e583950353cc769eda91f21abd7d | [] | no_license | Martin819/NumGenProbDist | 5bc0f01a643133319f57a3557804f842e2eca333 | 09f8566789ba0eb4b61f2bb4c7c365d3ef164f44 | refs/heads/master | 2021-10-08T06:30:35.394767 | 2018-12-09T12:20:17 | 2018-12-09T12:20:17 | 159,064,644 | 0 | 0 | null | 2018-12-08T15:10:04 | 2018-11-25T19:03:44 | Python | UTF-8 | Python | false | false | 1,674 | py | import random
from math import sqrt, cos, pi, log, exp
import numpy
## Uniform
def getUniform(low, high):
    """Draw one sample from the continuous uniform distribution on [low, high]."""
    sample = random.uniform(low, high)
    return sample
## Triangular
def calculateTriangular(a, b, c):
    """Sample Triangular(low=a, high=b, mode=c) by inverse-CDF transform."""
    u = random.uniform(0.0, 1.0)
    cut = (c - a) / (b - a)  # CDF value at the mode
    if u > cut:
        return b - sqrt((1.0 - u) * (b - a) * (b - c))
    return a + sqrt(u * (b - a) * (c - a))
def getTriangular(low, high, mode):
    """Draw one triangular variate using the stdlib generator."""
    sample = random.triangular(low, high, mode)
    return sample
def numpyTriangular(low, high, mode):
    """Draw one triangular variate via NumPy.

    Note: numpy.random.triangular takes (left, mode, right), so the
    arguments are reordered here on purpose.
    """
    sample = numpy.random.triangular(low, mode, high)
    return sample
## Beta
def calculateBetavariate(alpha, beta):
    """Sample Beta(alpha, beta) as X/(X+Y) with X~Gamma(alpha,1), Y~Gamma(beta,1)."""
    gamma_a = random.gammavariate(alpha, 1.0)
    gamma_b = random.gammavariate(beta, 1.0)
    return gamma_a / (gamma_a + gamma_b)
def getBetavariate(alpha, beta):
    """Draw one Beta(alpha, beta) variate using the stdlib generator."""
    sample = random.betavariate(alpha, beta)
    return sample
def numpyBetavariate(alpha, beta):
    """Draw one Beta(alpha, beta) variate via NumPy."""
    sample = numpy.random.beta(alpha, beta)
    return sample
## Exponential
def calculateExpovariate(lambd):
    """Sample Exponential(rate=lambd) by the inverse-CDF transform.

    BUGFIX: the previous version returned log(1-U)/lambd, i.e. the *negated*
    sample -- an exponential variate is never negative. The inverse CDF is
    -ln(1-U)/lambda.
    """
    return -log(1.0 - random.uniform(0.0, 1.0)) / lambd
def getExpovariate(lambd):
    """Draw one Exponential(rate=lambd) variate using the stdlib generator."""
    sample = random.expovariate(lambd)
    return sample
def numpyExpovariate(lambd):
    """Sample Exponential(rate=lambd) via NumPy.

    BUGFIX: numpy.random.exponential takes the *scale* (1/lambda), not the
    rate, while the sibling getExpovariate/calculateExpovariate treat `lambd`
    as a rate. Pass 1/lambd so all three sample the same distribution.
    """
    return numpy.random.exponential(1.0 / lambd)
## Lognormal
def calculateLognormalvariate(mu, sigma):
    """Sample LogNormal(mu, sigma) as exp() of a hand-rolled normal variate."""
    normal_sample = calculateNormalvariate(mu, sigma)
    return exp(normal_sample)
def getLognormvariate(mu, sigma):
    """Draw one LogNormal(mu, sigma) variate using the stdlib generator."""
    sample = random.lognormvariate(mu, sigma)
    return sample
def numpyLognormalvariate(mu, sigma):
    """Draw one LogNormal(mu, sigma) variate via NumPy (mean/sigma of the
    underlying normal distribution)."""
    sample = numpy.random.lognormal(mu, sigma)
    return sample
## Normal
def calculateNormalvariate(mu, sigma):
    """Sample N(mu, sigma) with the Box-Muller transform.

    BUGFIX: the previous version shifted both uniforms into [1, 2), which
    makes -2*log(y1) negative and sqrt() raise ValueError ("math domain
    error") for essentially every call. Box-Muller needs u1 in (0, 1];
    using 1 - U achieves that while still avoiding log(0).
    """
    u1 = 1.0 - random.uniform(0.0, 1.0)  # in (0, 1]: log(u1) is defined
    u2 = random.uniform(0.0, 1.0)
    z = cos(2 * pi * u2) * sqrt(-2 * log(u1))
    return (z * sigma) + mu
def getNormalvariate(mu, sigma):
    """Draw one N(mu, sigma) variate using the stdlib generator."""
    sample = random.normalvariate(mu, sigma)
    return sample
def numpyNormalvariate(mu, sigma):
    """NumPy Normal(mu, sigma) variate."""
    return numpy.random.normal(loc=mu, scale=sigma)
"polreichmartin@gmail.com"
] | polreichmartin@gmail.com |
a0500e29e662f0d40278b41d8bd1a22179bded49 | 90c8f09838360c4d31cd927e252e8a81f6b143f3 | /plugins/afishareviews.py | 1545ea0b45dace558d7c494d770009e4353348cf | [] | no_license | 9seconds/ZineStorekeeper | 9859b63ad8c3dde825dc178654ca37001c3307be | 1eb06e3b4c5345adb4adf8f261154467d356d52a | refs/heads/master | 2020-05-26T23:28:50.444851 | 2012-01-06T09:26:54 | 2012-01-06T09:26:54 | 2,992,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,080 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# afishareviews.py
#
# Copyright 2011 Serge Arkhipov <serge@aerialsounds.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from utils.website import TwoStep, parser, parser_str
from utils.papercuts import convert_rudate, stripped, exceptionable, urlopen
class AfishaReviews (TwoStep):
    """Two-step scraper for CD reviews on afisha.ru.

    Step one walks the paginated review listing; step two visits each
    review page and extracts one CSV row per review.
    """

    @staticmethod
    @exceptionable
    def get_ly_line (info):
        # The last line of the summary block holds "label, year"; split on ','.
        return info.cssselect('.b-object-summary .m-margin-btm')[0].text_content()\
               .strip().split("\n")[-1].split(',')

    @staticmethod
    @stripped
    @exceptionable
    def get_artist (info):
        # Artist name from the page header.
        return info.cssselect('.b-object-summary .b-object-header h1')[0].text_content()

    @staticmethod
    @stripped
    @exceptionable
    def get_album (info):
        # Album title; strip the Russian guillemet quotation marks.
        return info.cssselect('.b-object-summary a')[0].text_content()\
               .replace(u'«', '').replace(u'»', '')

    @staticmethod
    @stripped
    @exceptionable
    def get_label (info):
        # First field of the "label, year" line.
        return AfishaReviews.get_ly_line(info)[0]

    @staticmethod
    @stripped
    @exceptionable
    def get_release_year (info):
        # Second field of the "label, year" line.
        return AfishaReviews.get_ly_line(info)[1]

    @staticmethod
    @exceptionable
    def get_pubdate (info):
        # The first line of the review-entry info block is a Russian date string.
        return convert_rudate(
            info.cssselect('.b-review-list .b-entry-info')[0].text_content()\
                .strip().split("\n")[0].strip()
        )

    @staticmethod
    @stripped
    @exceptionable
    def get_author (info):
        return info.cssselect('.b-review-list .user h3 a')[0].text_content()

    @staticmethod
    @exceptionable
    def get_score (info):
        # The rating is embedded in the "title" attribute; slice out the number.
        line = info.cssselect('.b-review-list .b-rating em.mask')[0].get('title')
        return int(line[8:-5]) if u':' in line else 0

    def __init__ (self, output = None):
        csv_header = ('URL', 'Artist', 'Album', 'Label', 'Release Year', 'Publication date', 'Author', 'Score')
        super(AfishaReviews, self).__init__(
            'afisha.ru',
            '/cd/cd_list/page{0}/sortbyalpha/',
            output = output,
            csv_header = csv_header
        )
        self.task_name = '{0} reviews'.format(self.domain)
        self.css_content = '#content'

    def get_page_data (self, url, content):
        """Extract one CSV row (matching csv_header) from a review page."""
        artist = self.get_artist(content)
        album = self.get_album(content)
        label = self.get_label(content)
        release_year = self.get_release_year(content)
        pub_date = self.get_pubdate(content)
        author = self.get_author(content)
        score = self.get_score(content)
        return (url, artist, album, label, release_year, pub_date, author, score)

    def get_elements (self, document):
        """Yield review-page URLs found on one listing page."""
        for el in document.cssselect('#objects-list .places-list-item'):
            if len(el.cssselect('a')) > 1: # as a rule, if there are two links here (album and label), the review is not broken
                yield el.cssselect('h3 a')[0].get('href')

    def get_pagecount (self):
        """Read the total number of listing pages from the pager widget."""
        handler = urlopen(self.page_counter.construct_url(1))
        pagination = parser(handler.read()).cssselect('#ctl00_CenterPlaceHolder_ucPager_LastPageLink')[0]
        handler.close()
        return int(pagination.text)


AfishaReviews.register()
| [
"nineseconds@yandex.ru"
] | nineseconds@yandex.ru |
85c1bb8420286b68f79239dbbeddde3f837944e1 | 1ef558babdbbf4b45d40b1ab63222f8800e4bd21 | /SConstruct | 5bed8a6a10526bcc0c035abb66ffa0ee5ee7daa2 | [] | no_license | maroxe/maroxe3d | e74aac4256d43d1331b950154172c275458d5812 | f83bb68c4783db6669460542392c8108594a95d0 | refs/heads/master | 2020-05-31T17:44:23.428060 | 2011-03-07T18:47:22 | 2011-03-07T18:47:22 | 1,451,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | env = Environment(CPPPATH = ['.'])
libs = [
'Irrlicht',
'BulletDynamics',
'BulletCollision',
'LinearMath',
]
files = Glob('*cpp');
env.Program(
'myprogram',
files,
LIBS=libs
)
Repository('/usr/include/bullet/')
| [
"bachir009@gmail.com"
] | bachir009@gmail.com | |
1ac92e7c1cd1ada7a4efc691a7fceb9c9662e58c | bef33b66de0645c60981ce81d1c59b5edfed28da | /edwin_groundwater_model/modflow_offline.py | f0a1060b995ff4e90b46673f640ed3221c663ff3 | [] | no_license | edwinkost/rahasia_edwin | 4b4a09b80581406a355f3346d433b44920737ee7 | 1e9b85c07c0794267f3ba5fb366cffb3c34f722b | refs/heads/master | 2023-03-17T20:36:56.497559 | 2023-03-04T14:14:59 | 2023-03-04T14:14:59 | 33,725,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | import os
import sys
import math
import gc
import pcraster as pcr
import virtualOS as vos
import groundwater_MODFLOW
import logging
logger = logging.getLogger(__name__)
'''
Created on May 20, 2015
@author: Edwin H. Sutanudjaja
'''
class ModflowOfflineCoupling(object):
def __init__(self, configuration, currTimeStep):
self._configuration = configuration
self._modelTime = currTimeStep
pcr.setclone(configuration.cloneMap)
# read the ldd map
self.lddMap = vos.netcdf2PCRobjCloneWithoutTime(configuration.modflowParameterOptions['channelNC'],'lddMap',\
configuration.cloneMap)
# ensure ldd map is correct, and actually of type "ldd"
self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))
# defining the landmask map
if configuration.globalOptions['landmask'] != "None":
self.landmask = vos.readPCRmapClone(\
configuration.globalOptions['landmask'],
configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'])
else:
self.landmask = pcr.defined(self.lddMap)
# preparing the sub-model(s) - Currently, there is only one sub-model.
self.createSubmodels()
@property
def configuration(self):
return self._configuration
def createSubmodels(self):
# initializing sub modules
self.modflow = groundwater_MODFLOW.GroundwaterModflow(self.configuration,\
self.landmask)
def dumpState(self, outputDirectory, timeStamp = "Default"):
#write all state to disk to facilitate restarting
state = self.getState()
groundWaterState = state['groundwater']
# time stamp used as part of the file name:
if timeStamp == "Default": timeStamp = str(self._modelTime.fulldate)
for variable, map in groundWaterState.iteritems():
vos.writePCRmapToDir(\
map,\
str(variable)+"_"+
timeStamp+".map",\
outputDirectory)
def getState(self):
result = {}
result['groundwater'] = self.modflow.getState()
return result
def update(self):
logger.info("Updating model for time %s", self._modelTime)
self.modflow.update(self._modelTime)
# save/dump states at the end of the month or at the end of model simulation
if self._modelTime.isLastDayOfMonth() or self._modelTime.isLastTimeStep():
logger.info("Save or dump states to pcraster maps for time %s to the directory %s", self._modelTime, self._configuration.endStateDir)
self.dumpState(self._configuration.endStateDir)
def get_initial_heads(self):
logger.info("Get initial head values (based on a steady-state simulation or a pre-defined pcraster map.")
self.modflow.get_initial_heads()
# save/dump states used as the initial conditions
logger.info("Save/dump states of the initial conitions used to pcraster maps to the directory %s", self._configuration.endStateDir)
self.dumpState(outputDirectory = self._configuration.endStateDir,\
timeStamp = self._configuration.globalOptions['startTime']+".ini")
| [
"edwin@workstationedwin.geo.uu.nl"
] | edwin@workstationedwin.geo.uu.nl |
5f4e3f9359c4b587f02c13d234bc79142af780ce | df000d3eff14f4c4714d688a9173672303672e4e | /olpc-sugar/network.py | 0e25d731ad27a634b2112efe238824e4520cbef8 | [] | no_license | dannyiland/OLPC-Mesh-Messenger | bd370fcb2fb78d5ce2dfb5acf812c62ddc0e4418 | 6cc17391420b05fe64966a6c8b7ac709c0c0e7a2 | refs/heads/master | 2021-01-01T18:23:07.980696 | 2012-04-13T11:37:36 | 2012-04-13T11:37:36 | 3,792,600 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,365 | py | # Copyright (C) 2006-2007 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
STABLE.
"""
import os
import threading
import urllib
import fcntl
import tempfile
import gobject
import SimpleHTTPServer
import SocketServer
# Per-thread auth-info registry, keyed by the current thread object.
__authinfos = {}

def _add_authinfo(authinfo):
    # Remember authinfo for the calling thread.
    __authinfos[threading.currentThread()] = authinfo

def get_authinfo():
    # Return the calling thread's authinfo, or None if none was registered.
    return __authinfos.get(threading.currentThread())

def _del_authinfo():
    # Forget the calling thread's authinfo (KeyError if none was registered).
    del __authinfos[threading.currentThread()]
class GlibTCPServer(SocketServer.TCPServer):
    """GlibTCPServer

    Integrate socket accept into glib mainloop.
    """

    allow_reuse_address = True
    request_queue_size = 20

    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.TCPServer.__init__(self, server_address,
                                        RequestHandlerClass)
        self.socket.setblocking(0)  # Set nonblocking

        # Watch the listener socket for data
        gobject.io_add_watch(self.socket, gobject.IO_IN, self._handle_accept)

    def _handle_accept(self, source, condition):
        """Process incoming data on the server's socket by doing an accept()
        via handle_request()."""
        # Returning True keeps the gobject watch installed.
        if not (condition & gobject.IO_IN):
            return True
        self.handle_request()
        return True

    def close_request(self, request):
        """Called to clean up an individual request."""
        # let the request be closed by the request handler when its done
        pass
class ChunkedGlibHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """RequestHandler class that integrates with Glib mainloop. It writes
    the specified file to the client in chunks, returning control to the
    mainloop between chunks.
    """

    # Bytes written to the client per mainloop iteration.
    CHUNK_SIZE = 4096

    def __init__(self, request, client_address, server):
        # File object being streamed and the gobject source id of the write
        # watch; both live for the duration of one GET.
        self._file = None
        self._srcid = 0
        SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
            self, request, client_address, server)

    def log_request(self, code='-', size='-'):
        # Suppress per-request logging.
        pass

    def do_GET(self):
        """Serve a GET request."""
        self._file = self.send_head()
        if self._file:
            # Stream the file one chunk at a time whenever the client
            # socket becomes writable.
            self._srcid = gobject.io_add_watch(self.wfile, gobject.IO_OUT |
                                               gobject.IO_ERR,
                                               self._send_next_chunk)
        else:
            self._cleanup()

    def _send_next_chunk(self, source, condition):
        # Returning False removes the gobject watch and ends the transfer.
        if condition & gobject.IO_ERR:
            self._cleanup()
            return False
        if not (condition & gobject.IO_OUT):
            self._cleanup()
            return False
        data = self._file.read(self.CHUNK_SIZE)
        count = os.write(self.wfile.fileno(), data)
        # A short write or a short read (EOF) both terminate the stream.
        if count != len(data) or len(data) != self.CHUNK_SIZE:
            self._cleanup()
            return False
        return True

    def _cleanup(self):
        # Release the streamed file, the write watch, and both socket files.
        if self._file:
            self._file.close()
            self._file = None
        if self._srcid > 0:
            gobject.source_remove(self._srcid)
            self._srcid = 0
        if not self.wfile.closed:
            self.wfile.flush()
        self.wfile.close()
        self.rfile.close()

    def finish(self):
        """Close the sockets when we're done, not before"""
        pass

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        ** [dcbw] modified to send Content-disposition filename too
        """
        path = self.translate_path(self.path)
        if not path or not os.path.exists(path):
            self.send_error(404, "File not found")
            return None

        f = None
        if os.path.isdir(path):
            # Serve an index file if the directory has one, otherwise a
            # generated directory listing.
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        self.send_header("Content-Length", str(os.fstat(f.fileno())[6]))
        self.send_header("Content-Disposition", 'attachment; filename="%s"' %
                         os.path.basename(path))
        self.end_headers()
        return f
class GlibURLDownloader(gobject.GObject):
    """Grabs a URL in chunks, returning to the mainloop after each chunk"""

    # Signals: "finished"(local_path, suggested_name), "error"(message),
    # "progress"(bytes_written_so_far).
    __gsignals__ = {
        'finished': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
                    ([gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT])),
        'error': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
                    ([gobject.TYPE_PYOBJECT])),
        'progress': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
                    ([gobject.TYPE_PYOBJECT]))
    }

    # Bytes read from the network per mainloop iteration.
    CHUNK_SIZE = 4096

    def __init__(self, url, destdir=None):
        # destdir defaults to the system temp directory.
        self._url = url
        if not destdir:
            destdir = tempfile.gettempdir()
        self._destdir = destdir
        self._srcid = 0
        self._fname = None
        self._outf = None
        self._suggested_fname = None
        self._info = None
        self._written = 0
        gobject.GObject.__init__(self)

    def start(self, destfile=None, destfd=None):
        # Open the URL and decide where the bytes go: a caller-supplied
        # file/descriptor, or a fresh temp file in destdir.
        self._info = urllib.urlopen(self._url)
        self._outf = None
        self._fname = None
        if destfd and not destfile:
            raise ValueError("Must provide destination file too when" \
                             "specifying file descriptor")
        if destfile:
            self._suggested_fname = os.path.basename(destfile)
            self._fname = os.path.abspath(os.path.expanduser(destfile))
            if destfd:
                # Use the user-supplied destination file descriptor
                self._outf = destfd
            else:
                self._outf = os.open(self._fname, os.O_RDWR |
                                     os.O_TRUNC | os.O_CREAT, 0644)
        else:
            # Derive a filename from the response headers / URL suffix.
            fname = self._get_filename_from_headers(self._info.headers)
            self._suggested_fname = fname
            garbage_, path = urllib.splittype(self._url)
            garbage_, path = urllib.splithost(path or "")
            path, garbage_ = urllib.splitquery(path or "")
            path, garbage_ = urllib.splitattr(path or "")
            suffix = os.path.splitext(path)[1]
            (self._outf, self._fname) = tempfile.mkstemp(suffix=suffix,
                                                         dir=self._destdir)

        # Read chunks from the mainloop whenever data is available.
        fcntl.fcntl(self._info.fp.fileno(), fcntl.F_SETFD, os.O_NDELAY)
        self._srcid = gobject.io_add_watch(self._info.fp.fileno(),
                                           gobject.IO_IN | gobject.IO_ERR,
                                           self._read_next_chunk)

    def cancel(self):
        # Abort an in-progress download and remove the partial file.
        if self._srcid == 0:
            raise RuntimeError("Download already canceled or stopped")
        self.cleanup(remove=True)

    def _get_filename_from_headers(self, headers):
        # Parse 'filename=...' out of a Content-Disposition header,
        # stripping surrounding quotes; None if absent.
        if not headers.has_key("Content-Disposition"):
            return None

        ftag = "filename="
        data = headers["Content-Disposition"]
        fidx = data.find(ftag)
        if fidx < 0:
            return None
        fname = data[fidx+len(ftag):]
        if fname[0] == '"' or fname[0] == "'":
            fname = fname[1:]
        if fname[len(fname)-1] == '"' or fname[len(fname)-1] == "'":
            fname = fname[:len(fname)-1]
        return fname

    def _read_next_chunk(self, source, condition):
        # Returning False removes the gobject watch and ends the download.
        if condition & gobject.IO_ERR:
            self.cleanup(remove=True)
            self.emit("error", "Error downloading file.")
            return False
        elif not (condition & gobject.IO_IN):
            # shouldn't get here, but...
            return True

        try:
            data = self._info.fp.read(self.CHUNK_SIZE)
            count = os.write(self._outf, data)
            self._written += len(data)

            # error writing data to file?
            if count < len(data):
                self.cleanup(remove=True)
                self.emit("error", "Error writing to download file.")
                return False

            self.emit("progress", self._written)

            # done?
            if len(data) < self.CHUNK_SIZE:
                self.cleanup()
                self.emit("finished", self._fname, self._suggested_fname)
                return False
        except Exception, err:
            self.cleanup(remove=True)
            self.emit("error", "Error downloading file: %s" % err)
            return False
        return True

    def cleanup(self, remove=False):
        # Drop the watch, the response object, and the output descriptor;
        # optionally delete the (partial) file.
        if self._srcid > 0:
            gobject.source_remove(self._srcid)
            self._srcid = 0
        del self._info
        self._info = None
        os.close(self._outf)
        if remove:
            os.remove(self._fname)
        self._outf = None
| [
"dannyiland@gmail.com"
] | dannyiland@gmail.com |
9819bf6e91ec963b55aa396ebec70a5559039d37 | 0896bfbf84c6fdf33b5d2790f633d5dab9d5b386 | /blog/urls.py | a74813ea33b354b991217b78c9dd4bfd6f3035d3 | [] | no_license | wlsdnrsmell/HelloKorea | b88f788a04f126c140e3e2b4595558fea52af8ef | e3d9a61c60c34b891d82136abe9287f693c1d03e | refs/heads/master | 2021-01-01T05:18:46.238950 | 2016-04-28T03:42:12 | 2016-04-28T03:42:12 | 57,102,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.conf.urls import url
from . import views
# URL routes for the blog app: list view at the root, detail view at /<pk>/.
urlpatterns = [
    url(r'^$',views.post_list, name = 'post_list'),
    # regular expression capturing the post's primary key from the URL
    url(r'^(?P<pk>\d+)/$',views.post_detail, name = 'post_detail'),
]
"jin3670@gmail.com"
] | jin3670@gmail.com |
bbb28e56d8c01176b8a73e2d3a56b3d1b959658e | 56d5b5ec8b528e7518d47e7704bd0c93fee2682f | /genomics/conversion/containment2csv.py | 9f321407adf83d7c73078da587af9b7db985363c | [] | no_license | sethips/scripts | 4c55a123d72bae475dad961802d2ee3f21d3493d | 58ce36f30391531d7add6ff412e8aa86cc2ffb82 | refs/heads/master | 2020-04-01T18:30:08.030427 | 2016-01-16T06:27:51 | 2016-01-16T06:27:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
# ----------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--containment', required=True)
parser.add_argument('-s', '--csv', required=True)
args = parser.parse_args()

# ----------------------------------------------------------------------------

# load containment: each line is "<contig> <chrom> <start> <end>"; group the
# "<chrom>-<start>-<end>" match strings by contig.
containment = dict()
with open(args.containment) as f:
    for line in f:
        fields = line.strip().split()
        ctg, chrom, start, end = fields
        if ctg not in containment:
            containment[ctg] = list()
        containment[ctg].append('%s-%s-%s' % (chrom, start, end))

# Write one CSV row per contig, joining multiple matches with ':'.
# Fixes: the header line previously lacked a trailing newline (it ran into
# the first data row), the output file was never closed, and
# dict.iteritems() is Python 2-only — items() works on both.
with open(args.csv, 'w') as out:
    out.write('Node name,Containment Label\n')
    for ctg, matches in containment.items():
        matches_str = ':'.join(matches)
        out.write('%s,%s\n' % (ctg, matches_str))
| [
"kuleshov@stanford.edu"
] | kuleshov@stanford.edu |
be91097f8dd7ad8ebf07bb6e5d0d496ec1966715 | 2010834b349c0a0c99e675c9b4b10b3f7e6d6a1d | /interface_django/interface_django/settings.py | 8189de7cbb085fab1ff68cb61f8ef6ab42cf7d5f | [] | no_license | gperroch/Interface_Informations_gatherer | 40903fae8d5f0564dbb38c740d4601a98566d09d | 55f21772ed8f270cd952caf8e74b99c9046d7f9b | refs/heads/master | 2021-06-14T18:48:05.905200 | 2017-03-25T17:57:12 | 2017-03-25T17:57:12 | 86,177,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,237 | py | """
Django settings for interface_django project.
Generated by 'django-admin startproject' using Django 1.11.dev20160717191839.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'tpq)tyc-a&$m6x)p3&0+!-$a9d=_#em@qg_2^1eq(e8np*_n=o'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'informations_gatherer.apps.InformationsGathererConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'interface_django.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'interface_django.wsgi.application'


# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables.  No HOST/PORT is given, so Django falls back to
# its default local MySQL connection.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'informations_gatherer',
        'USER': 'django',
        'PASSWORD': 'thisisnotafuckingjoke'
    }
}


# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/

STATIC_URL = '/static/'
| [
"gperroch@student.42.fr"
] | gperroch@student.42.fr |
3ab12b9e1c9dbf14a106add3cb2c61501846d75f | 2f18669c3103856e230ad6e6016a6cc4cb2c8d6d | /module.py | 476bccb5113d636c1c82d04790025b6dabbe7ef9 | [] | no_license | mtsatsev/new-devices-lab | 58c942b08044ac1eb583e83d19fb6b560713a042 | da3920a5cd0008446efe12908c630bf99cad20e1 | refs/heads/master | 2022-10-09T20:10:53.152100 | 2020-06-09T00:15:58 | 2020-06-09T00:15:58 | 267,870,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import numpy as np
import cv2
class Device():
    """Opens the default video-capture device and shows a live preview
    window until the user presses 'q'.
    """

    def __init__(self):
        # Device index 0 is the system default camera.
        self.device = 0
        self.capture = cv2.VideoCapture(self.device)
        if not(self.capture.isOpened()):
            print("I failed")
            # Retry opening the same device index.
            self.capture.open(self.device)
        if self.capture.isOpened():
            print("HEYLYA")
        # Preview loop: read and display frames until 'q' is pressed.
        # NOTE(review): ret/frame are not checked, so frames are shown even
        # if the capture never opened — confirm intended.
        while True:
            ret,frame = self.capture.read()
            cv2.imshow("str", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.capture.release()
        cv2.destroyAllWindows()
def main():
    """Entry point: open the default camera and run the preview loop."""
    device = Device()


if __name__ == "__main__":
    # Guard the call so importing this module does not grab the camera;
    # the original called main() unconditionally at import time.
    main()
| [
"mario.tsatsev@student.ru.nl"
] | mario.tsatsev@student.ru.nl |
b0d6022b68e4134075b56f5041dbad99d551e348 | 45803e79987429bc3685d9bd3a79e124631a9dad | /read_data.py | bdf7fe654a934d85b97b45fcafafd36deb4c77cd | [] | no_license | qiuyumo208/NSCaching | ec1a56b25422693f156a2268c12e2a60afe592fb | 183da2023be6a654efa7a2e147939b3831873a49 | refs/heads/master | 2020-05-17T07:02:06.988134 | 2019-03-25T05:43:10 | 2019-03-25T05:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,582 | py | import os
import torch
import numpy as np
from collections import defaultdict
class DataLoader:
    """Loads a knowledge-graph dataset (entity/relation id counts plus
    train/valid/test triple files in "<head> <tail> <relation>" id format)
    and builds the index structures used for negative-sample caching.
    """

    def __init__(self, task_dir, n_sample):
        # task_dir: dataset directory; n_sample: width of each negative cache.
        self.inPath = task_dir
        self.n_sample = n_sample
        print("The toolkit is importing datasets.\n")
        # First line of each id file holds the total count.
        with open(os.path.join(self.inPath, "relation2id.txt")) as f:
            tmp = f.readline()
            self.n_rel = int(tmp.strip())
            print("The total of relations is {}".format(self.n_rel))
        with open(os.path.join(self.inPath, "entity2id.txt")) as f:
            tmp = f.readline()
            self.n_ent = int(tmp.strip())
            print("The total of entities is {}".format(self.n_ent))
        self.train_head, self.train_tail, self.train_rela = self.read_data("train2id.txt")
        self.valid_head, self.valid_tail, self.valid_rela = self.read_data("valid2id.txt")
        self.test_head, self.test_tail, self.test_rela = self.read_data("test2id.txt")

    def read_data(self, filename):
        """Read a triple file and return parallel (head, tail, relation)
        lists, sorted lexicographically with exact duplicates removed."""
        allList = []
        head = []
        tail = []
        rela = []
        with open(os.path.join(self.inPath, filename)) as f:
            tmp = f.readline()
            total = int(tmp.strip())
            for i in range(total):
                tmp = f.readline()
                h, t, r = tmp.strip().split()
                h, t, r = int(h), int(t), int(r)
                allList.append((h, t, r))
        # Sorting groups duplicate triples next to each other so one pass
        # comparing neighbours removes them.
        allList.sort(key=lambda l:(l[0], l[1], l[2]))
        head.append(allList[0][0])
        tail.append(allList[0][1])
        rela.append(allList[0][2])
        for i in range(1, total):
            if allList[i] != allList[i-1]:
                h, t, r = allList[i]
                head.append(h)
                tail.append(t)
                rela.append(r)
        return head, tail, rela

    def graph_size(self):
        """Return (number of entities, number of relations)."""
        return (self.n_ent, self.n_rel)

    def load_data(self, index):
        """Return the (head, tail, relation) lists for 'train', 'valid',
        or — for any other value — the test split."""
        if index == 'train':
            return self.train_head, self.train_tail, self.train_rela
        elif index == 'valid':
            return self.valid_head, self.valid_tail, self.valid_rela
        else:
            return self.test_head, self.test_tail, self.test_rela

    def heads_tails(self):
        """Over ALL splits, build sparse 0/1 vectors (length n_ent) of the
        known heads for each (tail, relation) key and the known tails for
        each (head, relation) key."""
        all_heads = self.train_head + self.valid_head + self.test_head
        all_tails = self.train_tail + self.valid_tail + self.test_tail
        all_relas = self.train_rela + self.valid_rela + self.test_rela
        heads = defaultdict(lambda: set())
        tails = defaultdict(lambda: set())
        for h, t, r in zip(all_heads, all_tails, all_relas):
            tails[(h, r)].add(t)
            heads[(t, r)].add(h)
        heads_sp = {}
        tails_sp = {}
        for k in heads.keys():
            heads_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(heads[k])]),
                    torch.ones(len(heads[k])), torch.Size([self.n_ent]))
        for k in tails.keys():
            tails_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(tails[k])]),
                    torch.ones(len(tails[k])), torch.Size([self.n_ent]))
        print("heads/tails size:", len(tails), len(heads))
        return heads_sp, tails_sp

    def get_cache_list(self):
        """Build the negative-sample cache structures for training triples.

        Returns
        -------
        head_idx, tail_idx : per-triple index into the head/tail caches
        head_cache, tail_cache : (n_keys, n_sample) arrays of candidate
            negative entity ids, initialized uniformly at random
        head_pos, tail_pos : per-key lists of the observed true heads/tails
        """
        head_cache = {}
        tail_cache = {}
        head_pos = []
        tail_pos = []
        head_idx = []
        tail_idx = []
        count_h = 0
        count_t = 0
        # Assign each distinct (tail, rela) / (head, rela) key a dense index
        # and collect the true entities seen under that key.
        for h, t, r in zip(self.train_head, self.train_tail, self.train_rela):
            if not head_cache.__contains__((t,r)):
                head_cache[(t,r)] = count_h
                head_pos.append([h])
                count_h += 1
            else:
                head_pos[head_cache[(t,r)]].append(h)
            if not tail_cache.__contains__((h,r)):
                tail_cache[(h,r)] = count_t
                tail_pos.append([t])
                count_t += 1
            else:
                tail_pos[tail_cache[(h,r)]].append(t)
            head_idx.append(head_cache[(t,r)])
            tail_idx.append(tail_cache[(h,r)])
        head_idx = np.array(head_idx, dtype=int)
        tail_idx = np.array(tail_idx, dtype=int)
        # The key->index dicts are no longer needed; the names are reused
        # for the randomly initialized candidate caches.
        head_cache = np.random.randint(low=0, high=self.n_ent, size=(count_h, self.n_sample))
        tail_cache = np.random.randint(low=0, high=self.n_ent, size=(count_t, self.n_sample))
        print('head/tail_idx: head/tail_cache', len(head_idx), len(tail_idx), head_cache.shape, tail_cache.shape, len(head_pos), len(tail_pos))
        return head_idx, tail_idx, head_cache, tail_cache, head_pos, tail_pos
| [
"yzhangee@connect.ust.hk"
] | yzhangee@connect.ust.hk |
738cbf07d2935862dba2c498ee1e47730367a1c5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03694/s635934204.py | 90dc16375dd7f23d2ceba7c002a5c596fdaaa5cd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | n = int(input())
# Read the values and print the spread between largest and smallest.
# Only the extremes matter, so min/max in O(n) replaces the sort, and a
# fresh loop variable avoids shadowing the previously read `n`.
values = [int(token) for token in input().split()]
print(max(values) - min(values))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d09f709088884cf99d562490eeb1218f436d42db | 175813b661f6ec1b8403fda2beb3ec42365a0501 | /Algorithms/026-删除有序数组中的重复项.py | 63c52c6aee0dedef7155a7d4a7a90becd688bbbe | [] | no_license | jty-cu/LeetCode | 21eccc9958e590f8093a5a47d853727b45ca94a5 | 16aed82c76c9e9adc8e04ce0730160601f8f17a4 | refs/heads/master | 2023-05-26T11:58:12.887504 | 2023-05-25T05:48:34 | 2023-05-25T05:48:34 | 287,852,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | ## 数组;快慢指针
'''
Note how the fast/slow two-pointer pattern is used here:
when copying values with the two pointers, pay attention to the
order in which the indices are updated.
'''
from typing import List  # the annotation below needs List in scope


class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """Remove duplicates from sorted *nums* in place; return the new length.

        Two-pointer technique: ``slow`` marks the end of the deduplicated
        prefix, ``fast`` scans ahead.  Fixes over the original: an empty
        list now returns 0 (it previously returned 1), and ``List`` is
        imported so the annotation resolves.
        """
        if not nums:
            return 0
        slow = 0
        for fast in range(1, len(nums)):
            if nums[fast] != nums[slow]:
                # Order matters: advance slow first, then overwrite.
                slow += 1
                nums[slow] = nums[fast]
        return slow + 1
| [
"eden1519@outlook.com"
] | eden1519@outlook.com |
ccb789e24d4482568e50f13c82c13a90c42ef5ca | e81026ef08ccb6109fbe0ac44db3aed9d86bb196 | /src/app/pyserver_development/tests/packing_advisor_gravity_check_tests.py | 6762128231cc8a39662fe474b60f96bb3dee92ab | [] | no_license | sheikhomar/axolotl | a6573a11875938b3017583a7c31c81e7353992fe | ccf80aa16bf5feec7a3bfc9ecc8ab06c7074f15d | refs/heads/master | 2021-03-30T17:27:11.764826 | 2016-12-20T18:38:07 | 2016-12-20T18:38:07 | 67,419,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | import unittest
from lib import Bin
from lib import Package
from lib import PackingAdvisor
class PackingAdvisorGravityCheckTest(unittest.TestCase):
    """Exercises PackingAdvisor's gravity (support) check: the advisor
    compares calc_gravity() against a 0.75 threshold before placement."""

    def test_gravity_check(self):
        # 4x4 bin, 3 layers high.
        bin1 = Bin(4,4,3)
        pa = PackingAdvisor(bin1)
        p1 = Package(2, 2)
        p2 = Package(4, 2)
        p3 = Package(4, 2)
        p4 = Package(4, 2)
        pa.handle(p1)
        pa.handle(p2)
        pa.bins[0].new_layer()
        pa.x = 0
        pa.y = 0
        # Placement at (0, 0) must stay within the 0.75 gravity limit.
        self.assertFalse(pa.calc_gravity(pa.bins[0].current_layer, p3) > 0.75)
        pa.handle(p3)
        pa.bins[0].new_layer()
        pa.x = 2
        pa.y = 0
        # Offset placement at (2, 0) must exceed the 0.75 gravity limit.
        self.assertTrue(pa.calc_gravity(pa.bins[0].current_layer, p4) > 0.75)

    def test_gravity_capable_of_opening_new_layers(self):
        # Six 4x2 packages in a 4x4x4 bin: handling them all should leave
        # the advisor on layer number 4.
        bin1 = Bin(4,4,4)
        p1 = Package(4,2)
        p2 = Package(4,2)
        p3 = Package(4,2)
        p4 = Package(width=4, length=2, colour=3)
        p5 = Package(4,2)
        p6 = Package(4,2)
        pa = PackingAdvisor(bin1)
        pa.handle(p1)
        pa.handle(p2)
        pa.handle(p3)
        pa.handle(p4)
        pa.handle(p5)
        pa.handle(p6)
        self.assertEqual(pa.bins[0].current_layer.find_layer_number(), 4)
| [
"Frederik_KKristensen@hotmail.com"
] | Frederik_KKristensen@hotmail.com |
2e00f22fba30e14fcd9fcdefa4a10f3e22b18248 | 58553afdd5dd51b5b2a84abb536a3b5c20c5e19e | /Decider/utils.py | 210433aefe249065ba9da482ae1b7ad2832dcc4a | [] | no_license | magentanova/decider | 819ad4fe4529de48187f7afdbcb8f7aa2ca8163b | 4e73f47ddb4a6b7d07c457cb4bee77794490a056 | refs/heads/master | 2020-04-08T16:48:16.341117 | 2018-11-30T04:55:14 | 2018-11-30T04:55:14 | 159,535,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | def serialize_query_result(ClassName):
    # Applies to_dict to every row of ClassName's query.
    # NOTE(review): on Python 3, `map` returns a lazy iterator, not a list —
    # callers that need a list should wrap it.
    return map(ClassName.to_dict, ClassName.query.all())
"richards.justind@gmail.com"
] | richards.justind@gmail.com |
9442cca3e5807f52ba4a9fe7804330d87d241097 | c0425cb3c67b0f9e9077475d70e463c06aa176f6 | /decompress_compressed_string.py | 3a54dbe729043c8cc29371a643f7036455eeb061 | [] | no_license | EugeneStill/PythonCodeChallenges | e3eba938c74cb64347843cb7e8c8937b6d50aed8 | 4ddeb506f984503d83cb0ad1a2fa2e915009c38f | refs/heads/main | 2023-04-29T00:26:26.537946 | 2023-04-15T01:18:33 | 2023-04-15T01:18:33 | 145,318,074 | 0 | 1 | null | 2023-04-15T01:18:34 | 2018-08-19T16:09:34 | Python | UTF-8 | Python | false | false | 878 | py | import unittest
class Decompress(unittest.TestCase):
    """
    Your input is a compressed string of the format number[string] and the decompressed output form should be the
    string written number times. For example:

    3[abc]4[ab]c

    Would be output as

    abcabcabcababababc

    Number can have more than one digit. For example, 10[a] is allowed, and just means aaaaaaaaaa

    One repetition can occur inside another. For example, 2[3[a]b] decompresses into aaabaaab

    Characters allowed as input include digits, small English letters and brackets [ ].

    Digits are only to represent amount of repetitions.

    Letters are just letters.

    Brackets are only part of syntax of writing repeated substring.

    Input is always valid, so no need to check its validity.
    """
    def restore_original_str(self, cs):
        """Decompress *cs* and return the expanded string.

        Implements the previously stubbed-out method (it only printed a
        placeholder).  A stack of (outer_segment, repeat_count) frames
        handles nested number[...] groups in a single left-to-right pass.
        """
        repeat = 0      # multi-digit repeat count currently being parsed
        current = []    # characters of the innermost open segment
        stack = []      # saved (outer_segment, repeat_count) frames
        for ch in cs:
            if ch.isdigit():
                repeat = repeat * 10 + int(ch)
            elif ch == '[':
                stack.append((current, repeat))
                current = []
                repeat = 0
            elif ch == ']':
                outer, count = stack.pop()
                outer.append(''.join(current) * count)
                current = outer
            else:
                current.append(ch)
        return ''.join(current)
| [
"eugenestill@eugenes-mbp.lan"
] | eugenestill@eugenes-mbp.lan |
941cc73462d265c18ea95e53a1fa2f7fd4a76a01 | fda7d21dea4a6c5c478804cf3664729c64820176 | /rpmci/cloudinit.py | 669b31e409dca273e55144cf076e5871307c75e8 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | msehnout/rpmci | 8c3eb4d1865c0c85dac0730294911a3dab6c5048 | e679226951e5a1a377c32853a78cd2c534a67fea | refs/heads/main | 2023-04-07T10:39:13.335099 | 2020-12-07T10:52:38 | 2020-12-07T10:52:38 | 301,734,388 | 0 | 0 | null | 2020-10-06T13:22:39 | 2020-10-06T13:22:39 | null | UTF-8 | Python | false | false | 5,891 | py | import base64
import logging
import pathlib
import subprocess
from pathlib import Path
import yaml
class CloudInit:
    """Builder for a cloud-init NoCloud data source.

    Collects yum repositories, one user account, an SSH key pair and SSH
    client configuration, then renders everything either as a
    ``#cloud-config`` YAML document or as a NoCloud ISO image that a VM
    can consume on boot.
    """

    def __init__(self):
        # name -> yum repo definition dict (for the cloud-init yum_repos module)
        self.repos = {}
        # user account fields merged into the top level of the user-data
        self.user = {}
        # optional {"public_key": ..., "private_key": ...} written to /etc/ssh
        self.ssh_keypair = None
        # host alias -> ssh_config options rendered into /etc/ssh/ssh_config
        self.ssh_configs = {}

    def add_repo(self, name: str, baseurl: str):
        """Register a yum repository (enabled, gpgcheck off). Returns self for chaining."""
        self.repos[name] = {
            "name": name,
            "baseurl": baseurl,
            "enabled": True,
            "gpgcheck": False,
        }
        return self

    def set_user(self, username: str, password: str, ssh_pubkey: str):
        """Configure the login user: password auth, authorized key, passwordless sudo.

        Returns self for chaining.
        """
        self.user["user"] = username
        self.user["password"] = password
        self.user["ssh_authorized_keys"] = [ssh_pubkey]
        self.user["sudo"] = "ALL=(ALL) NOPASSWD:ALL"
        return self

    def add_ssh_key_pair(self, public_key: str, private_key: str):
        """Store an SSH key pair to be written to /etc/ssh/id_rsa{,.pub}.

        Returns self for chaining.
        """
        self.ssh_keypair = {
            "public_key": public_key,
            "private_key": private_key,
        }
        return self

    def add_ssh_config(self, host_alias: str, hostname: str, port: int, username: str):
        """Create new entry in /etc/ssh_config.

        Parameters
        ----------
        host_alias  The name that the user can use to connect to this machine.
        hostname    Domain name or IP address of the machine.
        port        Port where the SSH daemon listens.
        username    User with known password or SSH key.
        """
        self.ssh_configs[host_alias] = {
            "HostName": hostname,
            "Port": port,
            "User": username,
            "IdentityFile": "/etc/ssh/id_rsa",
            "StrictHostKeyChecking": "no",
        }
        return self

    def get_userdata_str(self):
        """Render the collected configuration as a ``#cloud-config`` YAML string.

        User fields are merged into the top level of the document; key pair
        and ssh_config entries become base64-encoded ``write_files`` items.
        """
        write_files = []
        user_data = {}
        if len(self.repos.keys()) > 0:
            user_data["yum_repos"] = self.repos
        if len(self.user.keys()) > 0:
            # Password logins: do not expire the password, allow ssh password auth.
            user_data["chpasswd"] = {
                "expire": False,
            }
            user_data["ssh_pwauth"] = True
            for k,v in self.user.items():
                user_data[k] = v
        if self.ssh_keypair is not None:
            # cloud-init write_files requires base64 content for safety.
            write_files += [
                {
                    "path": "/etc/ssh/id_rsa.pub",
                    "encoding": "b64",
                    "content": base64.b64encode(self.ssh_keypair["public_key"].encode("utf-8")).decode("utf-8"),
                    "permissions": "0644",
                },
                {
                    "path": "/etc/ssh/id_rsa",
                    "encoding": "b64",
                    "content": base64.b64encode(self.ssh_keypair["private_key"].encode("utf-8")).decode("utf-8"),
                    "permissions": "0644",
                }
            ]
        if len(self.ssh_configs.keys()) > 0:
            # Render a classic ssh_config: one "Host" stanza per alias.
            ssh_config_content = ""
            for host, config in self.ssh_configs.items():
                ssh_config_content += f"Host {host}\n"
                for k,v in config.items():
                    ssh_config_content += f"    {k} {v}\n"
            write_files += [
                {
                    "path": "/etc/ssh/ssh_config",
                    "encoding": "b64",
                    "content": base64.b64encode(ssh_config_content.encode("utf-8")).decode("utf-8"),
                    "permissions": "0644",
                }
            ]
        if len(write_files) > 0:
            user_data["write_files"] = write_files
        user_data_str = yaml.dump(user_data, Dumper=yaml.SafeDumper)
        return f"#cloud-config\n{user_data_str}"

    @staticmethod
    def _write_userdata_file(filename: Path, content: str):
        """Write user-data file for cloud-init."""
        logging.info("Writing user-data file")
        with open(filename, "w") as f:
            f.write(content)

    @staticmethod
    def _write_metadata_file(filename: Path, vm_name: str):
        """Write meta-data file for cloud-init."""
        logging.info("Writing meta-data file")
        with open(filename, "w") as f:
            print("instance-id: nocloud", file=f)
            print(f"local-hostname: {vm_name}", file=f)

    def get_iso(self, cache_dir: Path, vm_name: str) -> Path:
        """Build a NoCloud seed ISO (volume id "cidata") in cache_dir and return its path.

        Requires the genisoimage tool on PATH; raises CalledProcessError on failure.
        """
        logging.info("Generating cloud-init ISO file")
        cloudinit_file = cache_dir.joinpath(f"{vm_name}.iso")
        userdata_file = cache_dir.joinpath("user-data")
        self._write_userdata_file(userdata_file, self.get_userdata_str())
        metadata_file = cache_dir.joinpath("meta-data")
        self._write_metadata_file(metadata_file, vm_name)
        # Create an ISO that cloud-init can consume with userdata.
        subprocess.run(["genisoimage",
                        "-quiet",
                        "-input-charset", "utf-8",
                        "-output", cloudinit_file,
                        "-volid", "cidata",
                        "-joliet",
                        "-rock",
                        "-quiet",
                        "-graft-points",
                        str(userdata_file),
                        str(metadata_file)],
                       check=True)
        return pathlib.Path(cloudinit_file)
def test_CloudInit_get_userdata_str():
    """Smoke test: render user-data and check the user name and generated ssh_config.

    Fixes two defects of the original test:
    - ``get_userdata_str`` merges the account fields into the TOP LEVEL of the
      document (``user_data[k] = v``), so the user name is at
      ``loaded_yaml["user"]``; the old ``loaded_yaml["users"][0]["user"]``
      lookup raised KeyError.
    - The ssh_config comparison is made indentation-insensitive so it does not
      break on cosmetic whitespace changes in the rendered stanza.
    """
    cloudinit = CloudInit()
    cloudinit.add_repo("osbuild", "osbuild.org")
    cloudinit.set_user("admin", "foobar", "abc")
    cloudinit.add_ssh_key_pair("pubkey", "privkey")
    cloudinit.add_ssh_config("target", "127.0.0.1", 2222, "admin")
    resulting_string = cloudinit.get_userdata_str()
    loaded_yaml = yaml.load(resulting_string, Loader=yaml.SafeLoader)
    assert loaded_yaml["user"] == "admin"
    # write_files[0] and [1] are the key pair; [2] is the ssh_config stanza.
    generated_ssh_config = base64.b64decode(loaded_yaml["write_files"][2]["content"].encode("utf-8")).decode("utf-8")
    expected_lines = [
        "Host target",
        "HostName 127.0.0.1",
        "Port 2222",
        "User admin",
        "IdentityFile /etc/ssh/id_rsa",
        "StrictHostKeyChecking no",
    ]
    assert [line.strip() for line in generated_ssh_config.strip().splitlines()] == expected_lines
| [
"sehnoutka.martin@gmail.com"
] | sehnoutka.martin@gmail.com |
6764cba22e307454492da2ab7d46f84b4f731f33 | e11dc1b8affc293ff4eb38b5506c7a43f080441a | /multipage.py | 7b30c0398070ad8c9ef410e7499314c514b31f52 | [] | no_license | YYx00xZZ/olxer | c8231b838630f69e9b4e3547e33ce0570b1b64fd | 2d26d79974c1ad37de662a537897d0d747e453c1 | refs/heads/master | 2022-04-19T21:29:28.603619 | 2020-04-21T06:00:24 | 2020-04-21T06:00:24 | 256,202,863 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | # import click
# import requests
# import random
# import logging
# from time import sleep
# from itertools import cycle
# # from bs4 import BeautifulSoup
# from fake_useragent import UserAgent, FakeUserAgentError
from helpers import random_header, get_proxies, create_pools, getSoup
def getPages(link): #remove soup
""" Fetch link for all pages """
soup = getSoup(link)
if (soup.find('div', class_='pager rel clr')) != None:
lpage = soup.find(attrs={"data-cy" : "page-link-last"}).span.text
links = [link+f'/?page={p}' for p in range(2,int(lpage))]
pages = (page for page in links)
return pages
else:
pass | [
"47386361+YYx00xZZ@users.noreply.github.com"
] | 47386361+YYx00xZZ@users.noreply.github.com |
33aba823f1d698ffa709894f8619f24b983a7bcb | 3429ae58db1573fd67fe1b71adb88f33e33c567c | /剑指20-表示数值的字符串/solution.py | d4c74d985271d53029b7e2cc31c779d6a0064a3e | [
"MIT"
] | permissive | zhikunluo175/daily-leetcode | 3855197c21554705e4d059cddb5a19eed1e348d7 | 03532ad697562495724f579e33dacc087711eea4 | refs/heads/master | 2022-12-18T14:34:04.439281 | 2020-09-14T02:25:20 | 2020-09-14T02:25:20 | 292,040,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | # coding=utf-8
class Solution(object):
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
try:
s_f = float(s)
except ValueError as e:
return False
return True
| [
"zhikun.luo@train042.hogpu.cc"
] | zhikun.luo@train042.hogpu.cc |
3984879dca7b2eb8a533b2a2e27f2befe03e82e2 | 82f1b4c0bccd66933f93d02703a3948f08ebc1a9 | /tests/pytests/unit/states/test_boto_dynamodb.py | b8a59a2e6adb3ee1d5b4ce7fa5429b580ab4667b | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | waynew/salt | ddb71301944b64f5429e0dbfeccb0ea873cdb62d | ac9f139f795295de11be3fb1490ab8cec29611e5 | refs/heads/master | 2023-01-24T10:43:53.104284 | 2022-03-29T04:27:22 | 2022-03-29T13:45:09 | 163,890,509 | 1 | 0 | Apache-2.0 | 2019-01-02T21:17:12 | 2019-01-02T21:17:11 | null | UTF-8 | Python | false | false | 3,478 | py | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.states.boto_dynamodb as boto_dynamodb
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {boto_dynamodb: {}}
def test_present():
"""
Test to ensure the DynamoDB table exists.
"""
name = "new_table"
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
exists_mock = MagicMock(side_effect=[True, False, False])
dict_mock = MagicMock(return_value={})
mock_bool = MagicMock(return_value=True)
pillar_mock = MagicMock(return_value=[])
with patch.dict(
boto_dynamodb.__salt__,
{
"boto_dynamodb.exists": exists_mock,
"boto_dynamodb.describe": dict_mock,
"config.option": dict_mock,
"pillar.get": pillar_mock,
"boto_dynamodb.create_table": mock_bool,
},
):
comt = (
"DynamoDB table {0} exists,\n"
"DynamoDB table {0} throughput matches,\n"
"All global secondary indexes match,\n".format(name)
)
ret.update({"comment": comt})
assert boto_dynamodb.present(name) == ret
with patch.dict(boto_dynamodb.__opts__, {"test": True}):
comt = "DynamoDB table {} would be created.".format(name)
ret.update({"comment": comt, "result": None})
assert boto_dynamodb.present(name) == ret
changes = {
"new": {
"global_indexes": None,
"hash_key": None,
"hash_key_data_type": None,
"local_indexes": None,
"range_key": None,
"range_key_data_type": None,
"read_capacity_units": None,
"table": "new_table",
"write_capacity_units": None,
}
}
with patch.dict(boto_dynamodb.__opts__, {"test": False}):
comt = (
"DynamoDB table {} was successfully created,\n"
"DynamoDB table new_table throughput matches,\n".format(name)
)
ret.update({"comment": comt, "result": True, "changes": changes})
assert ret == boto_dynamodb.present(name)
def test_absent():
"""
Test to ensure the DynamoDB table does not exist.
"""
name = "new_table"
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
mock = MagicMock(side_effect=[False, True, True])
mock_bool = MagicMock(return_value=True)
with patch.dict(
boto_dynamodb.__salt__,
{"boto_dynamodb.exists": mock, "boto_dynamodb.delete": mock_bool},
):
comt = "DynamoDB table {} does not exist".format(name)
ret.update({"comment": comt})
assert boto_dynamodb.absent(name) == ret
with patch.dict(boto_dynamodb.__opts__, {"test": True}):
comt = "DynamoDB table {} is set to be deleted".format(name)
ret.update({"comment": comt, "result": None})
assert boto_dynamodb.absent(name) == ret
changes = {
"new": "Table new_table deleted",
"old": "Table new_table exists",
}
with patch.dict(boto_dynamodb.__opts__, {"test": False}):
comt = "Deleted DynamoDB table {}".format(name)
ret.update({"comment": comt, "result": True, "changes": changes})
assert boto_dynamodb.absent(name) == ret
| [
"megan.wilhite@gmail.com"
] | megan.wilhite@gmail.com |
10c996ab1d3ff3f0d6e4a33e010b34379ce8aa0f | 49ee6a519bf54b83cd30c57b773497272f6c81a8 | /res/sequential.py | b2ccd644518775f5df358eb14b7872cb10201bc0 | [] | no_license | DeepLearningLibrary/Recurrent-Neural-Networks | f106e571d4c0279045c6e2210b3d454263ffba27 | c10588c36401dfbf5798371b21316807ea434685 | refs/heads/main | 2023-02-09T11:31:46.119626 | 2021-01-04T05:15:56 | 2021-01-04T05:15:56 | 326,567,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,025 | py | import numpy as np
import six
def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
    """Pad each sequence in `sequences` to the same length.

    Sequences shorter than `maxlen` are padded with `value`, longer ones are
    truncated; `padding` / `truncating` ('pre' or 'post') select which end is
    padded or cut.  Returns an array of shape
    (len(sequences), maxlen) + sample_shape.

    Raises ValueError for non-iterable input, inconsistent element shapes,
    unknown padding/truncating modes, or a string `value` with a non-string
    dtype.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    lengths = []
    for x in sequences:
        if not hasattr(x, '__len__'):
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(x))
        lengths.append(len(x))

    num_samples = len(sequences)
    if maxlen is None:
        # Guard against np.max on an empty list: no sequences -> zero columns.
        maxlen = np.max(lengths) if lengths else 0

    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break

    # np.unicode_ was removed in NumPy 2.0; on Python 3 np.str_ already covers
    # unicode strings, so a single subdtype check suffices.
    is_dtype_str = np.issubdtype(dtype, np.str_)
    # six.string_types is just (str,) on Python 3; test `str` directly and
    # drop the third-party `six` dependency.
    if isinstance(value, str) and dtype != object and not is_dtype_str:
        raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
                         "You should set `dtype=object` for variable length strings."
                         .format(dtype, type(value)))

    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" '
                             'not understood' % truncating)

        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s '
                             'is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))

        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x
def to_categorical(y, num_classes=None, dtype='float32'):
    """One-hot encode integer class labels.

    `y` may be a scalar or an array of any shape; a trailing singleton
    dimension is dropped.  The result has shape y.shape + (num_classes,),
    with `num_classes` defaulting to max(y) + 1.
    """
    labels = np.array(y, dtype='int')
    shape = labels.shape
    # Collapse a trailing (..., 1) axis so (n, 1) behaves like (n,).
    if len(shape) > 1 and shape[-1] == 1:
        shape = tuple(shape[:-1])
    flat = labels.ravel()
    if not num_classes:
        num_classes = np.max(flat) + 1
    count = flat.shape[0]
    onehot = np.zeros((count, num_classes), dtype=dtype)
    onehot[np.arange(count), flat] = 1
    return np.reshape(onehot, shape + (num_classes,))
class EchoData():
    """Random binary input series whose target is the input delayed by `echo_step`.

    The (batch_size, series_length) series is chopped along time into
    series_length // truncated_length chunks of shape
    (batch_size, truncated_length, 1).  Asking for chunk 0 draws a fresh
    random series, so iterating from 0 starts a new epoch.
    """

    def __init__(self, series_length=40000, batch_size=32,
                 echo_step=3, truncated_length=10, seed=None):
        self.series_length = series_length
        self.truncated_length = truncated_length
        self.n_batches = series_length // truncated_length
        self.echo_step = echo_step
        self.batch_size = batch_size
        if seed is not None:
            np.random.seed(seed)
        self.x_batch = None
        self.y_batch = None
        self.x_chunks = []
        self.y_chunks = []
        self.generate_new_series()
        self.prepare_batches()

    def __getitem__(self, index):
        # Chunk 0 marks the start of an epoch: regenerate the series first.
        if index == 0:
            self.generate_new_series()
            self.prepare_batches()
        return self.x_chunks[index], self.y_chunks[index]

    def __len__(self):
        return self.n_batches

    def generate_new_series(self):
        # Fair coin flips for the input; the target is the input shifted right
        # by echo_step, with the first echo_step positions zeroed out.
        signal = np.random.choice(
            2,
            size=(self.batch_size, self.series_length),
            p=[0.5, 0.5])
        echoed = np.roll(signal, self.echo_step, axis=1)
        echoed[:, :self.echo_step] = 0
        self.x_batch = signal
        self.y_batch = echoed

    def prepare_batches(self):
        # Append a trailing feature axis, then split the time axis into
        # n_batches equally sized chunks.
        featured_x = self.x_batch[..., np.newaxis]
        featured_y = self.y_batch[..., np.newaxis]
        self.x_chunks = np.split(featured_x, self.n_batches, axis=1)
        self.y_chunks = np.split(featured_y, self.n_batches, axis=1)
class TemporalOrderExp6aSequence():
    """
    From Hochreiter&Schmidhuber(1997):

    The goal is to classify sequences. Elements and targets are represented locally
    (input vectors with only one non-zero bit). The sequence starts with an E, ends
    with a B (the "trigger symbol") and otherwise consists of randomly chosen symbols
    from the set {a, b, c, d} except for two elements at positions t1 and t2 that are
    either X or Y . The sequence length is randomly chosen between 100 and 110, t1 is
    randomly chosen between 10 and 20, and t2 is randomly chosen between 50 and 60.

    There are 4 sequence classes Q, R, S, U which depend on the temporal order of X and Y.
    The rules are:
        X, X -> Q,
        X, Y -> R,
        Y , X -> S,
        Y , Y -> U.
    """

    def __init__(self, length_range=(100, 111), t1_range=(10, 21), t2_range=(50, 61),
                 batch_size=32, seed=None):
        # Output classes and the symbol alphabet used to build the sequences.
        self.classes = ['Q', 'R', 'S', 'U']
        self.n_classes = len(self.classes)

        self.relevant_symbols = ['X', 'Y']
        self.distraction_symbols = ['a', 'b', 'c', 'd']
        self.start_symbol = 'B'
        self.end_symbol = 'E'

        # Ranges are half-open (np.random.randint excludes the upper bound).
        self.length_range = length_range
        self.t1_range = t1_range
        self.t2_range = t2_range
        self.batch_size = batch_size

        if seed is not None:
            np.random.seed(seed)

        all_symbols = self.relevant_symbols + self.distraction_symbols + \
                      [self.start_symbol] + [self.end_symbol]
        self.n_symbols = len(all_symbols)
        # Lookup tables: symbol <-> index and class <-> index, used by the
        # one-hot encoders/decoders below.
        self.s_to_idx = {s: n for n, s in enumerate(all_symbols)}
        self.idx_to_s = {n: s for n, s in enumerate(all_symbols)}
        self.c_to_idx = {c: n for n, c in enumerate(self.classes)}
        self.idx_to_c = {n: c for n, c in enumerate(self.classes)}

    def generate_pair(self):
        """Generate one (sequence string, class label) pair per the rules above."""
        length = np.random.randint(self.length_range[0], self.length_range[1])
        t1 = np.random.randint(self.t1_range[0], self.t1_range[1])
        t2 = np.random.randint(self.t2_range[0], self.t2_range[1])

        x = np.random.choice(self.distraction_symbols, length)
        x[0] = self.start_symbol
        x[-1] = self.end_symbol

        # Pick the class first, then plant the X/Y symbols that realize it.
        y = np.random.choice(self.classes)
        if y == 'Q':
            x[t1], x[t2] = self.relevant_symbols[0], self.relevant_symbols[0]
        elif y == 'R':
            x[t1], x[t2] = self.relevant_symbols[0], self.relevant_symbols[1]
        elif y == 'S':
            x[t1], x[t2] = self.relevant_symbols[1], self.relevant_symbols[0]
        else:
            x[t1], x[t2] = self.relevant_symbols[1], self.relevant_symbols[1]
        return ''.join(x), y

    # encoding/decoding single instance version
    def encode_x(self, x):
        """One-hot encode a sequence string -> (len(x), n_symbols) array."""
        idx_x = [self.s_to_idx[s] for s in x]
        return to_categorical(idx_x, num_classes=self.n_symbols)

    def encode_y(self, y):
        """One-hot encode a class label -> (n_classes,) array."""
        idx_y = self.c_to_idx[y]
        return to_categorical(idx_y, num_classes=self.n_classes)

    def decode_x(self, x):
        """Inverse of encode_x; all-zero (padding) rows are dropped first."""
        x = x[np.sum(x, axis=1) > 0]  # remove padding
        return ''.join([self.idx_to_s[pos] for pos in np.argmax(x, axis=1)])

    def decode_y(self, y):
        """Inverse of encode_y."""
        return self.idx_to_c[np.argmax(y)]

    # encoding/decoding batch versions
    def encode_x_batch(self, x_batch):
        # Pad to the maximum possible sequence length so all batches align.
        return pad_sequences([self.encode_x(x) for x in x_batch],
                             maxlen=self.length_range[1])

    def encode_y_batch(self, y_batch):
        return np.array([self.encode_y(y) for y in y_batch])

    def decode_x_batch(self, x_batch):
        return [self.decode_x(x) for x in x_batch]

    def decode_y_batch(self, y_batch):
        return [self.idx_to_c[pos] for pos in np.argmax(y_batch, axis=1)]

    def __len__(self):
        """ Let's assume 1000 sequences as the size of data. """
        return int(1000. / self.batch_size)

    def __getitem__(self, index):
        # Batches are generated on demand; `index` itself is ignored.
        batch_x, batch_y = [], []
        for _ in range(self.batch_size):
            x, y = self.generate_pair()
            batch_x.append(x)
            batch_y.append(y)
        return self.encode_x_batch(batch_x), self.encode_y_batch(batch_y)

    class DifficultyLevel:
        """ On HARD, settings are identical to the original settings from the '97 paper."""
        EASY, NORMAL, MODERATE, HARD, NIGHTMARE = range(5)

    @staticmethod
    def get_predefined_generator(difficulty_level, batch_size=32, seed=8382):
        """Return a generator configured for one of the DifficultyLevel presets.

        Any unrecognized level falls through to the NIGHTMARE settings.
        """
        EASY = TemporalOrderExp6aSequence.DifficultyLevel.EASY
        NORMAL = TemporalOrderExp6aSequence.DifficultyLevel.NORMAL
        MODERATE = TemporalOrderExp6aSequence.DifficultyLevel.MODERATE
        HARD = TemporalOrderExp6aSequence.DifficultyLevel.HARD

        if difficulty_level == EASY:
            length_range = (7, 9)
            t1_range = (1, 3)
            t2_range = (4, 6)
        elif difficulty_level == NORMAL:
            length_range = (30, 41)
            t1_range = (2, 6)
            t2_range = (20, 28)
        elif difficulty_level == MODERATE:
            length_range = (60, 81)
            t1_range = (10, 21)
            t2_range = (45, 55)
        elif difficulty_level == HARD:
            length_range = (100, 111)
            t1_range = (10, 21)
            t2_range = (50, 61)
        else:
            length_range = (300, 501)
            t1_range = (10, 81)
            t2_range = (250, 291)
        return TemporalOrderExp6aSequence(length_range, t1_range, t2_range,
                                          batch_size, seed)
| [
"noreply@github.com"
] | DeepLearningLibrary.noreply@github.com |
f771de59dc398f4da0e47876b27c7e3ed7250b0f | 07fc1f52826d6b88a230a49a0818d4595788dba8 | /setup.py | cb3824fbdb36b4475ab5166ed9dc35bc5eb08dc3 | [
"BSD-3-Clause"
] | permissive | parietal-io/module-2-library | a69f17970334ba6689c31921eb795439cc178fa8 | e4d1cb798dc24dca21f9d455cd97491e5ff39f78 | refs/heads/master | 2020-03-16T10:14:33.742498 | 2018-05-16T23:33:12 | 2018-05-16T23:33:12 | 132,632,425 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | from setuptools import find_packages, setup
import versioneer
# Package metadata for the m2l ("module to library") tool.
# Version and build commands are supplied by versioneer from git tags.
setup(name='module-2-package',
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      description='Create Python packages from single modules',
      packages=['m2l'],
      # Ship the project templates alongside the code.
      package_data={'m2l':['templates/*/*']},
      install_requires=['click', 'jinja2', 'versioneer'],
      tests_require='pytest',
      # Installs the `m2l` console command, dispatching to m2l.cli.
      entry_points={
          'console_scripts': [
              'm2l = m2l:cli',
          ],
      },
      # Template data files must exist on disk, so the package cannot run zipped.
      zip_safe=False,
      include_package_data=True)
| [
"parietal.io@gmail.com"
] | parietal.io@gmail.com |
a3f192be9d3d1d6d638d044739492a103e9df043 | bc4ac4d9874142337050e7777b6c180c0f19bdde | /wc3.py | 9feb943dd883dc94dac3916b19859655f14947de | [] | no_license | Techypanda/Warcraft-III-Reforged-Lobby-Refresher | ec5d3f6b1fe2668fb3f2cb0b1babdd4094b5961c | 27ed0e313ec81aa721ca7db221339eb9bc3b9ec7 | refs/heads/master | 2020-12-21T15:46:51.806688 | 2020-01-27T11:36:05 | 2020-01-27T11:36:05 | 236,477,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | import pywinauto
import pyautogui
import time
import keyboard
from pynput.keyboard import Key, Controller
################################################################################
# PURPOSE: This function will type the inputted word through a keyboard.
# IMPORT: inWord - String
# EXPORT: NONE
################################################################################
def type(inWord):
    """Type each character of inWord on a virtual keyboard, then press Enter.

    NOTE: intentionally named like (and shadowing) the builtin ``type``
    within this module.
    """
    kb = Controller()
    for ch in inWord:
        kb.press(ch)
        kb.release(ch)
    keyboard.press_and_release('enter')
################################################################################
# PURPOSE: This function will execute one alt tab.
# IMPORT: None
# EXPORT: None
################################################################################
def altTab():
    """Simulate a single Alt+Tab chord: hold Alt, tap Tab, release Alt."""
    kb = Controller()
    kb.press(Key.alt)
    try:
        kb.press(Key.tab)
        kb.release(Key.tab)
    finally:
        # Always release Alt so the modifier is never left stuck down.
        kb.release(Key.alt)
################################################################################
# PURPOSE: This function will simply find the Warcraft Client and tab into it.
# IMPORT: None
# EXPORT: None
################################################################################
def getIntoWC():
    """Focus the Warcraft III client window and click into its chat box.

    Raises (propagates a pywinauto error) when no window titled
    "Warcraft III Beta" exists.
    """
    app = pywinauto.Application().connect(title="Warcraft III Beta")
    app_dialog = app.top_window()
    app_dialog.set_focus()
    # Click where the chat input sits; coordinates are hard-coded for the
    # author's screen resolution — TODO confirm for other setups.
    pyautogui.click(1800, 1400)
################################################################################
# PURPOSE: RefreshLobby will execute !closeall !openall to update the warcraft
# lobby.
# IMPORT: None
# EXPORT: None
################################################################################
def refreshLobby():
    """Refresh the lobby by typing the host-bot !closeall/!openall chat commands, then Alt+Tab away."""
    type("!closeall")
    type("!openall")
    altTab()
################################################################################
# PURPOSE: Init will loop through eternally until you end the script.
# IMPORT: None
# EXPORT: None
################################################################################
def init():
    """Main loop: focus the WC3 client, refresh the lobby, sleep 60 s, repeat.

    Stops once the Warcraft III client window can no longer be located.
    """
    while True:  # clearer than the original `while 2 == 2`
        try:
            getIntoWC()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # can still stop the script with Ctrl-C.
            print("Could Not Locate WC3 Client")
            break
        refreshLobby()
        time.sleep(60)
# Script entry point: start the refresh loop as soon as the file runs.
init()
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
68d1e3ef04ed10a6155d43bdc9731908c84a8c4d | c078e13700b237a063b95beda9997a8c8a6c6e9d | /client.py | 097429df303eb84994cc1d134d8f6a428d9df8cf | [] | no_license | hwchen/log-agg-py | 81fdf05aa70d1c1c319ca91fcd02f89a7462b081 | a75d977445e451847e4f16611bfdc3ede17608c1 | refs/heads/master | 2021-01-16T18:56:33.117661 | 2015-03-23T21:19:38 | 2015-03-23T21:19:38 | 32,759,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | #! /usr/bin/env python3
import random
import sys
import zmq
# PUSH socket: log messages are pushed to an aggregator that PULLs on :5555.
context = zmq.Context()
print("Conecting to log aggregator server...")
sender = context.socket(zmq.PUSH)
sender.connect("tcp://localhost:5555")

# possible log messages for randomly constructing
log_messages = ["successful login",
                "unsuccessful login",
                "logout",
                "posted status",
                "changed profile"]

# get client ID number using sys args
# (falls back to a random 5-digit-ish ID when no argument is given)
client_id = sys.argv[1] if len(sys.argv) > 1 else random.randint(10000,100000)

for request in range(100000):
    # Construct Log Message
    log_message = "Server {} : User {} {}".format(client_id,
                                                  random.randint(0,1000),
                                                  random.choice(log_messages))
    # Send Log Message
    print("Server {} sending log {} ...".format(client_id, request))
    sender.send(log_message.encode('utf-8'))
| [
"walther.chen@gmail.com"
] | walther.chen@gmail.com |
87a04ffa4aa0d5c213e7825a9c366ed150f20d23 | a3b306df800059a5b74975793251a28b8a5f49c7 | /Graphs/LX-2/molecule_otsu = False/BioImageXD-1.0/GUI/ColorTransferEditor.py | c576aa950de1866e4c8fc0a275e7777da6235b44 | [] | no_license | giacomo21/Image-analysis | dc17ba2b6eb53f48963fad931568576fda4e1349 | ea8bafa073de5090bd8f83fb4f5ca16669d0211f | refs/heads/master | 2016-09-06T21:42:13.530256 | 2013-07-22T09:35:56 | 2013-07-22T09:35:56 | 11,384,784 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 38,261 | py | # -*- coding: iso-8859-1 -*-
"""
Unit: ColorTransferEditor
Project: BioImageXD
Description:
A widget used to view and modify a color transfer function. The widget
draws the graph of the function and allows the user to modify the function.
Copyright (C) 2005 BioImageXD Project
See CREDITS.txt for details
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__author__ = "BioImageXD Project <http://www.bioimagexd.org/>"
__version__ = "$Revision: 1.28 $"
__date__ = "$Date: 2005/01/13 14:52:39 $"
import wx.lib.buttons as buttons
import wx.lib.colourselect as csel
import GUI.Dialogs
import lib.ImageOperations
import Logging
import scripting
import Configuration
import lib.messenger
import math
import os.path
import time
import vtk
import wx
class CTFButton(wx.BitmapButton):
    """
    Description: A button that shows a ctf as a palette and lets the user modify it
                 by clicking on the button
    """
    def __init__(self, parent, alpha = 0):
        """
        Initialization.  `alpha` (non-zero) enables opacity editing in the
        popup editor opened by a left click.
        """
        wx.BitmapButton.__init__(self, parent, -1)
        # Set once the user has opened the editor dialog at least once.
        self.changed = 0
        self.alpha = alpha
        # Default CTF: linear black-to-white ramp over [0, 255].
        self.ctf = vtk.vtkColorTransferFunction()
        self.ctf.AddRGBPoint(0, 0, 0, 0)
        self.ctf.AddRGBPoint(255, 1, 1, 1)
        self.bmp = lib.ImageOperations.paintCTFValues(self.ctf)
        self.SetBitmapLabel(self.bmp)
        self.Bind(wx.EVT_LEFT_DOWN, self.onModifyCTF)
        self.Bind(wx.EVT_RIGHT_DOWN, self.onRightClick)
        # Right-click context menu for exporting the palette.
        self.ID_SAVE_PALETTE_IMAGE = wx.NewId()
        self.ID_SAVE_PALETTE_KEY = wx.NewId()
        self.menu = wx.Menu()
        item = wx.MenuItem(self.menu, self.ID_SAVE_PALETTE_IMAGE, "Save palette image...")
        self.Bind(wx.EVT_MENU, self.onSavePaletteImage, id = self.ID_SAVE_PALETTE_IMAGE)
        self.menu.AppendItem(item)
        item = wx.MenuItem(self.menu, self.ID_SAVE_PALETTE_KEY, "Save palette key...")
        self.Bind(wx.EVT_MENU, self.onSavePaletteKey, id = self.ID_SAVE_PALETTE_KEY)
        self.menu.AppendItem(item)

    def isChanged(self):
        """
        Was the ctf or otf changed
        """
        return self.changed

    def setColorTransferFunction(self, ctf):
        """
        Set the color transfer function that is edited
        """
        self.ctf = ctf
        # Cache the scalar range as ints; used when drawing the palette key.
        self.minval, self.maxval = ctf.GetRange()
        self.minval = int(self.minval)
        self.maxval = int(self.maxval)
        self.bmp = lib.ImageOperations.paintCTFValues(self.ctf)
        self.SetBitmapLabel(self.bmp)

    def getColorTransferFunction(self):
        """
        Return the color transfer function that is edited
        """
        return self.ctf

    def onModifyCTF(self, event):
        """
        Modify the color transfer function by opening a modal editor dialog.
        """
        # NOTE(review): the dialog is never Destroy()ed after ShowModal —
        # confirm whether wx reclaims it via its parent.
        dlg = wx.Dialog(self, -1, "Edit color transfer function")
        sizer = wx.BoxSizer(wx.VERTICAL)
        panel = ColorTransferEditor(dlg, alpha = self.alpha)
        panel.setColorTransferFunction(self.ctf)
        # Kept so the opacity accessors below can delegate to the editor.
        self.panel = panel
        sizer.Add(panel)
        dlg.SetSizer(sizer)
        dlg.SetAutoLayout(1)
        self.changed = 1
        sizer.Fit(dlg)
        dlg.ShowModal()
        # The editor mutates self.ctf in place; repaint the button preview.
        self.bmp = lib.ImageOperations.paintCTFValues(self.ctf)
        self.SetBitmapLabel(self.bmp)
        lib.messenger.send(None, "data_changed", 0)
        lib.messenger.send(self, "ctf_modified")

    def getOpacityTransferFunction(self):
        """
        Returns the opacity function
        """
        return self.panel.getOpacityTransferFunction()

    def setOpacityTransferFunction(self, otf):
        """
        Set the opacity function (delegates to the editor panel).
        """
        return self.panel.setOpacityTransferFunction(otf)

    def onRightClick(self, event):
        """
        Handler of right mouse button click
        """
        self.PopupMenu(self.menu, event.GetPosition())

    def onSavePaletteImage(self, event):
        """
        Event handler of save palette image
        """
        filename,mime = self.askPaletteFileName("Save palette image")
        if not filename:
            return
        img = self.bmp.ConvertToImage()
        img.SaveMimeFile(filename, mime)

    def onSavePaletteKey(self, event):
        """
        Event handler of save palette key: saves the palette with a tick
        scale and min/max value labels drawn underneath it.
        """
        filename,mime = self.askPaletteFileName("Save palette key")
        if not filename:
            return
        width = self.bmp.GetWidth()
        bmpHeight = self.bmp.GetHeight()
        # Extra 16 pixels below the palette for tick marks and numbers.
        height = bmpHeight + 16
        bitmap = wx.EmptyBitmap(width = width, height = height)
        black = (0,0,0)
        white = (255,255,255)
        longMark = 5
        shortMark = 3
        dc = wx.MemoryDC()
        dc.SelectObject(bitmap)
        dc.BeginDrawing()
        # Paint background and copy palette
        dc.SetBackground(wx.Brush(black))
        dc.SetBrush(wx.Brush(black))
        dc.DrawRectangle(0, 0, width, height)
        dc.DrawBitmap(self.bmp, 0, 0)
        # Draw scale
        lineList = []
        dc.SetPen(wx.Pen(white, 1))
        longStartY = bmpHeight + 1
        # Long tick marks at the quarter points and endpoints.
        for x in (0,64,128,192,255):
            lineList.append((x, longStartY, x, longStartY + longMark))
        shortStartY = longStartY + (longMark - shortMark) / 2
        # Short tick marks every 4 pixels, plus a horizontal baseline.
        for x in range(4, 256, 4):
            lineList.append((x, shortStartY, x, shortStartY + shortMark))
        lineList.append((0, longStartY + longMark / 2, width - 1,longStartY + longMark / 2))
        dc.DrawLineList(lineList)
        # Write numbers
        textY = longStartY + longMark + 1
        textHeight = 6
        textWidth = 32
        font = self.GetFont()
        font.SetPointSize(6)
        dc.SetFont(font)
        dc.SetTextForeground(white)
        # Min at the left edge, max at the right, quarter values centered.
        dc.DrawLabel(str(self.minval), wx.Rect(0, textY, textWidth, textHeight), wx.ALIGN_LEFT)
        dc.DrawLabel(str(self.maxval), wx.Rect(width - textWidth - 1, textY, textWidth, textHeight), wx.ALIGN_RIGHT)
        diff = self.maxval - self.minval + 1
        for i in range(1,4):
            value = i * diff / 4
            dc.DrawLabel(str(value), wx.Rect(i*256/4 - textWidth/2, textY, textWidth, textHeight), wx.ALIGN_CENTER_HORIZONTAL)
        dc.EndDrawing()
        img = bitmap.ConvertToImage()
        img.SaveMimeFile(filename, mime)
        del dc
        del bitmap

    def askPaletteFileName(self, title):
        """
        Asks filename to save a palette.  Returns (filename, mime type), or
        (None, None) when the user cancels the dialog.
        """
        wcDict = {"png": "Portable Network Graphics Image (*.png)", "jpeg": "JPEG Image (*.jpeg)",
                  "tiff": "TIFF Image (*.tiff)", "bmp": "Bitmap Image (*.bmp)"}
        conf = Configuration.getConfiguration()
        defaultExt = conf.getConfigItem("ImageFormat", "Output")
        # Normalize short extension aliases to the names used in wcDict.
        if defaultExt == "jpg":
            defaultExt = "jpeg"
        if defaultExt == "tif":
            defaultExt = "tiff"
        if defaultExt not in wcDict:
            defaultExt = "png"
        initFile = "palette.%s" % (defaultExt)
        # Put the configured default format first in the wildcard list.
        wc = wcDict[defaultExt] + "|*.%s" % defaultExt
        del wcDict[defaultExt]
        for key in wcDict.keys():
            wc += "|%s|*.%s" % (wcDict[key], key)
        filename = GUI.Dialogs.askSaveAsFileName(self, title, initFile, wc, "palette")
        if not filename:
            return None,None
        # Derive the MIME type from whatever extension the user typed.
        ext = filename.split(".")[-1].lower()
        if ext == "jpg":
            ext = "jpeg"
        if ext == "tif":
            ext = "tiff"
        mime = "image/%s"%ext
        return filename,mime
class CTFPaintPanel(wx.Panel):
"""
Description: A widget onto which the transfer function is painted
"""
    def __init__(self, parent, **kws):
        """
        Initialization.  Accepts optional "width"/"height" keywords; otherwise
        the panel is sized to fit the 255x255 graph plus its margins.
        """
        # Graph extents in value space; scale maps value space to pixels.
        self.maxx = 255
        self.maxy = 255
        self.scale = 1
        # maxval and minval are used in paintFreeMode and dont seem to be initialized anywhere. So lets make them None for now
        self.maxval = None
        self.minval = None
        self.background = None
        # Pixel offsets of the graph origin inside the panel.
        self.xoffset = 16
        self.yoffset = 22
        if kws.has_key("width"):
            w = kws["width"]
        else:
            w = self.xoffset + self.maxx / self.scale
            w += 15
        if kws.has_key("height"):
            h = kws["height"]
        else:
            h = self.yoffset + self.maxy / self.scale
            h += 15
        wx.Panel.__init__(self, parent, -1, size = (w, h))
        # Off-screen bitmap used for double-buffered painting in onPaint.
        self.buffer = wx.EmptyBitmap(w, h, -1)
        self.w = w
        self.h = h
        self.dc = None
        self.Bind(wx.EVT_PAINT, self.onPaint)
	def toGraphCoords(self, x, y, maxval):
		"""
		Returns x and y of the graph for given coordinates

		Converts panel pixel coordinates (x, y) into graph coordinates with
		origin at the lower-left corner, scaling x into [0, maxval] and
		clamping both results to that range.
		"""
		rx, ry = x - self.xoffset, self.maxy - (y - self.yoffset)
		# NOTE(review): under Python 2 this is integer division when both
		# operands are ints (e.g. maxval == maxx == 255 gives 1) -- confirm
		# maxval is never smaller than maxx here.
		xcoeff = maxval / self.maxx
		rx *= xcoeff
		#print "Maxval=",maxval,"maxx=",self.maxx,"xcoeff=",xcoeff
		# Clamp to the valid graph range.
		if rx < 0:rx = 0
		if ry < 0:ry = 0
		if rx > maxval:rx = maxval
		if ry > maxval:ry = maxval
		#print "toGraph(%d, %d) = (%d,%d)"%(x,y,rx,ry)
		return (rx, ry)
	def onPaint(self, event):
		"""
		Handle EVT_PAINT by blitting the pre-rendered off-screen buffer.
		"""
		dc = wx.BufferedPaintDC(self, self.buffer)
	def createLine(self, x1, y1, x2, y2, color = "WHITE", brush = None, **kws):
		"""
		Draws a line from (x1,y1) to (x2,y2). The method
		takes into account the scale factor

		Keyword args:
		  arrow -- "HORIZONTAL" or "VERTICAL": also draw an arrow head at
		           the (x2, y2) end of the line.
		"""
		if brush:
			self.dc.SetBrush(brush)
		self.dc.SetPen(wx.Pen(color))
		# (x1,y1) and (x2,y2) are in coordinates where
		# origo is the lower left corner
		x12 = x1 + self.xoffset
		y12 = self.maxy - y1 + self.yoffset
		y22 = self.maxy - y2 + self.yoffset
		x22 = x2 + self.xoffset
		arr = None
		try:
			self.dc.DrawLine(x12 / self.scale, y12 / self.scale,
			x22 / self.scale, y22 / self.scale)
		except:
			# Drawing can fail e.g. for out-of-range coordinates; log and
			# carry on rather than aborting the whole repaint.
			Logging.info("Failed to draw line from %f/%f,%f/%f to %f/%f,%f/%f" % (x12, self.scale, y12, self.scale, x22, self.scale, y22, self.scale), kw = "ctf")
		if kws.has_key("arrow"):
			# Three points forming a small triangular arrow head at the
			# line's end point.
			if kws["arrow"] == "HORIZONTAL":
				lst = [(x22 / self.scale - 3, y22 / self.scale - 3), (x22 / self.scale, y22 / self.scale), (x22 / self.scale - 3, y22 / self.scale + 3)]
			elif kws["arrow"] == "VERTICAL":
				lst = [(x22 / self.scale - 3, y22 / self.scale + 3), (x22 / self.scale, y22 / self.scale), (x22 / self.scale + 3, y22 / self.scale + 3)]
			self.dc.DrawPolygon(lst)
def createOval(self, x, y, r, color = "GREY"):
"""
Draws an oval at point (x,y) with given radius
"""
self.dc.SetBrush(wx.Brush(color, wx.SOLID))
self.dc.SetPen(wx.Pen(color))
y = self.maxy - y + self.yoffset
ox = x / self.scale
ox += self.xoffset
self.dc.DrawCircle(ox, y / self.scale, r)
def createText(self, x, y, text, color = "WHITE", **kws):
"""
Draws a text at point (x,y) using the given font
"""
self.dc.SetTextForeground(color)
self.dc.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL))
useoffset = 1
if kws.has_key("use_offset"):
useoffset = kws["use_offset"]
y = self.maxy - y
if useoffset:
y += self.yoffset
ox = x / self.scale
if useoffset:
ox += self.xoffset
self.dc.DrawText(text, ox, y / self.scale)
	def drawBackground(self, minval, maxval):
		"""
		Paint the background for the CTF as a bitmap so it won't have to be done each time

		Renders the axes, a grey grid and the axis labels into a fresh
		bitmap and returns it; minval/maxval are used only for the x-axis
		labels.
		"""
		# Temporarily swap self.dc so that the createLine/createText helpers
		# draw into the memory DC; restored before returning.
		olddc = self.dc
		dc = wx.MemoryDC()
		self.dc = dc
		x0, y0, w, h = self.GetClientRect()
		bmp = wx.EmptyBitmap(w, h)
		dc.SelectObject(bmp)
		dc.SetBackground(wx.Brush("BLACK"))
		dc.Clear()
		dc.BeginDrawing()
		# Axes with arrow heads.
		self.createLine(0, 0, 0, self.maxy + 5, arrow = "VERTICAL")
		self.createLine(0, 0, self.maxx + 5, 0, arrow = "HORIZONTAL")
		for i in range(32, self.maxx, 32):
			# Color gray and stipple with gray50
			self.createLine(i, 0, i, self.maxy, 'GREY', wx.LIGHT_GREY_BRUSH)
			self.createLine(0, i, self.maxx, i, 'GREY', wx.LIGHT_GREY_BRUSH)
		textcol = "GREEN"
		self.createText(0, -5, "0", textcol)
		halfval = int((maxval - minval) / 2)
		self.createText(self.maxx / 2, -5, "%d" % halfval, textcol)
		self.createText(self.maxx, -12, "%d" % maxval, textcol)
		self.createText(-10, self.maxy / 2, "127", textcol)
		self.createText(-10, self.maxy, "255", textcol)
		self.dc.EndDrawing()
		dc.EndDrawing()
		dc.SelectObject(wx.NullBitmap)
		self.dc = olddc
		return bmp
	def paintFreeMode(self, redfunc, greenfunc, bluefunc, alphafunc, maximumValue = -1):
		"""
		Paints the graph of the function specified as a list of all values of the function

		Each *func argument is a full lookup table (one y value per
		intensity); the curves are scaled horizontally to fit the graph.
		"""
		self.dc = wx.MemoryDC()
		self.dc.SelectObject(self.buffer)
		d = self.maxx / float(maximumValue)
		if d < 1:d = 1
		if not self.background:
			# NOTE(review): self.minval/self.maxval are None after
			# __init__; this relies on the caller having set them (e.g. via
			# ColorTransferEditor.setFromColorTransferFunction) first.
			Logging.info("Constructing background from minval = %d, maxval = %d" % (self.minval, self.maxval))
			self.background = self.drawBackground(self.minval, self.maxval)
		self.dc.BeginDrawing()
		self.dc.DrawBitmap(self.background, 0, 0)
		# Scale each curve's x to the graph width and flip y.
		coeff = float(self.maxx) / maximumValue
		redline = [(int(x * coeff), self.maxy - y) for x, y in enumerate(redfunc)]
		greenline = [(int(x * coeff), self.maxy - y) for x, y in enumerate(greenfunc)]
		blueline = [(int(x * coeff), self.maxy - y) for x, y in enumerate(bluefunc)]
		alphaline = [(int(x * coeff), self.maxy - y) for x, y in enumerate(alphafunc)]
		self.dc.SetPen(wx.Pen(wx.Colour(255, 0, 0), 1))
		self.dc.DrawLines(redline, self.xoffset, self.yoffset)
		self.dc.SetPen(wx.Pen(wx.Colour(0, 255, 0), 1))
		self.dc.DrawLines(greenline, self.xoffset, self.yoffset)
		self.dc.SetPen(wx.Pen(wx.Colour(0, 0, 255), 1))
		self.dc.DrawLines(blueline, self.xoffset, self.yoffset)
		self.dc.SetPen(wx.Pen(wx.Colour(255, 255, 255), 1))
		self.dc.DrawLines(alphaline, self.xoffset, self.yoffset)
		self.dc.SelectObject(wx.NullBitmap)
		self.dc = None
	def paintTransferFunction(self, alphaMode = None, selectedPoint = None, red = [], green = [], blue = [], maximumValue = -1, alpha = [], drawAlpha = 0):
		"""
		Paints the graph of the function specified by points in the graph

		red/green/blue/alpha are lists of (x, y) control points; the
		channels are drawn as polylines between their points, with small
		circles on the points themselves.  In alphaMode only the alpha
		polyline is drawn.

		NOTE(review): the mutable default arguments are shared across
		calls, but they are only read here, never mutated, so this is
		benign.
		"""
		# Determine the intensity range spanned by the point lists; maxval
		# is used to scale the points' x coordinates onto the graph.
		(r, rv), (g, gv), (b, bv) = red[-1], green[-1], blue[-1]
		a = 0
		if alpha and drawAlpha:
			(a, av) = alpha[-1]
		maxval = max(r, g, b, a)
		(r, rv), (g, gv), (b, bv) = red[0], green[0], blue[0]
		a = maximumValue
		if alpha and drawAlpha:
			(a, av) = alpha[0]
		minval = min(r, g, b, a)
		d = maxval / float(self.maxx)
		if d < 1:d = 1
		self.dc = wx.MemoryDC()
		self.dc.SelectObject(self.buffer)
		self.dc.BeginDrawing()
		self.dc.Clear()
		if not self.background:
			self.background = self.drawBackground(minval, maxval)
		x0, y0, w, h = self.GetClientRect()
		self.dc.DrawBitmap(self.background, 0, 0)
		# Previous point per channel; a channel with fewer points than the
		# longest one just keeps its last position (see the except arms).
		ax0, rx0, gx0, bx0 = 0, 0, 0, 0
		r0, g0, b0, a0 = 0, 0, 0, 0
		coeff = float(self.maxx) / maxval
		if selectedPoint:
			x, y = selectedPoint
			x *= coeff
			self.createOval(x, y, 2, (255, 255, 255))
		n = max(len(red), len(green), len(blue), len(alpha))
		for i in range(n):
			# IndexError from a shorter list falls back to the channel's
			# previous point so the polyline simply stops extending.
			try:
				rx, r = red[i]
				rx *= coeff
				self.createOval(rx, r, 2)
			except:
				r, rx = r0, rx0
			try:
				gx, g = green[i]
				gx *= coeff
				self.createOval(gx, g, 2)
			except:
				g, gx = g0, gx0
			try:
				bx, b = blue[i]
				bx *= coeff
				self.createOval(bx, b, 2)
			except:
				b, bx = b0, bx0
			if drawAlpha and alpha:
				try:
					ax, a = alpha[i]
					ax *= coeff
					self.createOval(ax, a, 2)
				except:
					a, ax = a0, ax0
			# Connect the previous point of each channel to the current one.
			if drawAlpha and alpha:
				self.createLine(ax0, a0, ax, a, '#ffffff')
			if not alphaMode:
				self.createLine(rx0,r0,rx,r,'#ff0000')
				self.createLine(gx0, g0, gx, g, '#00ff00')
				self.createLine(bx0,b0,bx,b,'#0000ff')
			rx0, gx0, bx0 = rx, gx, bx
			r0, g0, b0 = r, g, b
			a0 = a
			#ax0 = ax
			if alpha and drawAlpha:
				ax0 = ax
		# If a channel's last point does not reach maximumValue, draw a
		# closing line down to y = 0 at the right edge of the graph.
		if abs(rx0 / coeff - maximumValue) > 0.5:
			self.createLine(rx0, r0, self.maxx, 0, '#ff0000')
		if abs(gx0/coeff - maximumValue)>0.5:
			self.createLine(gx0,g0,self.maxx, 0,'#00ff00')
		if abs(bx0 / coeff - maximumValue) > 0.5:
			self.createLine(bx0, b0, self.maxx, 0, '#0000ff')
		if abs(ax0/coeff - maximumValue)>0.5:
			self.createLine(ax0,a0,self.maxx, 0,'#ffffff')
		self.dc.SelectObject(wx.NullBitmap)
		self.dc = None
class CTFValuePanel(wx.Panel):
	"""
	A thin strip widget that renders the colors of a color transfer
	function as a horizontal gradient bar.
	"""
	def __init__(self, parent):
		"""
		Create the strip; its width matches the CTF graph drawn below it.
		"""
		self.lineheight = 32
		width = 256 + 16
		height = self.lineheight
		self.w, self.h = width, height
		wx.Panel.__init__(self, parent, -1, size = (width, height))
		self.buffer = wx.EmptyBitmap(width, height, -1)
		self.Bind(wx.EVT_PAINT, self.onPaint)
		self.xoffset = 16
		self.yoffset = 0
	def onPaint(self, event):
		"""
		Blit the off-screen buffer to the screen on paint events.
		"""
		dc = wx.BufferedPaintDC(self, self.buffer)
	def paintTransferFunction(self, ctf, pointlist = []):
		"""
		Render the colors of the given transfer function into the buffer.
		"""
		memdc = wx.MemoryDC()
		memdc.SelectObject(self.buffer)
		memdc.SetBackground(wx.Brush(self.GetBackgroundColour()))
		memdc.Clear()
		memdc.BeginDrawing()
		strip = lib.ImageOperations.paintCTFValues(ctf, height = self.lineheight)
		memdc.DrawBitmap(strip, self.xoffset, 0)
		memdc.EndDrawing()
		memdc.SelectObject(wx.NullBitmap)
class ColorTransferEditor(wx.Panel):
"""
Description: A widget used to view and modify an intensity transfer function
"""
	def __init__(self, parent, **kws):
		"""
		Initialization

		Builds the editor: a CTFValuePanel palette strip, a CTFPaintPanel
		graph canvas, per-channel toggle buttons, palette open/save
		buttons and a max-nodes spin control.

		Keyword args:
		  alpha -- whether an alpha (opacity) channel is edited as well.
		"""
		self.parent = parent
		self.selectedPoint = None
		self.hasPainted = 0
		wx.Panel.__init__(self, parent, -1)
		self.updateT = 0
		# NOTE(review): self.alpha is only assigned when the "alpha"
		# keyword is given, yet later code reads it unconditionally --
		# callers appear to always pass it; confirm.
		if kws.has_key("alpha"):
			self.alpha = kws["alpha"]
		self.updateCallback = 0
		self.ctf = vtk.vtkColorTransferFunction()
		self.doyield = 1
		self.calling = 0
		self.guiupdate = 0
		self.freeMode = 0
		# Pixel distance within which a click selects an existing point.
		self.selectThreshold = 35.0
		# Slope-change threshold used by getPointsFromFree().
		self.ptThreshold = 0.1
		# Index of the channel being edited: 0=red, 1=green, 2=blue, 3=alpha.
		self.color = 0
		self.modCount = 0
		self.minval = 0
		self.maxval = 255
		self.otf = vtk.vtkPiecewiseFunction()
		self.restoreDefaults()
		# --- layout: palette strip on top of the graph canvas ---
		self.mainsizer = wx.BoxSizer(wx.VERTICAL)
		self.canvasBox = wx.BoxSizer(wx.VERTICAL)
		self.value = CTFValuePanel(self)
		self.canvasBox.Add(self.value, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
		self.canvas = CTFPaintPanel(self)
		self.canvasBox.Add(self.canvas, 1, wx.ALL | wx.EXPAND, 10)
		self.mainsizer.Add(self.canvasBox)
		# --- tool buttons row ---
		self.itemBox = wx.BoxSizer(wx.HORIZONTAL)
		self.alphaMode = 0
		iconpath = scripting.get_icon_dir()
		redbmp = wx.Image(os.path.join(iconpath, "CTF_Red.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
		self.redBtn = buttons.GenBitmapToggleButton(self, -1, None)
		self.redBtn.SetBestSize((32,32))
		self.redBtn.SetValue(1)
		self.redBtn.SetBitmapLabel(redbmp)
		greenbmp = wx.Image(os.path.join(iconpath, "CTF_Green.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
		self.greenBtn = buttons.GenBitmapToggleButton(self, -1, None)
		self.greenBtn.SetBestSize((32,32))
		self.greenBtn.SetBitmapLabel(greenbmp)
		bluebmp = wx.Image(os.path.join(iconpath, "CTF_Blue.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
		self.blueBtn = buttons.GenBitmapToggleButton(self, -1, None)
		self.blueBtn.SetBestSize((32,32))
		self.blueBtn.SetBitmapLabel(bluebmp)
		if self.alpha:
			alphabmp = wx.Image(os.path.join(iconpath, "CTF_Alpha.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
			self.alphaBtn = buttons.GenBitmapToggleButton(self, -1, None)
			self.alphaBtn.SetBestSize((32,32))
			self.alphaBtn.Bind(wx.EVT_BUTTON, self.onEditAlpha)
			self.alphaBtn.SetBitmapLabel(alphabmp)
		self.freeBtn = buttons.GenBitmapToggleButton(self, -1, None)
		self.freeBtn.SetBestSize((32, 32))
		bmp = wx.Image(os.path.join(iconpath, "CTF_Freehand.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
		self.freeBtn.SetBitmapLabel(bmp)
		self.colorBtn = csel.ColourSelect(self, -1, "", size = (32, 32))
		self.colorBtn.Bind(csel.EVT_COLOURSELECT, self.onSetToColor)
		openbmp = wx.Image(os.path.join(iconpath, "CTF_OpenPalette.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
		self.openBtn = buttons.GenBitmapButton(self, -1, None)
		self.openBtn.SetBestSize((32,32))
		self.openBtn.SetBitmapLabel(openbmp)
		savebmp = wx.Image(os.path.join(iconpath, "CTF_SavePalette.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
		self.saveBtn = buttons.GenBitmapButton(self, -1, None)
		self.saveBtn.SetBestSize((32,32))
		self.saveBtn.SetBitmapLabel(savebmp)
		self.maxNodes = wx.SpinCtrl(self, -1, "20", min = 2, max = 9999, size = (54, -1), style = wx.TE_PROCESS_ENTER)
		self.maxNodes.SetToolTip(wx.ToolTip("Set the maximum number of nodes in the graph."))
		self.maxNodes.SetHelpText("Use this control to set the maximum number of nodes in the graph. This is useful if you have a hand drawn palette that you wish to edit by dragging the nodes.")
		self.maxNodes.Bind(wx.EVT_SPINCTRL, self.onSetMaxNodes)
		self.maxNodes.Bind(wx.EVT_TEXT_ENTER, self.onSetMaxNodes)
		self.itemBox.Add(self.redBtn)
		self.itemBox.Add(self.greenBtn)
		self.itemBox.Add(self.blueBtn)
		if self.alpha:
			self.itemBox.Add(self.alphaBtn)
		self.itemBox.Add(self.freeBtn)
		self.itemBox.Add(self.colorBtn)
		self.itemBox.Add(self.openBtn)
		self.itemBox.Add(self.saveBtn)
		self.itemBox.Add(self.maxNodes)
		self.redBtn.Bind(wx.EVT_BUTTON, self.onEditRed)
		self.greenBtn.Bind(wx.EVT_BUTTON, self.onEditGreen)
		self.blueBtn.Bind(wx.EVT_BUTTON, self.onEditBlue)
		self.freeBtn.Bind(wx.EVT_BUTTON, self.onFreeMode)
		self.openBtn.Bind(wx.EVT_BUTTON, self.onOpenLut)
		self.saveBtn.Bind(wx.EVT_BUTTON, self.onSaveLut)
		self.mainsizer.Add(self.itemBox)
		self.SetAutoLayout(True)
		self.SetSizer(self.mainsizer)
		self.mainsizer.SetSizeHints(self)
		self.upToDate = 0
		# --- mouse interaction with the graph canvas ---
		self.canvas.Bind(wx.EVT_LEFT_DOWN, self.onEditFunction)
		self.canvas.Bind(wx.EVT_LEFT_UP, self.updateCTFView)
		self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.onDeletePoint)
		self.canvas.Bind(wx.EVT_RIGHT_DOWN, self.onCreatePoint)
		self.canvas.Bind(wx.EVT_MOTION, self.onDrawFunction)
		self.pos = (0, 0)
def onSetMaxNodes(self, evt):
"""
Sets the maximum number of nodes
"""
n = len(self.points)
tot = 0
for i, pts in enumerate(self.points):
tot += len(pts)
# Code contribution by: 4n0 undt anezthes.
#
# First and last indices are kept. Index 0 is added manually, for loop 1 decrease.
# 'l' keeps track of which indices are of interest.
#
# Round off the float value of idx to get an index appropriate value.
#
# Iterating and modifying a list is not easy, so we mark unwanted elements
# with 'x' and remove them in a later stage.
maxPts = self.maxNodes.GetValue()
for i, pts in enumerate(self.points):
nrOfPts = len(pts)
Logging.info("Total number of points: %d" % nrOfPts)
Logging.info("Desired number of points: %d" % maxPts)
if maxPts > nrOfPts:
Logging.info("Total number of points for this channel deceed (read: Oxford dictionary) that of the maximum number of points allowed.")
else:
everyNth = float(nrOfPts-1) / (maxPts-1)
Logging.info("Removing every %f pts" % everyNth)
# Calculate indices to keep.
idx = 0
l = []
l.append(0)
for i in range(0, maxPts-1):
idx += everyNth
l.append(round(idx))
remove = []
# Mark and remove.
for j, point in enumerate(pts):
if j not in l:
pts[pts.index(point)] = 'x'
while (pts.__contains__('x')):
pts.remove('x')
Logging.info( "Number of points:", len(pts))
Logging.info( "Points kept:", pts)
self.upToDate = 0
self.updateGraph()
def onDeletePoint(self, event):
"""
Delete the selected point
"""
if self.selectedPoint:
for i, pts in enumerate(self.points):
if self.selectedPoint in pts:
pts.remove(self.selectedPoint)
self.selectedPoint = None
self.upToDate = 0
self.updateGraph()
	def setAlphaMode(self, flag):
		"""
		Show only alpha channel
		"""
		self.alphaMode = flag
		# NOTE(review): for "show only alpha" one would expect the RGB/color
		# controls to be hidden when flag is true, i.e. Show(not flag); as
		# written they are shown exactly when alpha mode is on -- confirm
		# intent against the callers.
		self.colorBtn.Show(flag)
		self.redBtn.Show(flag)
		self.greenBtn.Show(flag)
		self.blueBtn.Show(flag)
		self.updateGraph()
def onSaveLut(self, event):
"""
Save a lut file
"""
wc = "BioImageXD lookup table (*.bxdlut)|*.bxdlut|ImageJ Lookup table (*.lut)|*.lut"
filename = GUI.Dialogs.askSaveAsFileName(self, "Save lookup table", "palette.bxdlut", wc, "palette")
if filename:
lib.ImageOperations.saveLUT(self.ctf, filename)
	def onOpenLut(self, event):
		"""
		Load a lut file

		Asks for a lookup table file, loads it into self.ctf, and rebuilds
		the editable control points from the loaded curves.
		"""
		wc = "BioImageXD lookup table (*.bxdlut)|*.bxdlut|ImageJ Lookup table (*.lut)|*.lut"
		filename = GUI.Dialogs.askOpenFileName(self, "Load lookup table", wc, filetype = "palette")
		if filename:
			# askOpenFileName returns a sequence; only the first file is used.
			filename = filename[0]
			Logging.info("Opening palette", filename, kw = "ctf")
			self.freeMode = 0
			lib.ImageOperations.loadLUT(filename, self.ctf)
			self.setFromColorTransferFunction(self.ctf)
			# Derive editable control points from the freshly loaded curves.
			self.getPointsFromFree()
			self.upToDate = 0
			self.updateGraph()
	#		self.updateCTFView()
	def onSetToColor(self, event):
		"""
		Set the ctf to be a specific color

		Replaces all channels with straight ramps from (0, 0) to the chosen
		color at maxval.  At least one RGB component must be 255; if not,
		the color is scaled up and the user is informed.
		"""
		col = event.GetValue()
		color = col.Red(), col.Green(), col.Blue()
		if 255 not in color:
			# Scale so the largest component becomes 255.
			mval = max(color)
			coeff = 255.0 / mval
			ncolor = [int(x * coeff) for x in color]
			Logging.info("New color = ", ncolor, kw = "ctf")
			dlg = wx.MessageDialog(self,
				"The color you selected: %d,%d,%d is incorrect."
				"At least one of the R, G or B components\n"
				"of the color must be 255. Therefore, "
				"I have modified the color a bit. "
				"It is now %d,%d,%d. Have a nice day." % (color[0],
				color[1], color[2], ncolor[0], ncolor[1], ncolor[2]), "Selected color is incorrect", wx.OK | wx.ICON_WARNING)
			dlg.ShowModal()
			dlg.Destroy()
			color = ncolor
		r, g, b = color
		# Each channel becomes a two-point ramp ending at the chosen value.
		self.redpoints = [(0, 0), (self.maxval, r)]
		self.greenpoints = [(0, 0), (self.maxval, g)]
		self.bluepoints = [(0, 0), (self.maxval, b)]
		self.points = [self.redpoints, self.greenpoints, self.bluepoints, self.alphapoints]
		self.freeMode = 0
		self.upToDate = 0
		self.updateGraph()
		self.updateCTFView()
		lib.messenger.send(None, "data_changed", 0)
		self.colorBtn.SetColour(col)
def onCreatePoint(self, event):
"""
Add a point to the function
"""
x, y = event.GetPosition()
x, y = self.canvas.toGraphCoords(x, y, self.maxval)
if not self.freeMode:
d = 10
currd = self.maxval
hasx = 0
pt = (x, y)
self.points[self.color].append(pt)
self.points[self.color].sort()
self.upToDate = 0
self.updateGraph()
	def onEditFunction(self, event):
		"""
		Edit the function

		Left-button-down handler: in freehand mode just remembers the
		position for drawFreeMode; otherwise tries to select an existing
		control point near the click.
		"""
		x, y = event.GetPosition()
		x, y = self.canvas.toGraphCoords(x, y, self.maxval)
		if self.freeMode:
			self.pos = (x, y)
		else:
			d = 0
			currd = 99999999
			hasx = 0
			Logging.info("points for color %d = " % self.color, self.points[self.color])
			for pt in self.points[self.color]:
				d = self.dist((x, y), pt)
				if pt[0] == x:hasx = 1
				# NOTE(review): the break makes this select the FIRST point
				# within selectThreshold (lowest x), not the nearest one --
				# confirm that is the intended behavior.
				if d < self.selectThreshold and d < currd:
					self.selectedPoint = pt
					currd = d
					break
def dist(self, p1, p2):
"""
Return the distance between points p1 and p2
"""
return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
	def drawFreeMode(self, event):
		"""
		Freehand drawing handler: update the current channel's lookup
		table along the mouse drag, linearly interpolating the values
		between the previous and current positions so fast drags leave no
		gaps.
		"""
		x, y = event.GetPosition()
		x, y = self.canvas.toGraphCoords(x, y, self.maxval)
		# Clamp to the valid ranges.
		if y <= 0:y = 0
		if y >= 255:y = 255
		if x <= 0:x = 0
		if x >= self.maxval:x = self.maxval
		self.hasPainted = 1
		update = 0
		if self.pos[0]:
			# Interpolate from the previous drag position to this one.
			x0 = min(self.pos[0], x)
			x1 = max(self.pos[0], x)
			n = (x1 - x0)
			if n:
				d = abs(y - self.pos[1]) / float(n)
				# Flip the slope sign when the drag goes down-right or up-left.
				if x > self.pos[0] and y < self.pos[1]:d *= -1
				if x < self.pos[0] and y > self.pos[1]:d *= -1
				for i in range(x0, x1):
					ny = int(y + (i - x0) * d)
					if ny < 0:ny = 0
					if ny >= 255:ny = 255
					val = self.funcs[self.color][i]
					if val != ny:
						self.funcs[self.color][i] = ny
						update = 1
		val = self.funcs[self.color][x]
		if val != y:
			self.funcs[self.color][x] = y
			update = 1
		if update:
			self.updateGraph()
			self.upToDate = 0
		self.pos = (x, y)
		if not self.upToDate:
			# Defer the (expensive) CTF rebuild so it runs once per burst.
			wx.FutureCall(250, self.updateCTFFromPoints)
def modifyOutmostPoint(self, oldPoint, y, first):
"""
Move an outmost (first or last) point in the CTF
"""
if self.points[self.color].__contains__(oldPoint):
if first:
self.points[self.color][self.points[self.color].index(oldPoint)] = (0, y)
self.selectedPoint = (0, y)
else:
self.points[self.color][self.points[self.color].index(oldPoint)] = (255, y)
self.selectedPoint = (255, y)
	def modifyPoint(self, oldPoint, newPoint):
		"""
		move the given point to a new position

		Removes oldPoint from the current channel (if present) and inserts
		newPoint at the position that keeps the list sorted by x; if no
		later point exists, newPoint is appended.
		"""
		# NOTE(review): 'removed' is assigned but never used.
		removed = 0
		if oldPoint in self.points[self.color]:
			self.modCount -= 1
			self.points[self.color].remove(oldPoint)
		# Find the first point with a larger x and insert before it.
		for i, (x, y) in enumerate(self.points[self.color]):
			if x > newPoint[0] and self.points[self.color][i - 1][0] < x:
				k = i
				if k == 0:k = 1
				self.points[self.color].insert(k, newPoint)
				self.modCount += 1
				self.selectedPoint = newPoint
				return
		self.points[self.color].append(newPoint)
		self.selectedPoint = newPoint
		self.modCount += 1
	def onDrawFunction(self, event):
		"""
		Draw the function

		Mouse-motion handler: while dragging, either paints freehand or
		moves the selected control point.  The endpoints of a channel are
		constrained to x = 0 / x = 255 via modifyOutmostPoint.
		"""
		if event.Dragging():
			if self.freeMode:
				self.drawFreeMode(event)
			else:
				x, y = event.GetPosition()
				x, y = self.canvas.toGraphCoords(x, y, self.maxval)
				if self.selectedPoint:
					index = -1
					if self.points[self.color].__contains__(self.selectedPoint):
						index = self.points[self.color].index(self.selectedPoint)
					# Endpoints may only move vertically; interior points
					# move freely.
					if index == 0:
						self.modifyOutmostPoint(self.selectedPoint, y, first = True)
					elif index == len(self.points[self.color]) - 1:
						self.modifyOutmostPoint(self.selectedPoint, y, first = False)
					else:
						self.modifyPoint(self.selectedPoint, (x, y))
					self.upToDate = 0
					self.updateGraph()
def updatePreview(self):
"""
Send an event updating the preview
"""
if abs(time.time() - self.updateT) > 0.5:
self.updateT = time.time()
lib.messenger.send(None, "data_changed", 0)
	def onFreeMode(self, event):
		"""
		Toggle free mode on / off

		When entering free mode, the current points are baked into the CTF
		and the freehand curves are loaded from it.  When leaving free
		mode after painting, control points are re-derived from the drawn
		curves and pruned to the configured maximum node count.
		"""
		was = 0
		if self.freeMode:was = 1
		if not was and event.GetIsDown():
			# Entering free mode: bake points into the CTF, then load the
			# freehand curves from it.
			self.updateCTFFromPoints()
			self.updateGraph()
			self.freeMode = 1
			self.setFromColorTransferFunction(self.ctf, self.otf)
		if was:
			self.updateCTFFromPoints()
		self.freeMode = event.GetIsDown()
		if not self.freeMode and was and self.hasPainted:
			# Leaving free mode after hand-drawing: extract control points.
			Logging.info("Analyzing free mode for points", kw = "ctf")
			self.getPointsFromFree()
			n = len(self.points)
			tot = 0
			for i, pts in enumerate(self.points):
				tot += len(pts)
			maxpts = self.maxNodes.GetValue()
			if maxpts < tot:
				self.onSetMaxNodes(None)
		self.updateGraph()
def onEditRed(self, event):
"""
Edit the red channel
"""
self.blueBtn.SetValue(0)
self.greenBtn.SetValue(0)
if self.alpha:self.alphaBtn.SetValue(0)
self.color = 0
self.updateGraph()
def onEditAlpha(self, event):
"""
Edit the alpha channel
"""
self.blueBtn.SetValue(0)
self.greenBtn.SetValue(0)
self.redBtn.SetValue(0)
Logging.info("Editing alpha channel")
self.color = 3
self.updateGraph()
def onEditGreen(self, event):
"""
Edit the red channel
"""
self.blueBtn.SetValue(0)
self.redBtn.SetValue(0)
if self.alpha:self.alphaBtn.SetValue(0)
self.color = 1
self.updateGraph()
def onEditBlue(self, event):
"""
Edit the red channel
"""
self.redBtn.SetValue(0)
self.greenBtn.SetValue(0)
if self.alpha:self.alphaBtn.SetValue(0)
self.color = 2
self.updateGraph()
	def restoreDefaults(self, event = None):
		"""
		Restores the default settings for this widget

		Resets the freehand curves to identity ramps (alpha to a 20% ramp)
		and the control points to simple two-point ramps, then empties the
		CTF.
		"""
		self.redfunc = [0] * (self.maxval + 1)
		self.greenfunc = [0] * (self.maxval + 1)
		self.bluefunc = [0] * (self.maxval + 1)
		self.alphafunc = [0] * (self.maxval + 1)
		self.funcs = [self.redfunc, self.greenfunc, self.bluefunc, self.alphafunc]
		# Default ramps; alpha peaks at 51 = 20% of 255.
		self.redpoints = [(0, 0), (self.maxval, 255)]
		self.greenpoints = [(0, 0), (self.maxval, 255)]
		self.bluepoints = [(0, 0), (self.maxval, 255)]
		self.alphapoints = [(0, 0), (self.maxval, 51)]
		self.points = [self.redpoints, self.greenpoints, self.bluepoints, self.alphapoints]
		# NOTE(review): this loop stops at maxval-1, leaving the last table
		# entry 0; probably harmless since point mode overrides it -- confirm.
		for i in xrange(0, self.maxval):
			self.redfunc[i] = i
			self.greenfunc[i] = i
			self.bluefunc[i] = i
			self.alphafunc[i] = int(i * 0.2)
		self.upToDate = 0
		self.ctf.RemoveAllPoints()
def updateGraph(self):
"""
Clears the canvas and repaints the function
"""
if self.freeMode:
self.canvas.paintFreeMode(self.redfunc, self.greenfunc, self.bluefunc, self.alphafunc, maximumValue = self.maxval)
else:
self.canvas.paintTransferFunction(self.alphaMode, self.selectedPoint, red = self.redpoints, green = self.greenpoints, blue = self.bluepoints, alpha = self.alphapoints, drawAlpha = self.alpha, maximumValue = self.maxval)
self.canvas.Refresh()
def updateCTFView(self, evt = None):
"""
Update the palette view of the ctf
"""
if self.upToDate:
return
if not self.freeMode:
self.updateCTFFromPoints()
self.value.paintTransferFunction(self.ctf)
self.upToDate = 1
self.value.Refresh()
self.updatePreview()
	def updateCTFFromPoints(self):
		"""
		Updates the CTF from the values edited in points mode

		In free mode the full lookup tables are copied straight into the
		VTK functions; in points mode each channel is first linearly
		interpolated between its control points.
		"""
		self.ctf.RemoveAllPoints()
		self.otf.RemoveAllPoints()
		if self.freeMode:
			for i in xrange(0, self.maxval+1):
				# VTK expects components in [0.0, 1.0].
				r, g, b = self.redfunc[i], self.greenfunc[i], self.bluefunc[i]
				r /= 255.0
				g /= 255.0
				b /= 255.0
				self.ctf.AddRGBPoint(i, r, g, b)
				if self.alpha:
					a = self.alphafunc[i]
					a /= 255.0
					self.otf.AddPoint(i, a)
		else:
			# Rasterize the per-channel control points into one RGBA table.
			func = []
			for i in range(int(self.maxval + 1)):
				func.append([0, 0, 0, 0])
			self.points = [self.redpoints, self.greenpoints, self.bluepoints, self.alphapoints]
			for col, pointlist in enumerate(self.points):
				pointlist.sort()
				for i in xrange(1, len(pointlist)):
					# Linear interpolation between consecutive points.
					x1, y1 = pointlist[i - 1]
					x2, y2 = pointlist[i]
					dx = x2 - x1
					if dx and (y2 != y1):
						dy = (y2 - y1) / float(dx)
					else:dy = 0
					if x2 > self.maxval:
						x2 = self.maxval
						x1 = x1 - 1
					for x in range(int(x1), int(x2 + 1)):
						func[x][col] = y1 + (x - x1) * dy
			ctfmax = 0
			for i in xrange(int(self.maxval + 1)):
				r, g, b, a = func[i]
				r /= 255.0
				g /= 255.0
				b /= 255.0
				a /= 255.0
				self.ctf.AddRGBPoint(i, r, g, b)
				if self.alpha:
					self.otf.AddPoint(i, a)
				ctfmax = i
	def getColorTransferFunction(self):
		"""
		Returns the color transfer function

		The returned object is the vtkColorTransferFunction this editor
		maintains, not a copy.
		"""
		return self.ctf
	def setFromColorTransferFunction(self, TF, otf = None):
		"""
		Sets the colors of this graph

		Samples the given vtkColorTransferFunction (and optionally the
		opacity function) at every integer intensity and stores the
		results in the freehand lookup tables, then refreshes the view.
		"""
		self.minval, self.maxval = TF.GetRange()
		# Invalidate the cached graph background (the range may change).
		self.background = None
		self.minval = int(self.minval)
		self.maxval = int(self.maxval)
		self.redfunc = [0] * (self.maxval + 1)
		self.greenfunc = [0] * (self.maxval + 1)
		self.bluefunc = [0] * (self.maxval + 1)
		self.alphafunc = [0] * (self.maxval + 1)
		self.funcs = [self.redfunc, self.greenfunc, self.bluefunc, self.alphafunc]
		for i in range(self.maxval + 1):
			# GetColor fills val with components in [0.0, 1.0]; scale to 0-255.
			val = [0, 0, 0]
			TF.GetColor(i, val)
			r, g, b = val
			r *= 255
			g *= 255
			b *= 255
			r = int(r)
			g = int(g)
			b = int(b)
			self.redfunc[i] = r
			self.greenfunc[i] = g
			self.bluefunc[i] = b
			if otf:
				a = otf.GetValue(i)
				self.alphafunc[i] = int(a * 255)
		self.updateCTFView()
@staticmethod
def slope(x0, y0, x1, y1):
return float((y1 - y0)) / float((x1 - x0))
	def getPointsFromFree(self):
		"""
		Method that analyzes the color transfer function to
		determine where to insert control points for the user
		to edit

		A control point is placed wherever a channel's slope changes by
		more than ptThreshold (plus always at x = 0 and x = maxval).
		Finally all y values are rescaled to the 0-255 graph range.
		"""
		# Per-channel state: x/y of the last accepted point and the slope
		# measured at that point.
		xr0, xg0, xb0, xa0 = 0, 0, 0, 0
		kr, kg, kb, ka = 1, 1, 1, 1
		yr0, yg0, yb0, ya0 = 0, 0, 0, 0
		r0, g0, b0, a0 = 0, 0, 0, 0
		self.redpoints = []
		self.greenpoints = []
		self.bluepoints = []
		self.alphapoints = []
		# Go through each intensity value
		for x in range(int(self.maxval + 1)):
			if self.alpha:
				a = self.otf.GetValue(x)
				a *= self.maxval
				a = int(a)
			# Read the color from the CTF
			val = [0, 0, 0]
			self.ctf.GetColor(x, val)
			r, g, b = val
			# Convert the value to range 0-255 (from 0.0 - 1.0)
			r *= self.maxval
			g *= self.maxval
			b *= self.maxval
			r = int(r)
			g = int(g)
			b = int(b)
			if x == 0:
				r0, g0, b0 = r, g, b
				if self.alpha:
					a0 = a
			if x == 1:
				# Initial slope estimate from the first two samples.
				kr = ColorTransferEditor.slope(0, r0, 1, r)
				kg = ColorTransferEditor.slope(0, g0, 1, g)
				kb = ColorTransferEditor.slope(0, b0, 1, b)
				if self.alpha:
					ka = ColorTransferEditor.slope(0, a0, 1, a)
			if x in [0, int(self.maxval)]:
				# Endpoints are always control points.
				self.redpoints.append((x, r))
				self.greenpoints.append((x, g))
				self.bluepoints.append((x, b))
				if self.alpha:
					self.alphapoints.append((x, a))
			elif x > 1:
				# Add a point when the slope changed noticeably since the
				# last accepted point (and the value actually moved).
				k = ColorTransferEditor.slope(xr0, r0, x, r)
				if abs(k - kr) > self.ptThreshold and x > xr0 + 1 and r != r0:
					self.redpoints.append((x, r))
					kr = k
					xr0 = x
					r0 = r
				k = ColorTransferEditor.slope(xg0, g0, x, g)
				if abs(k - kg) > self.ptThreshold and x > xg0 + 1 and g != g0:
					self.greenpoints.append((x, g))
					kg = k
					xg0 = x
					g0 = g
				k = ColorTransferEditor.slope(xb0, b0, x, b)
				if abs(k - kb) > self.ptThreshold and x > xb0 + 1 and b != b0:
					self.bluepoints.append((x, b))
					kb = k
					xb0 = x
					b0 = b
				if self.alpha:
					k = ColorTransferEditor.slope(xa0, a0, x, a)
					if abs(k - ka) > self.ptThreshold and x > xa0 + 1 and a != a0:
						self.alphapoints.append((x, a))
						ka = k
						xa0 = x
						a0 = a
		# Rescale y values from the 0-maxval sampling range to 0-255.
		coeff = 255.0 / self.maxval
		self.redpoints = [(x, coeff * y) for (x, y) in self.redpoints]
		self.greenpoints = [(x, coeff * y) for (x, y) in self.greenpoints]
		self.bluepoints = [(x, coeff * y) for (x, y) in self.bluepoints]
		self.alphapoints = [(x, coeff * y) for (x, y) in self.alphapoints]
		self.points = [self.redpoints, self.greenpoints, self.bluepoints, self.alphapoints]
		self.updateGraph()
	def setColorTransferFunction(self, TF):
		"""
		Sets the color transfer function that is configured
		by this widget

		Adopts TF as the edited CTF, derives editable points from it,
		resets the alpha points to the default ramp, and updates the color
		button to the CTF's color at maxval.
		"""
		self.upToDate = 0
		self.ctf = TF
		self.minval, self.maxval = TF.GetRange()
		# Invalidate the canvas's cached background (range may differ).
		self.canvas.background = None
		self.getPointsFromFree()
		self.alphapoints = [(0, 0), (self.maxval, 51)]
		self.points = [self.redpoints, self.greenpoints, self.bluepoints, self.alphapoints]
		val = [0, 0, 0]
		self.ctf.GetColor(self.maxval, val)
		r, g, b = val
		r *= 255
		g *= 255
		b *= 255
		col = wx.Colour(int(r), int(g), int(b))
		self.colorBtn.SetColour(((int(r), int(g), int(b))))
		self.colorBtn.Refresh()
		# We need to make sure there are no more nodes than given in the GUI.
		self.onSetMaxNodes(None)
	#	self.updateGraph()
		self.updateCTFView()
	def getOpacityTransferFunction(self):
		"""
		Returns the opacity function

		The returned object is the vtkPiecewiseFunction this editor
		maintains, not a copy.
		"""
		return self.otf
	def setOpacityTransferFunction(self, otf):
		"""
		Set the opacity function edited by this widget and refresh the
		derived control points and the graph.  No-op if otf is already
		the current function.
		"""
		if otf == self.otf:
			return
		self.otf = otf
		self.getPointsFromFree()
		self.updateGraph()
| [
"fede.anne95@hotmail.it"
] | fede.anne95@hotmail.it |
78c30514dd058404baecd9f213d6b9316b21ba4c | cff8402b68fadc94f4119d09ca6a72ce5419705b | /mnist_data.py | 22aebf5e2b4d9474c4e6da66d496dbe896de9e4f | [] | no_license | fabienpesquerel/cppn | b1b1d2805514f163b89c356d17ed4b719fcdcbc5 | 1e5779f80ea63a2acb7ace0f25898a7615aa6e5b | refs/heads/master | 2020-04-27T18:24:49.944320 | 2019-03-11T16:53:17 | 2019-03-11T16:53:17 | 174,568,565 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import torchvision.datasets as datasets
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
transform = transforms.Compose([transforms.ToTensor()])
class MNIST():
    """Convenience wrapper exposing the MNIST train/test splits as DataLoaders."""
    def __init__(self, download=True, transform=transform, batch_size=1):
        """
        Build train_loader and test_loader over the torchvision MNIST
        dataset stored under ./data.
        """
        training_set = datasets.MNIST(root='./data', download=download,
                                      transform=transform, train=True)
        testing_set = datasets.MNIST(root='./data', download=download,
                                     transform=transform, train=False)
        # Shuffle only the training stream; evaluation order stays fixed.
        self.train_loader = DataLoader(dataset=training_set,
                                       batch_size=batch_size,
                                       shuffle=True)
        self.test_loader = DataLoader(dataset=testing_set,
                                      batch_size=batch_size,
                                      shuffle=False)
| [
"fabien.pesquerel@ens.fr"
] | fabien.pesquerel@ens.fr |
500aa9e39fc258cd7469315698df58d1ec6a24fe | ee027978e04ce2b97a235ee4b59a813941d09c0e | /find_stream_memebers_tgas.py | 71d19fa0c05b2f1167c816fc0799cff56f7f8a5e | [] | no_license | kcotar/Aquarius_membership | da45816d39b822df67071a396ec6f977f624dfcf | 97a95cc3f9197eadcfd9bcb6d34eeb61dbb35a9e | refs/heads/master | 2021-03-16T05:21:04.357609 | 2017-07-05T06:42:48 | 2017-07-05T06:42:48 | 83,285,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,223 | py | import os, glob
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.coordinates as coord
from astroquery.simbad import Simbad
from astropy.table import Table, join, Column
from velocity_transformations import *
# --------------------------------------------------------
# ---------------- Read Data -----------------------------
# --------------------------------------------------------
# read GALAH data
# GALAH spectroscopic parameters and the GALAH-TGAS cross-match catalogue.
galah_data_dir = '/home/klemen/GALAH_data/'  # the same for gigli and local pc
galah_param = Table.read(galah_data_dir+'sobject_iraf_param_1.1.fits')
galah_tgas_xmatch = Table.read(galah_data_dir+'galah_tgas_xmatch.csv')
# join both datasets
tgas_data = join(galah_param, galah_tgas_xmatch, keys='sobject_id', join_type='inner')
tgas_fits_files = glob.glob('GaiaTgas/TgasSource_*.fits')
# radial velocity of observed stream
rv_stream = 200.  # km/s
# radiant coordinates for stream
ra_stream = np.deg2rad(164.)  # alpha - RA
de_stream = np.deg2rad(13.)  # delta - DEC
# velocity vector of stream in xyz equatorial coordinate system with Earth in the center of it
v_xyz_stream = compute_xyz_vel(ra_stream, de_stream, rv_stream)

# get theoretical observed rv pmra pmdec, based on streams rv values
# Diagnostic plots of the predicted observables as a function of RA, for a
# grid of declinations.
ra_range = np.deg2rad(np.arange(0, 360, 0.5))
plt.plot(ra_range, compute_pmra(ra_range, np.deg2rad(10.), 500., v_xyz_stream))
plt.savefig('pmra.png')
plt.close()
for rad_deg in np.arange(-20., 90., 10.):
	plt.plot(ra_range, compute_pmdec(ra_range, np.deg2rad(rad_deg), 500., v_xyz_stream))
plt.savefig('pmdec.png')
plt.close()
for rad_deg in np.arange(-20., 90., 10.):
	plt.plot(ra_range, compute_rv(ra_range, np.deg2rad(rad_deg), v_xyz_stream))
plt.savefig('rv.png')
plt.close()

# Selection thresholds for candidate stream members.
rv_thr = 20.
pmra_thr = 5.
pmdec_thr = 5.
g_mag_thr = 10.5
parsec_thr = 1000.

Simbad.add_votable_fields('otype', 'bibcodelist(1900-2017)')
txt_file = open('possbile_star_ids.txt', 'w')
output_cols = ['tycho2_id', 'ra', 'dec', 'parallax', 'parallax_error', 'pmra_stream', 'pmra', 'pmra_error', 'pmdec_stream', 'pmdec', 'pmdec_error', 'rv_stream', 'phot_g_mean_mag']
tgas_results = Table(names=output_cols,
					 dtype=['S11', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8'])
for tgas_fits in tgas_fits_files:
	print 'Working on Tgas file: '+tgas_fits.split('/')[1]
	# read file
	tgas_data = Table.read(tgas_fits)
	# remove problems with masks
	tgas_data = tgas_data.filled()
	print ' Number of stars in dataset: '+str(len(tgas_data))
	# convert to parsec distance
	star_parsec = (tgas_data['parallax'].data * u.mas).to(u.parsec, equivalencies=u.parallax())  # use of .data to remove units as they are not handled corectlly by astropy
	tgas_data.add_column(Column(star_parsec, name='parsec'))
	# compute predicted stream pmra and pmdec, based on stars ra, dec and parsec distance
	rv_stream_predicted = compute_rv(np.deg2rad(tgas_data['ra']),
									 np.deg2rad(tgas_data['dec']),
									 v_xyz_stream)
	pmra_stream_predicted = compute_pmra(np.deg2rad(tgas_data['ra']),
										 np.deg2rad(tgas_data['dec']),
										 tgas_data['parsec'],
										 v_xyz_stream)
	pmdec_stream_predicted = compute_pmdec(np.deg2rad(tgas_data['ra']),
										   np.deg2rad(tgas_data['dec']),
										   tgas_data['parsec'],
										   v_xyz_stream)
	tgas_data.add_column(Column(rv_stream_predicted, name='rv_stream'))
	tgas_data.add_column(Column(pmra_stream_predicted, name='pmra_stream'))
	tgas_data.add_column(Column(pmdec_stream_predicted, name='pmdec_stream'))
	# filter data based on predefined search criteria
	# A star is a candidate when its observed proper motions agree with the
	# stream prediction within the thresholds and its distance is positive
	# and below parsec_thr.
	idx_possible = np.logical_and(np.logical_and(np.abs(pmra_stream_predicted - tgas_data['pmra']) <= pmra_thr,
												 np.abs(pmdec_stream_predicted - tgas_data['pmdec']) <= pmdec_thr),
								  np.logical_and(tgas_data['parsec'].data <= parsec_thr,
												 tgas_data['parsec'].data > 0.))
	# idx_possible = np.logical_and(np.abs(rv_stream_predicted - tgas_data['rv_guess']) <= rv_thr,
	#                               idx_possible)
	n_possible = np.sum(idx_possible)
	print ' Possible members: '+str(n_possible)
	if n_possible == 0:
		continue
	search_radi = 5 * u.arcsec
	for tgas_star in tgas_data[output_cols][idx_possible]:
		tgas_results.add_row(tgas_star)
		if tgas_star['tycho2_id'][0] != ' ':
			txt_file.write('TYC '+tgas_star['tycho2_id']+'\n')
		# star_pos = coord.SkyCoord(ra=tgas_star['ra']*u.deg, dec=tgas_star['dec']*u.deg, frame='icrs')
		# Simbad.query_region(star_pos, radius=search_radi)
		# q_res = Simbad.query_object('TYC '+tgas_star['tycho2_id'])
		# if q_res is None:
		#     continue
		# if (q_res['OTYPE'] == '**').any():
		#     print tgas_star[0]
txt_file.close()
tgas_results.write('possible_stars.csv', format='ascii.csv')
| [
"cotar.klemen@gmail.com"
] | cotar.klemen@gmail.com |
ac8ceb44c203595713de778e4c11270b14cd678e | 6e601105760f09d3c9f5306e18e4cf085f0bb4a2 | /1000-9999/3020.py | 0d82feec7e02c057f5ee0431af44c4cc9bf6c8c6 | [] | no_license | WSJI0/BOJ | 6412f69fddd46c4bcc96377e2b6e013f3bb1b524 | 160d8c13f72d7da835d938686f433e7b245be682 | refs/heads/master | 2023-07-06T15:35:50.815021 | 2023-07-04T01:39:48 | 2023-07-04T01:39:48 | 199,650,520 | 2 | 0 | null | 2020-04-20T09:03:03 | 2019-07-30T12:48:37 | Python | UTF-8 | Python | false | false | 1,829 | py | '''
3020번
개똥벌레
'''
import sys
def biSearchLeft(e, target):
    """Binary-search the sorted sequence *target* for *e*.

    Returns the index of the leftmost occurrence of *e*, or the string
    'no' when *e* is absent (callers compare the result against 'no').
    """
    lo, hi = 0, len(target) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if target[mid] < e:
            lo = mid + 1
        elif target[mid] > e:
            hi = mid - 1
        else:
            # Hit a match: walk left over duplicates to the first one.
            idx = mid
            while idx > 0 and target[idx - 1] == e:
                idx -= 1
            return idx
    return 'no'
def biSearchRight(e, target):
    """Binary-search the sorted sequence *target* for *e*.

    Returns the index of the rightmost occurrence of *e*, or the string
    'no' when *e* is absent (callers compare the result against 'no').
    """
    lo, hi = 0, len(target) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if target[mid] < e:
            lo = mid + 1
        elif target[mid] > e:
            hi = mid - 1
        else:
            # Hit a match: walk right over duplicates to the last one.
            idx = mid
            last = len(target) - 1
            while idx < last and target[idx + 1] == e:
                idx += 1
            return idx
    return 'no'
# n obstacles in a cave of height h; obstacles alternate between the two
# kinds (presumably stalagmite/stalactite per BOJ 3020 — confirm against
# the problem statement).
n, h=map(int, sys.stdin.readline().split())
cave=[]
for i in range(n):
    if i%2==0:
        # even index: length stored positive
        cave.append(int(sys.stdin.readline()))
    else:
        # odd index: length stored negative so both kinds share one sorted list
        cave.append(-int(sys.stdin.readline()))
cave.sort()
# road[j]/road2[j]: obstacle counts contributed by each kind at flight level j.
road=[0]*h
road2=[0]*h
for j in range(h):
    k=h-j
    # count positive entries with value exactly k (leftmost match to end)
    result=biSearchLeft(k, cave)
    if result!='no':
        road[j]+=(n-result)
    else:
        # no obstacle of this exact length: inherit the nearest lower level's count
        if j!=0:
            for i in range(1, j+1):
                if road[j-i]!=0:
                    road[j]+=road[j-i]
                    break
for l in range(h):
    k=-(h-l)
    # count negative entries with value exactly k (start to rightmost match)
    result=biSearchRight(k, cave)
    if result!='no':
        road2[l]+=result+1
    else:
        if l!=0:
            for i in range(1, l+1):
                if road2[l-i]!=0:
                    road2[l]+=road2[l-i]
                    break
road2.reverse()
# Find the minimum total obstacles over all levels and how many levels attain it.
root=99999999999
cnt=1
for y in range(h):
    rr=road[y]+road2[y]
    if root>rr:
        root=rr
        cnt=1
    elif root==rr:
        cnt+=1
print(root, cnt)
"lifedev@naver.com"
] | lifedev@naver.com |
2d5f4d8a08f7ead666b5e713009d7cf2d07a0ce4 | 3271c4619c9c83293132379647122e2bd654e5bd | /jubatus/classifier/client.py | 5eab8d830571c0d78641f2ff2bb36722311e4db5 | [
"MIT"
] | permissive | hirokiky/jubatus-python-client | bba24667d9236c23f24f6a2f0a2dee23adef6692 | 7d8597e3fa1038db607e8b56e4e6c333ee272ad1 | refs/heads/master | 2021-01-24T21:53:48.929755 | 2015-06-22T05:12:32 | 2015-06-22T05:12:32 | 37,836,530 | 1 | 0 | null | 2015-06-22T03:53:13 | 2015-06-22T03:53:12 | Python | UTF-8 | Python | false | false | 1,223 | py | # This file is auto-generated from classifier.idl(0.6.4-33-gcc8d7ca) with jenerator version 0.5.1-457-g49229fa/master
# *** DO NOT EDIT ***
import msgpackrpc
import jubatus.common
from .types import *
from jubatus.common.types import *
class Classifier(jubatus.common.ClientBase):
  # RPC client for the Jubatus classifier service.  This class is
  # auto-generated by jenerator from classifier.idl (see file header);
  # regenerate from the IDL instead of hand-editing.

  # host/port: msgpack-rpc endpoint; name: cluster/task name; timeout in seconds.
  def __init__(self, host, port, name, timeout=10):
    super(Classifier, self).__init__(host, port, name, timeout)

  # Train with labeled data; returns the number of examples accepted (int).
  def train(self, data):
    return self.jubatus_client.call("train", [data], TInt(True, 4), [TList(
        TUserDef(LabeledDatum))])

  # Classify each datum; returns a list (per datum) of EstimateResult lists.
  def classify(self, data):
    return self.jubatus_client.call("classify", [data], TList(TList(TUserDef(
        EstimateResult))), [TList(TDatum())])

  # Return all labels known to the model.
  def get_labels(self):
    return self.jubatus_client.call("get_labels", [], TList(TString()), [])

  # Register a new label; returns True on success.
  def set_label(self, new_label):
    return self.jubatus_client.call("set_label", [new_label], TBool(), [TString(
        )])

  # Discard the whole model; returns True on success.
  def clear(self):
    return self.jubatus_client.call("clear", [], TBool(), [])

  # Remove one label from the model; returns True on success.
  def delete_label(self, target_label):
    return self.jubatus_client.call("delete_label", [target_label], TBool(),
        [TString()])
| [
"hirokiky@gmail.com"
] | hirokiky@gmail.com |
af34bd61ab24bbcf5232ffdc1c7af0d23b6f5950 | 269eb633ee96ee2b8089f4c54603825d4b46cd5c | /post/views.py | 135facdfb1eab061b2248520f53d1ec641120531 | [] | no_license | ExMiracle/idm-board | c00026970e79e6b75bd998a4692177acc4c449a4 | 14dd4e19a3d689e57fe2995d3283e18c6aac645e | refs/heads/master | 2022-11-30T12:20:45.785838 | 2019-11-09T23:23:01 | 2019-11-09T23:23:01 | 209,656,458 | 0 | 0 | null | 2022-11-22T04:46:46 | 2019-09-19T22:06:25 | Python | UTF-8 | Python | false | false | 2,394 | py | from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from .models import Post
from .serializers import PostSerializer
from itertools import chain
class PostViewSet(viewsets.ModelViewSet):
    """
    API endpoint that shows all posts.

    Extra routes (via @action):
      POST /posts/new_thread/      create a thread
      POST /posts/{pk}/new_reply/  reply to thread {pk}
      GET  /posts/{pk}/query/      thread {pk} plus all its replies
      GET  /posts/catalog/         all threads, newest activity first
      GET  /posts/index/           threads with up to 3 replies each
    """
    queryset = Post.objects.all().order_by('date_posted')
    serializer_class = PostSerializer
    @action(detail=False, methods=['post'])
    def new_thread(self, request):
        # Delegates creation (including file handling) to the Post manager.
        thread = Post.objects.new_thread(request.POST, request.FILES)
        return Response('success')
        # TODO: check if isinstance(list, post): if it is error
    @action(detail=True, methods=['post'])
    def new_reply(self, request, pk=None):
        post = Post.objects.new_reply(request.POST, request.FILES, self.kwargs['pk'])
        return Response('success')
        # TODO: check if isinstance(list, post): if it is error
    @action(detail=True)
    def query(self, request, pk=None):
        # Return the thread row followed by its replies as one result set.
        thread_queryset = Post.objects.filter(id=self.kwargs['pk'])
        if thread_queryset:
            thread = Post.objects.get(id=self.kwargs['pk'])
            replies = thread.replies.all()
            serializer = self.get_serializer(thread_queryset.union(replies), many=True)
            return Response(serializer.data)
        return Response("thread doesn't exist")
    # TODO: do catalog with this api endpoint
    @action(detail=False)
    def catalog(self, request):
        queryset = Post.objects.filter(is_thread=True).order_by('-updated_at')
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
    @action(detail=False)
    def index(self, request):
        # NOTE(review): this loop issues one query per thread (N+1 pattern);
        # consider prefetch_related('replies') if the board grows.
        queryset = Post.objects.filter(is_thread=True).order_by('-updated_at')
        if queryset:
            result = []
            for thread in queryset:
                thread = Post.objects.filter(id=thread.id)
                # only the first 3 replies of each thread appear on the index
                thread_replies = thread[0].replies.all()[:3]
                # container.extend(thread).extend(thread_replies)
                # if thread_replies:
                thread_with_replies = list(chain(thread, thread_replies))
                result.extend(thread_with_replies)
            serializer = self.get_serializer(result, many=True)
            return Response(serializer.data)
        return Response('No threads yet')
| [
"exmiracle2@gmail.com"
] | exmiracle2@gmail.com |
1e0c5c10d7cb631437246a0b6cd5dcefa164091d | 504344fc66e8d54081a17306d3012a16bbb81ee7 | /screen_parameter.py | ad5885fe04aa3eef465c8c6bc8fd54d0eadcb399 | [] | no_license | Ryanshuai/auto_pubg | 814753644a8e8e7aa3d7ca3c346a9e05b825c00d | 696f33f888efc441a74e142db878e836bbf3efee | refs/heads/master | 2022-09-21T12:13:24.155393 | 2020-11-12T20:03:43 | 2020-11-12T20:03:43 | 153,748,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | from win32api import GetSystemMetrics
# Current desktop resolution and scale factors relative to the 1920x1080
# baseline the pixel constants below were tuned on.
screen_width = GetSystemMetrics(0)
screen_high = GetSystemMetrics(1)
screen_h_factor = screen_high / 1080
screen_w_factor = screen_width / 1920
# Where and how large the overlay/status is shown (pixels).
show_position_y = 300 * screen_h_factor
show_position_x = 1650 * screen_w_factor
show_size_y = 50
show_size_x = 200
# Accepted area/side-length ranges for detected on-screen icons (pixels).
min_icon_area = 10 * 10 * screen_h_factor * screen_h_factor
max_icon_area = 30 * screen_h_factor * 30 * screen_h_factor
min_icon_side_len = 30 * screen_h_factor
max_icon_side_len = 50 * screen_h_factor
min_rect_side_len = 45 * screen_h_factor
max_rect_side_len = 55 * screen_h_factor
# Maximum per-pixel difference tolerated when matching icon templates.
max_icon_diff = 20
# Minimum RGB value for a pixel to count as "white", per screen region.
white_min_rgb = {
    'name': 245,
    'in-tab': 235,
    'posture': 200,
    'fire-mode': 235,
}
# Minimum fraction of white pixels required per region (area-scaled).
min_white_rate = {
    'name': 0.04 * screen_h_factor * screen_h_factor,
    'in-tab': 0.001 * screen_h_factor * screen_h_factor,
    'posture': 0.04 * screen_h_factor * screen_h_factor,
    'fire-mode': 0.0001 * screen_h_factor * screen_h_factor
}
# Expected bounding-box sizes (pixels) of the gun-name text region.
min_gun_name_high = 15 * screen_h_factor
max_gun_name_high = 25 * screen_h_factor
min_gun_name_width = 105 * screen_h_factor
max_gun_name_width = 120 * screen_h_factor
# Expected bounding-box sizes of the fire-mode indicator.
min_fire_mode_high = 15 * screen_h_factor
max_fire_mode_high = 25 * screen_h_factor
min_fire_mode_width = 12 * screen_h_factor
max_fire_mode_width = 17 * screen_h_factor
# Expected bounding-box sizes of the in-tab indicator.
min_in_tab_high = 7 * screen_h_factor
max_in_tab_high = 13 * screen_h_factor
min_in_tab_width = 28 * screen_h_factor
max_in_tab_width = 34 * screen_h_factor
| [
"1018718155@qq.com"
] | 1018718155@qq.com |
b62d522b0129de39dc58b5fa10a7f267a9442bfe | fee47505c5352b1c65ff365ec36d80dc6448ecba | /BonusAccountTask/urls.py | 68a2a28bd8934fd48c209144cfa2e62f8502e95d | [] | no_license | esheloncr/bonus_account | 62ffb5b2a5667451cb168817086e4a3f106b998a | 2e917ad7831b65bd035facfd32b2daedf5d2efd6 | refs/heads/master | 2023-07-26T19:37:47.036587 | 2021-09-06T11:07:11 | 2021-09-06T11:07:11 | 353,766,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | """BonusAccountTask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from django.views.static import serve
from django.conf import settings
from rest_framework.documentation import include_docs_urls
from .schema import CoreAPISchemaGenerator
# URL routes: admin site, API, browsable CoreAPI docs (open access),
# and static files served directly by Django.
urlpatterns = [
    path('admin/', admin.site.urls),
    # NOTE(review): "Account.api.v1,2.api_urls" contains a comma — verify
    # this dotted path actually imports; it looks like a typo for a version
    # module name.
    path('api/', include("Account.api.v1,2.api_urls")),
    path('doc/', include_docs_urls(title='API', authentication_classes=[], permission_classes=[],
                                   generator_class=CoreAPISchemaGenerator), name="docs"),
    url(r'^static/(?P<path>.*)$', serve, {'document_root': settings.STATIC_ROOT})
]
| [
"igoninalexeynn@gmail.com"
] | igoninalexeynn@gmail.com |
e307844074128dcc81cf48921700f732bae83d10 | f8bdc46409c9f5eaf3d85ef157260589462d941a | /demos/selective_dualarm_stowing/python/selective_dualarm_stowing/models/dualarm_alex.py | e516d9c01775ef3f72bfc8ebf3630f36b34503c3 | [
"MIT",
"BSD-3-Clause"
] | permissive | start-jsk/jsk_apc | 2e268f8b65e9d7f4f9cc4416dc8383fd0a7b9750 | c4e349f45ef38457dc774e33f6902acf1a1540a6 | refs/heads/master | 2023-09-05T09:06:24.855510 | 2023-09-01T17:10:12 | 2023-09-01T17:10:12 | 25,620,908 | 36 | 25 | NOASSERTION | 2023-09-01T17:10:14 | 2014-10-23T05:28:31 | Common Lisp | UTF-8 | Python | false | false | 5,285 | py | import chainer
import chainer.functions as F
import chainer.links as L
class DualarmAlex(chainer.Chain):
    """AlexNet-style CNN with two heads.

    One head predicts per-cause failure probabilities (the first half of the
    failure vector is used for single-arm samples, the second half for
    dual-arm samples, selected per-row by the ``-1`` sentinel in ``t``);
    the other head classifies the grasp/stow style.

    NOTE(review): ``self.train_conv`` is read in ``__call__`` but never set
    here — it is presumably assigned by the training script; confirm before
    reusing this class standalone.
    """

    def __init__(self, n_failure, n_class, threshold=0.5, pt_func=None):
        """Build the network.

        :param n_failure: total number of failure causes (single + dual).
        :param n_class: number of grasp-style classes.
        :param threshold: probability above which a failure is predicted.
        :param pt_func: optional external hook kept for callers.
        """
        self.threshold = threshold
        self.pt_func = pt_func
        self.n_failure = n_failure
        super(DualarmAlex, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(3, 96, 11, stride=4, pad=4)
            self.bn1 = L.BatchNormalization(96)
            self.conv2 = L.Convolution2D(96, 256, 5, stride=1, pad=1)
            self.bn2 = L.BatchNormalization(256)
            self.conv3 = L.Convolution2D(256, 384, 3, stride=1, pad=1)
            self.conv4 = L.Convolution2D(384, 384, 3, stride=1, pad=1)
            self.conv5 = L.Convolution2D(384, 256, 3, stride=1, pad=1)
            self.bn5 = L.BatchNormalization(256)
            self.fc6_failure = L.Linear(33280, 4096)
            self.fc7_failure = L.Linear(4096, 4096)
            # two logits (fail / no-fail) per failure cause
            self.fc8_failure = L.Linear(4096, 2*n_failure)
            self.fc6_cls = L.Linear(33280, 4096)
            self.fc7_cls = L.Linear(4096, 4096)
            self.fc8_cls = L.Linear(4096, n_class)

    def __call__(self, x, t=None, t_cls=None):
        """Forward pass; computes losses/metrics when targets are given.

        :param x: input image batch.
        :param t: per-cause failure targets; rows with ``t[:, half_n] == -1``
            are single-arm samples (first half of causes), others dual-arm.
        :param t_cls: grasp-style class targets.
        :returns: total loss when targets are given, else None (predictions
            are left in ``self.fail_prob`` / ``self.cls_score``).
        """
        # Shared convolutional trunk.
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.relu(self.bn2(self.conv2(h)))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.bn5(self.conv5(h)))
        h = F.max_pooling_2d(h, 3, stride=3)
        conv4 = h
        if not self.train_conv:
            # Freeze the trunk: stop gradients from the heads.
            h.unchain_backward()
        # failure prediction head
        h = F.dropout(F.relu(self.fc6_failure(conv4)), ratio=0.5)
        h = F.dropout(F.relu(self.fc7_failure(h)), ratio=0.5)
        h = self.fc8_failure(h)
        h = h.reshape((-1, 2, self.n_failure))
        fc8_failure = h
        fail_prob = F.softmax(fc8_failure, axis=1)[:, 1, :]
        self.fail_prob = fail_prob
        # classification prediction head
        h = F.dropout(F.relu(self.fc6_cls(conv4)), ratio=0.5)
        h = F.dropout(F.relu(self.fc7_cls(h)), ratio=0.5)
        h = self.fc8_cls(h)
        cls_score = h
        self.cls_score = cls_score
        if t is None:
            assert not chainer.config.train
            return
        # failure loss
        # BUGFIX: use floor division — under Python 3 the old `/ 2` yielded a
        # float, and every slice/index below (t[:, half_n], [:, :, :half_n])
        # would raise TypeError. `//` gives the same result under Python 2.
        half_n = self.n_failure // 2
        is_singlearm_mask = t[:, half_n] == -1
        # loss for single arm
        h_single = fc8_failure[is_singlearm_mask][:, :, :half_n]
        t_single = t[is_singlearm_mask][:, :half_n]
        # Requires: https://github.com/chainer/chainer/pull/3310
        if h_single.data.shape[0] > 0:
            loss_single = F.softmax_cross_entropy(
                h_single, t_single, normalize=False)
        else:
            loss_single = None
        # loss for dual arm
        h_dual = fc8_failure[~is_singlearm_mask][:, :, half_n:]
        t_dual = t[~is_singlearm_mask][:, half_n:]
        # Requires: https://github.com/chainer/chainer/pull/3310
        if h_dual.data.shape[0] > 0:
            loss_dual = F.softmax_cross_entropy(
                h_dual, t_dual, normalize=False)
        else:
            loss_dual = None
        # classification loss
        cls_loss = F.softmax_cross_entropy(cls_score, t_cls)
        self.cls_loss = cls_loss
        if loss_single is None:
            self.fail_loss = loss_dual
        elif loss_dual is None:
            self.fail_loss = loss_single
        else:
            self.fail_loss = loss_single + loss_dual
        self.loss = self.fail_loss + self.cls_loss
        # calculate acc on CPU
        fail_prob_single = fail_prob[is_singlearm_mask][:, :half_n]
        fail_prob_single = chainer.cuda.to_cpu(fail_prob_single.data)
        t_single = chainer.cuda.to_cpu(t_single)
        fail_prob_dual = fail_prob[~is_singlearm_mask][:, half_n:]
        fail_prob_dual = chainer.cuda.to_cpu(fail_prob_dual.data)
        t_dual = chainer.cuda.to_cpu(t_dual)
        fail_label_single = fail_prob_single > self.threshold
        fail_label_single = fail_label_single.astype(self.xp.int32)
        fail_label_dual = fail_prob_dual > self.threshold
        fail_label_dual = fail_label_dual.astype(self.xp.int32)
        # a sample counts as correct only if every cause label matches
        fail_acc_single = (t_single == fail_label_single).all(axis=1)
        fail_acc_single = fail_acc_single.astype(self.xp.int32).flatten()
        fail_acc_dual = (t_dual == fail_label_dual).all(axis=1)
        fail_acc_dual = fail_acc_dual.astype(self.xp.int32).flatten()
        self.fail_acc = self.xp.sum(fail_acc_single)
        self.fail_acc += self.xp.sum(fail_acc_dual)
        self.fail_acc /= float(len(fail_acc_single) + len(fail_acc_dual))
        cls_pred = F.argmax(cls_score, axis=1)
        cls_pred = chainer.cuda.to_cpu(cls_pred.data)
        t_cls = chainer.cuda.to_cpu(t_cls)
        self.cls_acc = self.xp.sum(t_cls == cls_pred)
        self.cls_acc /= float(len(t_cls))
        chainer.reporter.report({
            'loss': self.loss,
            'cls/loss': self.cls_loss,
            'cls/acc': self.cls_acc,
            'fail/loss': self.fail_loss,
            'fail/acc': self.fail_acc,
        }, self)
        if chainer.config.train:
            return self.loss
"shingogo@hotmail.co.jp"
] | shingogo@hotmail.co.jp |
740c1c584f9b2893fd67d3413b16390781eb0b4f | ff2c39ac2ef4827b0627adb8e6865b6d4e1d7f42 | /data_gathering/create_pickle.py | e54e2c95584fcfcce8dd28b26522406af51a22ee | [] | no_license | lancejchen/ssd_vehicle_detection | 7d939ff1f50d26d66a0e6f5f708da4d4f271cbe6 | d5deac90ce3a9e92d94fd48fbe585779eda1dc0a | refs/heads/master | 2021-01-23T03:27:58.703101 | 2017-03-24T01:55:14 | 2017-03-24T01:55:14 | 86,083,059 | 1 | 0 | null | 2017-03-24T15:29:42 | 2017-03-24T15:29:42 | null | UTF-8 | Python | false | false | 6,012 | py | '''
Create raw data pickle file
data_raw is a dict mapping image_filename -> [{'class': class_int, 'box_coords': (x1, y1, x2, y2)}, {...}, ...]
'''
import numpy as np
import pickle
import re
import os
import time
from PIL import Image
# Script config
RESIZE_IMAGE = True # resize the images and write to 'resized_images/'
GRAYSCALE = True # convert image to grayscale? this option is only valid if RESIZE_IMAGE==True (FIXME)
TARGET_W, TARGET_H = 400, 250 # original image is 1920x1200, keep 1.6 aspect ratio
DEBUG = False
# Raw data dict and label map
data_raw = {}
label_map = {'car': 1} # background class is 0
# Keep track of time
t0 = time.time()
######################################################
# Parse Dataset 1 (object-detection-crowdai/*)
######################################################
data_dir = 'object-detection-crowdai'
# For speed, put entire contents of labels.csv in memory
labels_csv = []
with open(data_dir + '/labels.csv', 'r') as f:
for line in f:
line = line[:-1] # strip trailing newline
labels_csv.append(line)
# Create pickle file to represent dataset
image_files = os.listdir(data_dir)
for count, image_file in enumerate(image_files):
if DEBUG:
if count > 100:
break
if (count+1) % 100 == 0:
print('Processed %d images in %s - total elapsed time: %d sec' % (count+1, data_dir, int(time.time() - t0)))
if image_file == 'labels.csv':
continue
new_image_file = 'd1_' + image_file
# Find box coordinates for all objects in this image
class_list = []
box_coords_list = []
for line in labels_csv:
if re.search(image_file, line):
fields = line.split(',')
# Get label name and assign class label
label_name = fields[5]
if label_name != 'Car':
continue # ignore certain labels
label_class = label_map[label_name.lower()]
class_list.append(label_class)
# Resize image, get rescaled box coordinates
box_coords = np.array([int(x) for x in fields[0:4]])
if RESIZE_IMAGE:
# Resize the images and write to 'resized_images/'
image = Image.open(os.path.join(data_dir, image_file))
orig_w, orig_h = image.size
if GRAYSCALE:
image = image.convert('L') # 8-bit grayscale
image = image.resize((TARGET_W, TARGET_H), Image.LANCZOS) # high-quality downsampling filter
resized_dir = 'resized_images_%dx%d/' % (TARGET_W, TARGET_H)
if not os.path.exists(resized_dir):
os.makedirs(resized_dir)
image.save(os.path.join(resized_dir, new_image_file))
# Rescale box coordinates
x_scale = TARGET_W / orig_w
y_scale = TARGET_H / orig_h
ulc_x, ulc_y, lrc_x, lrc_y = box_coords
new_box_coords = (ulc_x * x_scale, ulc_y * y_scale, lrc_x * x_scale, lrc_y * y_scale)
new_box_coords = [round(x) for x in new_box_coords]
box_coords = np.array(new_box_coords)
box_coords_list.append(box_coords)
if len(class_list) == 0:
continue # ignore images with no labels-of-interest
class_list = np.array(class_list)
box_coords_list = np.array(box_coords_list)
# Create the list of dicts
the_list = []
for i in range(len(box_coords_list)):
d = {'class': class_list[i], 'box_coords': box_coords_list[i]}
the_list.append(d)
data_raw[new_image_file] = the_list
######################################################
# Parse Dataset 2 (object-dataset/*)
######################################################
data_dir = 'object-dataset'
# For speed, put entire contents of labels.csv in memory
labels_csv = []
with open(data_dir + '/labels.csv', 'r') as f:
for line in f:
line = line[:-1] # strip trailing newline
labels_csv.append(line)
# Create pickle file to represent dataset
image_files = os.listdir(data_dir)
for count, image_file in enumerate(image_files):
if DEBUG:
if count > 100:
break
if (count+1) % 100 == 0:
print('Processed %d images in %s - total elapsed time: %d sec' % (count+1, data_dir, int(time.time() - t0)))
if image_file == 'labels.csv':
continue
new_image_file = 'd2_' + image_file
# Find box coordinates for all objects in this image
class_list = []
box_coords_list = []
for line in labels_csv:
if re.search(image_file, line):
fields = line.split(' ')
# Get label name and assign class label
label_name = fields[6]
if label_name != '"car"':
continue # ignore certain labels
label_class = label_map[label_name[1:-1]] # remove the quotation marks
class_list.append(label_class)
# Resize image, get rescaled box coordinates
box_coords = np.array([int(x) for x in fields[1:5]])
if RESIZE_IMAGE:
# Resize the images and write to 'resized_images/'
image = Image.open(os.path.join(data_dir, image_file))
orig_w, orig_h = image.size
if GRAYSCALE:
image = image.convert('L') # 8-bit grayscale
image = image.resize((TARGET_W, TARGET_H), Image.LANCZOS) # high-quality downsampling filter
resized_dir = 'resized_images_%dx%d/' % (TARGET_W, TARGET_H)
if not os.path.exists(resized_dir):
os.makedirs(resized_dir)
image.save(os.path.join(resized_dir, new_image_file))
# Rescale box coordinates
x_scale = TARGET_W / orig_w
y_scale = TARGET_H / orig_h
ulc_x, ulc_y, lrc_x, lrc_y = box_coords
new_box_coords = (ulc_x * x_scale, ulc_y * y_scale, lrc_x * x_scale, lrc_y * y_scale)
new_box_coords = [round(x) for x in new_box_coords]
box_coords = np.array(new_box_coords)
box_coords_list.append(box_coords)
if len(class_list) == 0:
continue # ignore images with no labels-of-interest
class_list = np.array(class_list)
box_coords_list = np.array(box_coords_list)
# Create the list of dicts
the_list = []
for i in range(len(box_coords_list)):
d = {'class': class_list[i], 'box_coords': box_coords_list[i]}
the_list.append(d)
data_raw[new_image_file] = the_list
######################################################
# Save results to pickle file
######################################################
with open('data_raw_%dx%d.p' % (TARGET_W, TARGET_H), 'wb') as f:
pickle.dump(data_raw, f)
| [
"georgesung@gmail.com"
] | georgesung@gmail.com |
19005500e56a8cfd4cd8d73586cef78ba54b0b6a | c253e3c94b66e85d52b1c274e649a8431db0d7d5 | /IT-Lab/assignment-13/codes/3.py | cbacaf8b1655d28044d63039e819c9100d1397c2 | [] | no_license | Abhinal/college-assignments | bfecc9d8dd05b7da5348def9990f42ff28329328 | a93aeee086eb681f946cc343869610e4588af307 | refs/heads/master | 2023-08-16T12:04:35.543135 | 2021-10-22T16:27:33 | 2021-10-22T16:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | f = open("demofile3.txt", "w")
f.write("Woops! I have deleted the content!")
f.close()
f = open("demofile3.txt", "r")
print(f.read())
| [
"ayushdubey70@gmail.com"
] | ayushdubey70@gmail.com |
2aa08601d81f1ab298de2b62bce3c9cd19f4e78d | db9d7648ac9de73a208fad70565a7be208be0438 | /main.spec | abd2fdaa7eca7bcfca8b4e0a75c0e71520a2260e | [] | no_license | testerchan/TestFish | 5d71aa8bfd595bf2f53cc46fde90557eba7f8e19 | fc285c726a76f279b345eb9cb093fe9fa48c387e | refs/heads/master | 2020-05-17T17:27:15.147945 | 2019-04-28T14:51:39 | 2019-04-28T14:51:39 | 183,853,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | spec | # -*- mode: python -*-
# PyInstaller build spec: bundle main.py into a single console executable.
block_cipher = None
# Analysis: discover main.py's imports/resources (paths are Windows-specific).
a = Analysis(['main.py'],
             pathex=['D:\\xampp\\htdocs\\TestFish'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# PYZ: the compressed archive of pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# EXE: one-file build (UPX-compressed, console window kept).
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='main',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=True )
| [
"aku_soku_zan_1bantai@hotmail.com"
] | aku_soku_zan_1bantai@hotmail.com |
dfc904b97c5ed6fadac821366f8e95d574bdc177 | 5a0e22f9f09768d794007cc6ab919c366eb22775 | /v2/blog/migrations/0010_auto_20210314_1832.py | 23998fb39c9484946526a9b147bcf48892b5205e | [] | no_license | baseplate-admin/Help-The-Helpless | 641367638a209712a8570ef1b3f8059883750786 | a4a962f3efa61226412d816c624a961d5aaf833e | refs/heads/main | 2023-03-22T01:00:13.615010 | 2021-03-18T08:16:43 | 2021-03-18T08:16:43 | 323,393,934 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | # Generated by Django 3.1.7 on 2021-03-14 12:32
import blog.blocks
from django.db import migrations, models
import wagtail.core.fields
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); once applied, do not hand-edit —
    # create a new migration instead.
    # Drops the page_title/page_title_description fields, adds a required
    # blog_title (one-off default 1 supplied for existing rows), and allows
    # the StreamField content to be blank/null.

    dependencies = [
        ('blog', '0009_blogauthorsorderable'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='blogdetailspage',
            name='page_title',
        ),
        migrations.RemoveField(
            model_name='blogdetailspage',
            name='page_title_description',
        ),
        migrations.RemoveField(
            model_name='blogpage',
            name='page_title',
        ),
        migrations.RemoveField(
            model_name='blogpage',
            name='page_title_description',
        ),
        migrations.AddField(
            model_name='blogdetailspage',
            name='blog_title',
            field=models.CharField(default=1, max_length=20),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='blogdetailspage',
            name='content',
            field=wagtail.core.fields.StreamField([('full_richtext', blog.blocks.RichtextBlock())], blank=True, null=True),
        ),
    ]
| [
"zarifahnaf@outlook.com"
] | zarifahnaf@outlook.com |
c4c48ba19943ea2b09ca9f6068fe744487016ade | 3d1f2dfb4da9f485f9ef6b5c00f74473352f6a7c | /exporter/yml_exporter.py | 4328262a5702ac5a4314a1cfb13da392c551c17a | [] | no_license | jfmacdonald/cell-library-gof-example | 513a85a491e565c963767a625c30b84f6598ff64 | f7013d722b6fc1c4d98d68fa1425cf7370226ca5 | refs/heads/master | 2021-04-05T23:28:21.641733 | 2018-03-09T00:26:45 | 2018-03-09T00:26:45 | 124,459,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,248 | py | from warnings import warn
from yaml import load_all
from os import path
from .exporter import Exporter
from library import *
from entity import FileEntity
class YmlExporter(Exporter):
    """Visitor that serializes every file component of a library to a
    multi-document YAML stream (one '---' document per file)."""

    def __init__(self, library):
        super().__init__(library)
        self.__stream = None   # output stream, set per export_to_file call
        self.__ok = False      # True while no component has failed to export
    def export_to_file(self, stream):
        # Walk the library, dispatching each component back to a visit* method.
        # Returns False if the stream is unusable or any component failed;
        # the stream is always closed on the way out.
        if not stream or stream.closed:
            return False
        self.__ok = True
        self.__stream = stream
        for component in self.get_library():
            if not component.accept(self):
                self.__ok = False
        self.__stream.close()
        return self.__ok
    # File components: convert to a FileEntity and write one YAML document.
    def visitDbFile(self, file):
        entity = self._file_entity(file)
        return self._export(entity)
    def visitCtlFile(self, file):
        entity = self._file_entity(file)
        return self._export(entity)
    def visitGdsFile(self, file):
        entity = self._file_entity(file)
        return self._export(entity)
    def visitLefFile(self, file):
        entity = self._file_entity(file)
        return self._export(entity)
    def visitLibFile(self, file):
        entity = self._file_entity(file)
        return self._export(entity)
    # Group components carry no file payload; visiting them always succeeds.
    def visitLibGroup(self, group):
        return True
    def visitCornerGroup(self, group):
        return True
    def visitIpGroup(self, group):
        return True
    def _file_entity(self, file):
        # Flatten a file component (attributes, cells, group hierarchy)
        # into a serializable FileEntity.
        filename = file.name()
        filetype = file.type()
        entity = FileEntity(filename, filetype)
        for name in file.get_attribute_names():
            entity.set_attribute( name, file.get_attribute_value(name))
        for cell in file.get_cells():
            entity.set_cell(str(cell))
        for group in file.get_hierarchy():
            entity.set_group( group.type(), group.name() )
        return entity
    def _export(self, file_entity):
        # Write one YAML document; warn (not raise) on failure so the
        # remaining components still get a chance to export.
        try:
            self.__stream.write('---\n')
            self.__stream.write(file_entity.get_yaml())
            return True
        except Exception as msg:
            filename = file_entity.get_filename()
            warn("Write failed: %s" % filename)
            return False
"john@jfmacdonald.com"
] | john@jfmacdonald.com |
c848438c2077621875d0c82384721542d7d07365 | b4dee0c084c357cdb5445d13efe9af6aad4618dd | /utils.py | ab7b03928b9e5b17e418cb3a8f4c063246c8a397 | [
"MIT"
] | permissive | L226/wikiflat | b5f322d5d55a249ec148bc69ae50eb9c5b8f22ff | db006d677d878e2678ec77630aff3d15914fde54 | refs/heads/master | 2021-01-23T13:09:44.641890 | 2017-06-05T11:12:10 | 2017-06-05T11:12:10 | 93,229,627 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,641 | py | """
general utils for unravel project
- web get
- text parse
- hyperlink traverse
- word similarity
- text reconstruct
- job metadata
"""
import os
import logging
import logging.config
import redis
import HTMLParser
import wikipedia
import nltk
from nltk import tokenize
try:
import cPickle as pickle
except:
import pickle
logging.config.fileConfig('log.conf')
logger = logging.getLogger(__name__)
nltk.download('punkt')
def gen_disp_text(input_text=None):
    """
    generate html safe text for display

    Unravels the topic to depth 2 and unescapes each returned row into one
    display string.  Returns (display_text, source_url).
    Note: `HTMLParser.HTMLParser` is the Python 2 module spelling (imported
    at file top), so this module targets Python 2.
    """
    processed_text, siteurl = generate_unravelled_text(input_text=input_text, full_summary=[], prevlinked=[], qdepth=2)
    # disp_text = processed_text.encode('ascii', 'xmlcharrefreplace')
    html_parser = HTMLParser.HTMLParser()
    disp_text = ""
    for row in processed_text:
        disp_text += html_parser.unescape(row)
        # disp_text += "<br />"
    return disp_text, siteurl
def generate_unravelled_text(input_text=None, qdepth=2, similarity=0.75, alength='summary', full_summary=None, prevlinked=None):
    """
    Recursively generate the full unravelled text for a topic.

    params:
    - input_text, the topic to be searched for
    - qdepth, the number of links to follow down the article tree
    - similarity, the cosine distance minimum for sub-topic inclusion
    - alength, the length of article to be returned - ["full","summary"]
    - full_summary, sentence accumulator (a fresh list is created when None)
    - prevlinked, lower-cased topics already visited (fresh list when None)

    Returns (sentences, source_url); source_url is None when the topic page
    could not be fetched.
    """
    # BUGFIX: the accumulators used to be mutable default arguments ([]),
    # which are shared across calls and would leak previously collected
    # sentences/visited topics into later top-level calls.
    if full_summary is None:
        full_summary = []
    if prevlinked is None:
        prevlinked = []
    logger.info("beginning unravel process for %s, qdepth=%d" % (input_text, qdepth))
    topicpage = webget(topic=input_text)
    if topicpage is None:
        # fetch failed (disambiguation/API error): return what we have, no URL
        return full_summary, None
    siteurl = topicpage.url
    prevlinked.append(input_text.lower())
    topicsummary = topicpage.summary
    links = topicpage.links
    for sentence in split_raw_text(topicsummary):
        full_summary.extend([sentence, " "])
        current_depth = qdepth - 1
        if current_depth <= 0:
            # leaf of the recursion: only the first sentence is returned,
            # which prevents whole summaries from being appended
            return [sentence, "<br /><br />"], siteurl
        # BUGFIX: iterate over a snapshot — removing from `links` while
        # iterating the live list silently skipped the element that
        # followed each removal.
        for link in list(links):
            if link.lower() in sentence.lower() and link.lower() not in prevlinked:
                # doesn't get non identical link text, link value
                if word_distance_check(link, input_text, similarity):
                    prevlinked.append(link.lower())
                    full_summary.extend(generate_unravelled_text(input_text=link, qdepth=current_depth, full_summary=[], prevlinked=prevlinked)[0])
                    links.remove(link)
    return full_summary, siteurl
def webget(topic=None):
    """
    get the relevant wiki summary page for topic

    Checks the Redis cache first; on a miss, fetches the page from the
    Wikipedia API and caches it.  Returns the page object, or None when
    topic is None or the lookup fails (disambiguation / API error).
    """
    logger.debug("webget %s commencing" % topic)
    try:
        if topic is not None:
            cg = cache_get(topic)
            if cg is None:
                logger.info("failed to find cache for %s" % topic)
                topicpage = wikipedia.page(title=topic, preload=True) # preload causes open issue keyerr extlinks
                logger.info("setting cache for %s" % topic)
                cache_set(topic, topicpage)
                return topicpage
            else:
                logger.info("found cache for %s" % topic)
                return cg
    except wikipedia.exceptions.DisambiguationError as err:
        logger.info("Wikipedia disambig error: %s" % err)
        # might eventually handle this better
        return None
    # BUGFIX: the old clause was `except wikipedia.exceptions as err`,
    # which names a *module*; catching a non-exception raises TypeError at
    # runtime.  WikipediaException is the package's base exception class.
    except wikipedia.exceptions.WikipediaException as err:
        logger.critical("Wikipedia error: %s" % err)
        return None
def split_raw_text(raw):
    """
    split raw text into sentences

    Uses NLTK's punkt sentence tokenizer (downloaded at module import);
    returns a list of sentence strings.
    """
    return tokenize.sent_tokenize(raw)
def word_distance_check(topic, testword, similarity):
    """Placeholder similarity gate.

    Intended to measure the word2vec cosine distance of *testword* from
    *topic* and reject pairs whose distance exceeds *similarity*.  The
    external similarity service is not wired up yet, so every pair is
    accepted.
    """
    # TODO: connect to the word-embedding API service here.
    accepted = True
    return accepted
def redis_connect():
    """
    look for remote or local redis cache and return connection object if found

    Preference order: REDIS_URL (e.g. Heroku-style full URL), then
    REDIS_PORT (docker-compose setup, host name 'redis').  Returns None
    when neither environment variable is set.
    """
    if os.environ.get("REDIS_URL", None) is not None:
        return redis.from_url(os.environ.get("REDIS_URL"))
    elif os.environ.get("REDIS_PORT", None) is not None: # easy check for docker-compose env
        return redis.StrictRedis(host='redis', port=os.environ['REDIS_PORT'])
    else:
        return None
def cache_get(topic):
    """
    checks the cache for topic and returns the unpickled page if found

    Returns None when Redis is unavailable, the key is missing, or the
    connection fails.  NOTE(review): unpickling cache contents is only safe
    while the Redis instance is private to this app — pickle on untrusted
    data is code execution.
    """
    try:
        r = redis_connect()
        if r is not None:
            logger.info("performing cache lookup for %s" % topic)
            topicget = r.get(topic)
            if topicget is not None:
                return pickle.loads(topicget)
    except ConnectionError as err:
        # NOTE(review): this catches the *builtin* ConnectionError; verify it
        # also covers redis.exceptions.ConnectionError in the redis version
        # used here.
        logger.critical("Redis ConnectionError: %s" % err)
    return None
def cache_set(topic, topicpage):
    """
    saves the pickled topic page to the cache with a 6 hour expiry

    Silently does nothing when Redis is unavailable; returns None on
    failure paths.
    """
    try:
        r = redis_connect()
        if r is not None:
            if r.set(name=topic, value=pickle.dumps(topicpage), ex=21600): # 6hr expiry
                logger.info('successfully saved %s to cache' % topic)
            else:
                return None
    except ConnectionError as err:
        # NOTE(review): catches the builtin ConnectionError — confirm redis-py
        # connection failures are actually subclasses of it here.
        logger.critical("Redis ConnectionError: %s" % err)
        return None
def stats():
    """
    stats for this job

    Placeholder — no statistics are collected yet; always returns None.
    """
    return None
"luke@ninyo.com"
] | luke@ninyo.com |
0332360f22339757bb6bfc0d7e4464b8daec0377 | d65521827b65f4026bc05e2cf453b15a5580461b | /flask_introduction/library/_03_template_str_inside_view.py | 9c25631877fa6f2f1981451d34385865e24fae75 | [] | no_license | monu11296/flaskIntroduction | 1990bc8dec0d13192e0ae5227065bce248b98d2c | bef935b2e67b66a0563b5c28e2e96e7fd52946d6 | refs/heads/master | 2020-04-18T10:11:56.849501 | 2019-01-25T00:51:02 | 2019-01-25T00:51:02 | 167,459,904 | 1 | 0 | null | 2019-01-25T00:58:24 | 2019-01-25T00:40:06 | Python | UTF-8 | Python | false | false | 829 | py | """Using Flask template engines.
In this example we're using Flask template engine (Jinja2) to simplify
the process to generate the resulting HTML.
**TODO**
In our previous example we had to do a lot of string handling to
create the <ul> with authors.
It's your turn to use the template engine to build the same result.
"""
from flask import Flask
from flask import render_template_string # !Important
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Render the library landing page.

    Completes the exercise from the module docstring: the <ul> of authors
    is produced by the Jinja2 template engine ({% for %} loop) instead of
    manual string handling.
    """
    library_name = "Poe"
    authors = ["Alan Poe", "Jorge L. Borges", "Mark Twain"]
    html = """
    <html>
    <h1>Welcome to {{library_name}} library!</h1>
    <ul>
    {% for author in authors %}
        <li>{{author}}</li>
    {% endfor %}
    </ul>
    </html>
    """
    # Pass both values into the template context; Jinja2 expands the loop.
    rendered_html = render_template_string(html, library_name=library_name, authors=authors)
    return rendered_html
| [
"you@example.com"
] | you@example.com |
249132e1095f862e9e13a552da227b3333ebfaa4 | 7739085d1d4650cd84a4f6be054988801a863275 | /SocketProgrammingAssignment/assignment1/WebServer_2.py | 0a9e0fbb6296d3bc3d76be5bea33ce6a8d9d5ac5 | [] | no_license | wesleynowlin/Computer-Networking-A-Top-Down-Approach | 66886936200fa32f2757934b8f06aa915c2b3810 | 652458177d2457f25edc2b75bd308497a08b6e3d | refs/heads/master | 2020-05-07T11:17:12.665745 | 2018-11-18T08:05:28 | 2018-11-18T08:05:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | # import socket module
from socket import *
from threading import Thread
def deal(connectionSocket, addr):
    """Serve a single HTTP GET request on an accepted connection.

    Reads the request, extracts the path, and streams the named file back
    with a 200 response; replies 404 if the file cannot be opened.  The
    connection socket is always closed before returning.

    :param connectionSocket: the accepted, connected TCP socket.
    :param addr: the client's (host, port) pair (unused, kept for callers).
    """
    try:
        message = connectionSocket.recv(1024)  # raw request bytes from the client
        # Second whitespace-separated token is the request path, e.g. b'/index.html'.
        filename = message.split()[1]
        # Strip the leading '/' so the path is resolved relative to the cwd.
        with open(filename[1:]) as f:
            outputdata = f.read()
        # Bug fix: the original status line started with a space (' HTTP/1.1...'),
        # which is not a valid HTTP response; also use CRLF line endings per RFC 7230.
        header = ('HTTP/1.1 200 OK\r\nConnection: close\r\n'
                  'Content-Type: text/html\r\nContent-Length: %d\r\n\r\n'
                  % len(outputdata))
        connectionSocket.send(header.encode())
        # One send of the whole body instead of one send per character.
        connectionSocket.send(outputdata.encode())
        connectionSocket.close()
    except IOError:
        # Bug fix: the original sent ' HTTP/1.1 404 Found' (leading space and
        # wrong reason phrase) with no terminating blank line.
        connectionSocket.send('HTTP/1.1 404 Not Found\r\n\r\n'.encode())
        connectionSocket.close()
serverSocket = socket(AF_INET, SOCK_STREAM)
# Prepare a server socket
serverSocket.bind(('', 6789)) # bind the TCP welcome socket to port 6789 on all interfaces
serverSocket.listen(5) # allow a backlog of up to 5 pending connections
while True:
    # Establish the connection
    try:
        print('Ready to serve...')
        connectionSocket, addr = serverSocket.accept() # accept a client and get a per-connection socket
        print("From [%s] " % str(addr))
        # Hand the connection to a worker thread so the loop can keep accepting.
        client = Thread(target=deal, args=(connectionSocket, addr))
        client.start()
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt, so the loop below is effectively unstoppable.
        print("error")
serverSocket.close() | [
"1194620498@qq.com"
] | 1194620498@qq.com |
54aa4af75f6d0908dfb7bc0bd4c363a2228b9c43 | 7a3b2857ba2cb2702e7e3b6e8b9f7db621db293a | /coronacheck_tools/verification/mobilecore.py | 0f4b2751941516bef806c43628d8b550c660877e | [
"MIT"
] | permissive | p0epvlieg/coronacheck-tools | e3178cfdadfdf92559a76079ac890532998b52ef | 4686a72d5516668a5617930a77200da1a1593823 | refs/heads/main | 2023-09-03T07:02:35.786054 | 2021-11-09T07:51:00 | 2021-11-09T07:51:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | from appdirs import user_config_dir
from coronacheck_tools.lib import loadlib, listlibs
from pathlib import Path
from datetime import datetime, timedelta
import json
import base64
import requests
import shutil
def list_native_libs():
    """Return the native mobilecore library flavours available on this system."""
    return listlibs()
def validate(raw: str, lib='auto', allow_international=False):
    """Verify a scanned QR payload against the native mobilecore verifier.

    :param raw: the raw string decoded from the QR code.
    :param lib: which native library flavour to load ('auto' picks one).
    :param allow_international: accept international (EHC) codes as valid.
    :returns: ``(True, details_dict)`` on success, ``(False, reason_str)``
        when verification failed or the code is a rejected EHC.
    """
    confdir = _ensureconfig()
    verifier, ffi = loadlib(lib=lib)
    # Marshal the Python strings into C strings for the FFI call.
    cstr_raw = ffi.new("char[]", raw.encode())
    cstr_confdir = ffi.new("char[]", str(confdir.absolute()).encode())
    retval = verifier.ffiverify(cstr_raw, cstr_confdir)
    # Copy the C string out, then free the buffer the native side allocated.
    result = ffi.string(retval)
    verifier.freeCString(retval)
    # Drop the cdata handles so the FFI-owned buffers can be reclaimed.
    del cstr_raw
    del cstr_confdir
    result = json.loads(result)
    if len(result['Error'].strip()) > 0:
        return False, result['Error']
    result = result['Details']
    if result['credentialVersion'] == '1':
        # if this field is set to 1 it is actually a european EHC
        result['isEHC'] = True
        result['isDHC'] = False
    else:
        result['isEHC'] = False
        result['isDHC'] = True
    if result['isEHC'] and not allow_international:
        return False, 'Invalid because the QR Code is an international EHC and allow_international=False'
    return True, result
def readconfig():
    """Parse every ``*.json`` file in the mobilecore config directory.

    Returns a dict mapping each file's stem (e.g. ``'config'``) to its
    parsed JSON content; empty files map to an empty dict.
    """
    config_dir = _ensureconfig()
    parsed = {}
    for json_path in config_dir.glob('*.json'):
        with open(json_path, 'r') as handle:
            raw = handle.read()
        parsed[json_path.stem] = json.loads(raw) if raw else {}
    return parsed
def clearconfig():
    """Delete the cached mobilecore configuration directory entirely."""
    # _ensureconfig() is used only to resolve the directory path here; note
    # it may also (re)download stale config right before we remove it.
    confdir = _ensureconfig()
    shutil.rmtree(confdir)
def _ensureconfig():
    """Ensure the verifier config directory exists and is at most 24h old.

    Downloads ``config.json`` and ``public_keys.json`` from the CoronaCheck
    verifier API when missing or stale, stamps the refresh time, and returns
    the directory as a Path.
    """
    confdir = Path(user_config_dir('coronacheck-tools')) / 'mobilecore'
    confdir.mkdir(parents=True, exist_ok=True)
    timestamp_file = confdir / 'timestamp'
    if timestamp_file.exists():
        with open(timestamp_file, 'r') as fh:
            # Default to the epoch so an unreadable timestamp forces a refresh.
            timestamp = datetime.utcfromtimestamp(0)
            ts = fh.read()
            # NOTE(review): len(ts) >= 0 is always true; isdecimal() already
            # rejects the empty string, so this was likely meant to be > 0.
            if len(ts) >= 0 and ts.isdecimal():
                timestamp = datetime.utcfromtimestamp(float(ts))
        now = datetime.utcnow()
        if timestamp >= now - timedelta(hours=24):
            # no need to refresh the config
            return confdir
    # Stale or missing: fetch both payloads from the verifier API.
    config_file = confdir / 'config.json'
    config_url = "https://verifier-api.coronacheck.nl/v4/verifier/config"
    _getpayload(config_url, config_file)
    public_keys_file = confdir / 'public_keys.json'
    public_keys_url = "https://verifier-api.coronacheck.nl/v4/verifier/public_keys"
    _getpayload(public_keys_url, public_keys_file)
    # Record the refresh time as integer UTC seconds.
    with open(timestamp_file, 'w') as fh:
        fh.write(str(int(datetime.utcnow().timestamp())))
    return confdir
def _getpayload(url, outfile):
    """Fetch a base64-wrapped JSON payload from *url* and write it to *outfile*.

    The verifier API returns ``{"payload": "<base64>"}``; the decoded text
    is written as-is.
    """
    req = requests.get(url)
    # Bug fix: raise_for_status was referenced but never *called*, so HTTP
    # error responses were silently accepted and their bodies cached to disk.
    req.raise_for_status()
    data = base64.b64decode(req.json()['payload']).decode()
    with open(outfile, 'w') as fh:
        fh.write(data)
| [
"thomas@tphil.nl"
] | thomas@tphil.nl |
e650a4e141d28eedd6ec302a9ec21e1665454c1d | c6e859f5b303c2d6117dbe907f49fbfcf55efa98 | /postman/3. larger_response.py | 7aa203a21f381244f33dfaa5dee72fcc6a017d3b | [] | no_license | sirkp/DSA | 4d883d735e5225dbdaf81d8a8e19c09d7de88376 | 1920bb4d3907ad3a79bfa9476e9d2eaaa7602fab | refs/heads/master | 2022-12-25T01:58:58.084578 | 2020-10-03T03:47:48 | 2020-10-03T03:47:48 | 273,150,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # larger Responses
def largerResponse(filename):
    """Count log lines whose last field (response size in bytes) exceeds 5000.

    Writes two lines to ``"bytes_" + filename``: the number of qualifying
    responses, then the total bytes of those responses.

    :param filename: path to a whitespace-separated log file whose last
        column on each line is an integer byte count.
    """
    count = 0
    total = 0
    # Bug fix: the original left both file handles open; use context managers.
    with open(filename, 'r') as logfile:
        for line in logfile:
            fields = line.split()
            if not fields:
                continue  # tolerate blank lines instead of raising IndexError
            size = int(fields[-1])
            if size > 5000:
                count += 1
                total += size
    with open("bytes_" + filename, "w") as out:
        out.write(str(count) + "\n")
        out.write(str(total) + "\n")


# Bug fix: the original module-level call `largerResponse(filename);` used an
# undefined name and crashed on import; run only as a script, from argv.
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        largerResponse(sys.argv[1])
| [
"sirkp@gmail.com"
] | sirkp@gmail.com |
73cf4bbcd3801b9fdcd472525956202d729d23f9 | 3d67043f56f566a2f822c2db68ff1c8cc8974dc2 | /CS384_Final_Structure/End_Sem_Code/end_sem_group_allocation.py | 27657b395f948d3678d51d89cbb762a28386139d | [] | no_license | cs3842020/CS384_2020_skeleton | 8d6995455522fd96c868f40523240dec9851fcad | 9f920415a24599beed445ac8bf31e005753d05a4 | refs/heads/master | 2023-01-30T21:03:47.616396 | 2020-12-02T19:47:29 | 2020-12-02T19:47:29 | 295,760,785 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 288 | py |
def group_allocation(filename, number_of_groups):
    """Allocate the students listed in *filename* into *number_of_groups* groups.

    Skeleton for the assignment: the grading harness calls only
    ``group_allocation(filename, number_of_groups)``; implement the
    allocation logic (and any helpers) here.
    """
    # Bug fix: the original body contained only comments, which is not a
    # valid Python function body and raised IndentationError at import time.
    pass


filename = "Btech_2020_master_data.csv"
number_of_groups = 12
group_allocation(filename, number_of_groups) | [
"cs3842020@gmail.com"
] | cs3842020@gmail.com |
57581795f2b9526f736df142af26cc4f424a7aa8 | 5a671c25778638c9807fcddfb49d149e55db4c8f | /apps/users/migrations/0005_auto_20180308_1700.py | 1563fe8368738145f49805e4eee2ea435c033339 | [] | no_license | FathallaSelim/OnlineCourses | 629386cff5acba0722e8d896d817a379952a1837 | 560da03ef66ab83f75958063eb69f1a22b5393b4 | refs/heads/master | 2023-01-13T20:00:45.146188 | 2019-12-24T09:39:48 | 2019-12-24T09:39:48 | 228,176,413 | 1 | 1 | null | 2022-12-27T14:58:31 | 2019-12-15T11:55:44 | CSS | UTF-8 | Python | false | false | 444 | py | # Generated by Django 2.0.2 on 2018-03-08 17:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20180307_1959'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='image',
field=models.ImageField(default='image/default.png', max_length=135, upload_to='image/%Y/%m'),
),
]
| [
"noreply@github.com"
] | FathallaSelim.noreply@github.com |
e3988c53722da59a2e6154d465c2f2904696e35e | 2635b9bf6d3d77344257ce4cfc345843bbd6dabb | /tests/test_functions.py | 3f10c69d40e603d3cbcd6244099395a8d942792f | [
"Apache-2.0"
] | permissive | icecrime/graphite-api | da94be76213752133db356ee806b3663afa74ebc | 99e87a1e666910648dbe90bcba463b6c2078ddfc | refs/heads/master | 2021-01-14T10:38:43.783149 | 2014-03-08T13:32:16 | 2014-03-08T13:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,599 | py | import copy
from mock import patch, call, MagicMock
from graphite_api import functions
from graphite_api.app import app
from graphite_api.render.datalib import TimeSeries
from . import TestCase
def return_greater(series, value):
    """Return the non-None elements of *series* strictly greater than *value*."""
    kept = []
    for item in series:
        if item is not None and item > value:
            kept.append(item)
    return kept
def return_less(series, value):
    """Return the non-None elements of *series* strictly less than *value*."""
    return list(filter(lambda item: item is not None and item < value, series))
class FunctionsTest(TestCase):
    """Unit tests for graphite_api.functions series transforms and helpers."""
    def test_highest_max(self):
        config = [20, 50, 30, 40]
        seriesList = [range(max_val) for max_val in config]
        # Expect the test results to be returned in descending order
        expected = [
            [seriesList[1]],
            [seriesList[1], seriesList[3]],
            [seriesList[1], seriesList[3], seriesList[2]],
            # Test where num_return == len(seriesList)
            [seriesList[1], seriesList[3], seriesList[2], seriesList[0]],
            # Test where num_return > len(seriesList)
            [seriesList[1], seriesList[3], seriesList[2], seriesList[0]],
        ]
        for index, test in enumerate(expected):
            results = functions.highestMax({}, seriesList, index + 1)
            self.assertEqual(test, results)
    def test_highest_max_empty_series_list(self):
        # Test the function works properly with an empty seriesList provided.
        self.assertEqual([], functions.highestMax({}, [], 1))
    def testGetPercentile(self):
        seriesList = [
            ([None, None, 15, 20, 35, 40, 50], 20),
            (range(100), 30),
            (range(200), 60),
            (range(300), 90),
            (range(1, 101), 31),
            (range(1, 201), 61),
            (range(1, 301), 91),
            (range(0, 102), 30),
            (range(1, 203), 61),
            (range(1, 303), 91),
        ]
        for index, conf in enumerate(seriesList):
            series, expected = conf
            result = functions._getPercentile(series, 30)
            self.assertEqual(
                expected, result,
                ('For series index <%s> the 30th percentile ordinal is not '
                 '%d, but %d ' % (index, expected, result)))
    def test_n_percentile(self):
        seriesList = []
        config = [
            [15, 35, 20, 40, 50],
            range(1, 101),
            range(1, 201),
            range(1, 301),
            range(0, 100),
            range(0, 200),
            range(0, 300),
            # Ensure None values in list has no effect.
            [None, None, None] + list(range(0, 300)),
        ]
        for i, c in enumerate(config):
            seriesList.append(TimeSeries('Test(%d)' % i, 0, 1, 1, c))
        def n_percentile(perc, expected):
            result = functions.nPercentile({}, seriesList, perc)
            self.assertEqual(expected, result)
        n_percentile(30, [[20], [31], [61], [91], [30], [60], [90], [90]])
        n_percentile(90, [[50], [91], [181], [271], [90], [180], [270], [270]])
        n_percentile(95, [[50], [96], [191], [286], [95], [190], [285], [285]])
    def test_sorting_by_total(self):
        seriesList = []
        config = [[1000, 100, 10, 0], [1000, 100, 10, 1]]
        for i, c in enumerate(config):
            seriesList.append(TimeSeries('Test(%d)' % i, 0, 0, 0, c))
        self.assertEqual(1110, functions.safeSum(seriesList[0]))
        result = functions.sortByTotal({}, seriesList)
        self.assertEqual(1111, functions.safeSum(result[0]))
        self.assertEqual(1110, functions.safeSum(result[1]))
    def _generate_series_list(self):
        """Build three TimeSeries fixtures; the third is mostly None values."""
        seriesList = []
        config = [range(101), range(101), [1] + [None] * 100]
        for i, c in enumerate(config):
            name = "collectd.test-db{0}.load.value".format(i + 1)
            series = TimeSeries(name, 0, 101, 1, c)
            series.pathExpression = name
            seriesList.append(series)
        return seriesList
    def test_remove_above_percentile(self):
        seriesList = self._generate_series_list()
        percent = 50
        results = functions.removeAbovePercentile({}, seriesList, percent)
        for result in results:
            self.assertListEqual(return_greater(result, percent), [])
    def test_remove_below_percentile(self):
        seriesList = self._generate_series_list()
        percent = 50
        results = functions.removeBelowPercentile({}, seriesList, percent)
        expected = [[], [], [1]]
        for i, result in enumerate(results):
            self.assertListEqual(return_less(result, percent), expected[i])
    def test_remove_above_value(self):
        seriesList = self._generate_series_list()
        value = 5
        results = functions.removeAboveValue({}, seriesList, value)
        for result in results:
            self.assertListEqual(return_greater(result, value), [])
    def test_remove_below_value(self):
        seriesList = self._generate_series_list()
        value = 5
        results = functions.removeBelowValue({}, seriesList, value)
        for result in results:
            self.assertListEqual(return_less(result, value), [])
    def test_limit(self):
        seriesList = self._generate_series_list()
        limit = len(seriesList) - 1
        results = functions.limit({}, seriesList, limit)
        self.assertEqual(len(results), limit,
                         "More than {0} results returned".format(limit))
    def _verify_series_options(self, seriesList, name, value):
        """
        Verify a given option is set and True for each series in a
        series list
        """
        for series in seriesList:
            self.assertIn(name, series.options)
            if value is True:
                test_func = self.assertTrue
            else:
                test_func = self.assertEqual
            test_func(series.options.get(name), value)
    def test_second_y_axis(self):
        seriesList = self._generate_series_list()
        results = functions.secondYAxis({}, seriesList)
        self._verify_series_options(results, "secondYAxis", True)
    def test_draw_as_infinite(self):
        seriesList = self._generate_series_list()
        results = functions.drawAsInfinite({}, seriesList)
        self._verify_series_options(results, "drawAsInfinite", True)
    def test_line_width(self):
        seriesList = self._generate_series_list()
        width = 10
        results = functions.lineWidth({}, seriesList, width)
        self._verify_series_options(results, "lineWidth", width)
    def test_transform_null(self):
        seriesList = self._generate_series_list()
        transform = -5
        results = functions.transformNull({}, copy.deepcopy(seriesList),
                                          transform)
        for counter, series in enumerate(seriesList):
            if not None in series:
                continue
            # If the None values weren't transformed, there is a problem
            self.assertNotIn(None, results[counter],
                             "tranformNull should remove all None values")
            # Anywhere a None was in the original series, verify it
            # was transformed to the given value it should be.
            for i, value in enumerate(series):
                if value is None:
                    result_val = results[counter][i]
                    self.assertEqual(
                        transform, result_val,
                        "Transformed value should be {0}, not {1}".format(
                            transform, result_val))
    def test_alias(self):
        seriesList = self._generate_series_list()
        substitution = "Ni!"
        results = functions.alias({}, seriesList, substitution)
        for series in results:
            self.assertEqual(series.name, substitution)
    def test_alias_sub(self):
        seriesList = self._generate_series_list()
        substitution = "Shrubbery"
        results = functions.aliasSub({}, seriesList, "^\w+", substitution)
        for series in results:
            self.assertTrue(
                series.name.startswith(substitution),
                "aliasSub should replace the name with {0}".format(
                    substitution))
    # TODO: Add tests for * globbing and {} matching to this
    def test_alias_by_node(self):
        seriesList = self._generate_series_list()
        def verify_node_name(*nodes):
            # Use deepcopy so the original seriesList is unmodified
            results = functions.aliasByNode({}, copy.deepcopy(seriesList),
                                            *nodes)
            for i, series in enumerate(results):
                fragments = seriesList[i].name.split('.')
                # Super simplistic. Doesn't match {thing1,thing2}
                # or glob with *, both of what graphite allow you to use
                expected_name = '.'.join([fragments[i] for i in nodes])
                self.assertEqual(series.name, expected_name)
        verify_node_name(1)
        verify_node_name(1, 0)
        verify_node_name(-1, 0)
        # Verify broken input causes broken output
        with self.assertRaises(IndexError):
            verify_node_name(10000)
    def test_alpha(self):
        seriesList = self._generate_series_list()
        alpha = 0.5
        results = functions.alpha({}, seriesList, alpha)
        self._verify_series_options(results, "alpha", alpha)
    def test_color(self):
        seriesList = self._generate_series_list()
        color = "red"
        # Leave the original seriesList unmodified
        results = functions.color({}, copy.deepcopy(seriesList), color)
        for i, series in enumerate(results):
            self.assertTrue(
                hasattr(series, "color"),
                "The transformed seriesList is missing the 'color' attribute",
            )
            self.assertFalse(
                hasattr(seriesList[i], "color"),
                "The original seriesList shouldn't have a 'color' attribute",
            )
            self.assertEqual(series.color, color)
    def test_scale(self):
        seriesList = self._generate_series_list()
        multiplier = 2
        # Leave the original seriesList undisturbed for verification
        results = functions.scale({}, copy.deepcopy(seriesList), multiplier)
        for i, series in enumerate(results):
            for counter, value in enumerate(series):
                if value is None:
                    continue
                original_value = seriesList[i][counter]
                expected_value = original_value * multiplier
                self.assertEqual(value, expected_value)
    def test_average_series(self):
        series = self._generate_series_list()
        average = functions.averageSeries({}, series)[0]
        self.assertEqual(average[:3], [1/3., 1.0, 2.0])
    def test_average_series_wildcards(self):
        series = self._generate_series_list()
        average = functions.averageSeriesWithWildcards({}, series, 1)[0]
        self.assertEqual(average[:3], [1/3., 1.0, 2.0])
        self.assertEqual(average.name, 'collectd.load.value')
    def _generate_mr_series(self):
        """Build map/reduce fixtures: four series plus their grouping by server."""
        seriesList = [
            TimeSeries('group.server1.metric1', 0, 1, 1, [None]),
            TimeSeries('group.server1.metric2', 0, 1, 1, [None]),
            TimeSeries('group.server2.metric1', 0, 1, 1, [None]),
            TimeSeries('group.server2.metric2', 0, 1, 1, [None]),
        ]
        mappedResult = [
            [seriesList[0], seriesList[1]],
            [seriesList[2], seriesList[3]]
        ]
        return seriesList, mappedResult
    def test_mapSeries(self):
        seriesList, expectedResult = self._generate_mr_series()
        results = functions.mapSeries({}, copy.deepcopy(seriesList), 1)
        self.assertEqual(results, expectedResult)
    def test_reduceSeries(self):
        sl, inputList = self._generate_mr_series()
        expectedResult = [
            TimeSeries('group.server1.reduce.mock', 0, 1, 1, [None]),
            TimeSeries('group.server2.reduce.mock', 0, 1, 1, [None])
        ]
        resultSeriesList = [TimeSeries('mock(series)', 0, 1, 1, [None])]
        mock = MagicMock(return_value=resultSeriesList)
        with patch.dict(app.config['GRAPHITE']['functions'], {'mock': mock}):
            results = functions.reduceSeries({}, copy.deepcopy(inputList),
                                             "mock", 2, "metric1", "metric2")
        self.assertEqual(results, expectedResult)
        self.assertEqual(mock.mock_calls, [call({}, inputList[0]),
                                           call({}, inputList[1])])
| [
"brutasse@gmail.com"
] | brutasse@gmail.com |
866603de50181b3fa18d54d50a6cb9afbb90ce37 | b1bbfe2fa31d761d6a4658b022d344b5a0cb7dd8 | /863-all_nodes_distance_K.py | 9d181bbe57de2cd3949917d448ea134564b08c8f | [] | no_license | stevestar888/leetcode-problems | f5917efc3516f8e40d5143b4dc10583c1e22dabd | 844f502da4d6fb9cd69cf0a1ef71da3385a4d2b4 | refs/heads/master | 2022-11-12T05:01:02.794246 | 2022-10-28T16:45:48 | 2022-10-28T16:45:48 | 248,663,356 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | """
https://leetcode.com/problems/all-nodes-distance-k-in-binary-tree/submissions/
Strat:
(general)
Find a way for you to access your parents.
Do a traversal, treating your parent like a child.
(specically)
Use a dictionary to store the parent for every node.
Use DFS traversal, and add the given node's value once you've hit a distance K.
Stats:
Runtime: 40 ms, faster than 19.83% of Python online submissions for All Nodes Distance K in Binary Tree.
Memory Usage: 13.1 MB, less than 72.67% of Python online submissions for All Nodes Distance K in Binary Tree.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def distanceK(self, root, target, K):
        """Return the values of all nodes exactly K edges from target.

        Records each node's parent first, then depth-first searches from
        the target treating the parent link as a third child.

        :type root: TreeNode
        :type target: TreeNode
        :type K: int
        :rtype: List[int]
        """
        answers = []
        seen = set()
        parent_of = {}  # child value -> parent node (values are unique)

        def record_parents(node):
            # In-order walk that fills parent_of for every child node.
            if node is None:
                return
            if node.left:
                parent_of[node.left.val] = node
                record_parents(node.left)
            if node.right:
                parent_of[node.right.val] = node
                record_parents(node.right)

        def walk(node, steps):
            # DFS outward from the target: left, right, then up to the parent.
            if node is None or node in seen:
                return
            seen.add(node)
            if steps == K:
                answers.append(node.val)
            elif steps < K:
                walk(node.left, steps + 1)
                walk(node.right, steps + 1)
                walk(parent_of.get(node.val), steps + 1)

        record_parents(root)
        walk(target, 0)
        return answers
| [
"noreply@github.com"
] | stevestar888.noreply@github.com |
6f6eaa158bddd373d9110edf52d4eec558f46461 | 15d8377c6ea9570017024b86cc68df19a14b8f6e | /BaseAdb.py | d90d4b62a32199388e03ae770188bb5c9067a69d | [
"MIT"
] | permissive | xuzhou1859/MonkeyTest | 00970bf6e0c7fe781a9f209be748d1a7de57663a | 0bdbde0146851a22910c3c4e867ed9f3ba6b26f1 | refs/heads/master | 2021-09-05T12:09:26.549436 | 2018-01-27T10:46:07 | 2018-01-27T10:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | #!/usr/bin/env python
# coding=utf-8
import os
class BaseAdb(object):
    """
    Fundamental operations of adb (Android Debug Bridge).

    Every helper shells out to the ``adb`` binary via ``os.popen`` and
    returns the captured stdout as a string.
    """
    def adb_call(self, command):
        """
        Run ``adb <command>`` and return its complete stdout as one string.
        """
        command_result = ''
        command_text = 'adb %s' % command
        print(command_text)
        results = os.popen(command_text, "r")
        while True:
            line = results.readline()
            if not line:
                break
            command_result += line
        results.close()
        return command_result
    # Bug fix: every method below called self.call_adb(), which never existed
    # (the implementation is named adb_call), so get_state/push/pull/open_app/
    # get_app_pid all raised AttributeError.  Keep both spellings so existing
    # callers of either name continue to work.
    call_adb = adb_call
    def attach_device(self):
        """
        Return the serial numbers of all attached devices (supports multiple).
        """
        result = self.adb_call("devices")
        devices = result.partition('\n')[2].replace('\n', '').split('\tdevice')
        return [device for device in devices if len(device) > 2]
    def get_state(self):
        """
        Return the connection state from ``adb get-state``, or None if empty.
        """
        result = self.call_adb("get-state")
        result = result.strip(' \t\n\r')
        return result or None
    def push(self, local, remote):
        """
        Copy a file from the computer to the phone; returns adb's output.
        """
        result = self.call_adb("push %s %s" % (local, remote))
        return result
    def pull(self, remote, local):
        """
        Fetch a file from the phone to the computer; returns adb's output.
        """
        result = self.call_adb("pull %s %s" % (remote, local))
        return result
    def open_app(self, packagename, activity, devices):
        """
        Start the given activity on the given device; True on success.
        """
        result = self.call_adb(
            "-s " + devices + " shell am start -n %s/%s" % (packagename, activity))
        # Bug fix: the original checked find("Error") >= 1 on a mangled copy
        # of the output, which missed "Error" at the start of a line.
        return "Error" not in result
    def get_app_pid(self, pkg_name):
        """
        Return the pid of the process matching pkg_name (via shell ps | grep).
        """
        string = self.call_adb("shell ps | grep " + pkg_name)
        if string == '':
            return "the process doesn't exist."
        # NOTE(review): splitting on a single space yields empty strings for
        # multi-space-aligned ps output; index 4 matches the original logic
        # but may not be the pid column on every device — verify on target.
        result = string.split(" ")
        return result[4]
| [
"hust.wanglin@gmail.com"
] | hust.wanglin@gmail.com |
a89f050f9e68a630e4312c8b4c9c97ba41e1138a | 0e73f78466b2a7d928be8033522d591405d22b40 | /irt/text/__init__.py | d5075606f537fc72af23c64738e256ef2d7be7c3 | [
"MIT"
] | permissive | cthoyt/irt | 0cf7c34035107e1b9afedf60e8bef12da6f2a955 | bb923ce9859cf7a50b14776fb3a70e1b8a92a7b2 | refs/heads/main | 2023-06-21T00:58:35.418466 | 2021-07-25T16:24:26 | 2021-07-25T16:24:26 | 389,444,545 | 0 | 0 | MIT | 2021-07-25T21:31:57 | 2021-07-25T21:31:56 | null | UTF-8 | Python | false | false | 333 | py | # -*- coding: utf-8 -*-
import enum
SEP = "|"
MASK_TOKEN = "[MASK]"
TOK_MENTION_START = "[MENTION_START]"
TOK_MENTION_END = "[MENTION_END]"
class Mode(enum.Enum):
CLEAN = "clean"
MARKED = "marked"
MASKED = "masked"
@staticmethod
def filename(mode: "Mode"):
return f"contexts.{mode.value}.txt.gz"
| [
"felix@hamann.xyz"
] | felix@hamann.xyz |
ab30dd41a89456e73e07931f0d91d5647c8d2497 | 73e6f26fade2192d59bce06cac606a721855248d | /Library/settings.py | 928b004ceb497b8abd5d355ef9024fefc1dd934b | [] | no_license | SaudFaisal923/library_management | e11edd6fe92d269bc3a52fc2bd76ee4321580a73 | fa57a603a39297840cf5db41687f3ba71dde6644 | refs/heads/main | 2023-06-20T14:01:50.490314 | 2021-07-11T17:08:12 | 2021-07-11T17:08:12 | 385,005,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,260 | py | """
Django settings for Library project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-%969#l0eitbowdqj1_sfwedpxj-mzc3i7!6$j+4*zf14#osb$l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Book_store',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Library.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Library.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"faisalsaud923@gamil.com"
] | faisalsaud923@gamil.com |
209d8760a7423a45deac02e12cd91e27141f2a11 | fd3adde3dc8ef07bbe617840d52f0f2afcc0ac5e | /FactorCreator/20180201/ElementSegregator.py | 41efcc7265ba9e7fc690b3e47a8520439c09fab3 | [] | no_license | dxcv/MyMultiFactor | 7a6d669dc898ceb1b6a9117d498e68e3ddc40616 | 3cf463a06d89cef32adf1357dac7722d209f90bb | refs/heads/master | 2020-06-30T23:26:54.931596 | 2018-03-21T10:21:30 | 2018-03-21T10:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 17:23:42 2018
@author: xtuser08
"""
import os
import pandas as pd
import time
# Create the directory if it does not exist yet.
def Create_Path(Path):
    """Create *Path* (including parents), tolerating an existing directory.

    The original exists()-then-makedirs() pair was racy: another process
    could create the directory between the check and the call, making
    makedirs raise.  Attempt the creation and ignore the already-exists case.
    """
    try:
        os.makedirs(Path)
    except OSError:
        # Re-raise real failures (permissions, bad path); swallow "exists".
        if not os.path.isdir(Path):
            raise
def Segregate_Then_Merge(InputPath,OutputPath):
    """Split per-stock factor files by factor column, then merge across stocks.

    Reads every CSV in InputPath (one gbk-encoded file per stock, keyed by
    the first 10 characters of the filename), and writes one CSV per factor
    to OutputPath with dates as rows and stock codes as columns.
    """
    Create_Path(OutputPath)
    # Load every input file's data, keyed by stock code.
    FileList=os.listdir(InputPath)
    Dict={}
    for FileName in FileList:
        Code=FileName[:10]
        print Code
        Data=pd.read_csv(InputPath+FileName,index_col=0,encoding='gbk')
        Dict[Code]=Data
    # Split per-stock data by factor, then merge each factor across stocks.
    KeyList=sorted(Dict.keys())
    # Factor names come from the first stock's columns (first column skipped;
    # 'Industry' is excluded since it is not a numeric factor).
    FactorList=Dict.get(KeyList[0]).columns.tolist()[1:]
    if 'Industry' in FactorList:
        FactorList.pop(FactorList.index('Industry'))
    for Factor in FactorList:
        FactorDict={}
        for Key in KeyList:
            print Factor,Key
            FactorDict[Key]=Dict.get(Key)[Factor]
        # concat builds a (code, date) MultiIndex series; unstack().T gives
        # dates as the index and stock codes as columns.
        df=pd.concat(FactorDict).unstack().T
        df.to_csv(OutputPath+Factor+'.csv',encoding='gbk')
# NOTE(review): Python 2 script — time.clock() was removed in Python 3.8
# and the bare print statements below are py2-only syntax.
start=time.clock()
# Path layout: Data/ lives two directory levels above the script's cwd.
Dir=os.getcwd()
HigherDir=os.path.dirname(Dir)
DataPath=os.path.dirname(HigherDir)+'\\Data\\' # data folder
FactorPath=DataPath+'RawFactorData'+Dir[Dir.rfind('\\'):]+'\\' # raw factor storage path (per-date subfolder)
ElementPath=DataPath+'FactorLibrary\\'
Segregate_Then_Merge(FactorPath,ElementPath)
end=time.clock()
print end-start
| [
"xiaomin.lin.2015@mqf.smu.edu.sg"
] | xiaomin.lin.2015@mqf.smu.edu.sg |
611c784236972142d1c76ef51d49aeb2a35b195b | e18201cc4d9b0102b74219b85eda3efd1b1abece | /PythonExamples/PythonEssentail/IndexASlicing/HW/star_data/star_data_solution.py | 8a3efe07112029a23fc4b2525033e65707011f05 | [] | no_license | vhalyolee/PyScripts | 3b696a6165a58d3f408e1fc87375adc1dc29a281 | c5fc76617121cf1cc96c40759e40cdc0ab22c0ac | refs/heads/master | 2021-01-13T08:48:21.619816 | 2017-06-26T17:05:59 | 2017-06-26T17:05:59 | 70,178,442 | 0 | 0 | null | 2016-10-06T17:55:19 | 2016-10-06T17:50:26 | null | UTF-8 | Python | false | false | 2,209 | py |
# Star Data Exercise
# ==================
#
# The data file for the "Third Catalogue of Nearby Stars" contains information about nearby stars in lines which look like the following:
#
# <pre>
# Proxima Centauri M5 e 11.05 15.49 771.8
# Alp1Cen G2 V 0.01 4.38 749.0
# Alp2Cen K0 V 1.34 5.71 749.0
# 52Tau Cet G8 Vp 3.49 5.77 286.0
# </pre>
#
# The data is provided in fixed-width fields, as follows:
#
# <pre>
# 0:17 Star name
# 18:28 Spectral class
# 29:34 Apparent magnitude
# 35:40 Absolute magnitude
# 41:46 Parallax in thousandths of an arc second
# </pre>
# Both the lower limit and in the upper limit are inclusive here.
#
# Given the following string, containing one line from the file, extract each of the data items from the string. You should strip extraneous whitespace and convert strings containing floating point numbers to Python floats.
star_string = "Proxima Centauri M5 e 11.05 15.49 771.8"
# The slicing will extract the part of the string that is relevant. Since
# slicing in python is not inclusive on the upper limit, it is 18 instead of
# 17:
star_name = star_string[:18]
# Then we remove the white spaces:
star_name = star_name.strip()
# To be more compact, and if it is as readable to you, feel free to do both
# steps at once:
spectral_class = star_string[18:29].strip()
# Since the float function deals with white spaces, the call to .strip is not needed in the next 3 lines:
apparent_magnitude = float(star_string[29:35])
absolute_magnitude = float(star_string[35:41])
parallax = float(star_string[41:])
print "Star name: ", star_name
print "Spectral class: ", spectral_class
print "Apparent magnitude:", apparent_magnitude
print "Absolute magnitude:", absolute_magnitude
print "Parallax: ", parallax
# References
# ----------
#
# <pre>
# Preliminary Version of the Third Catalogue of Nearby Stars
# GLIESE W., JAHREISS H.
# Astron. Rechen-Institut, Heidelberg (1991)
# </pre>
# Copyright 2008-2016, Enthought, Inc.
# Use only permitted under license. Copying, sharing, redistributing or other unauthorized use strictly prohibited.
# http://www.enthought.com
| [
"vhalyo@gmail.com"
] | vhalyo@gmail.com |
b0af46c45ab53bc78f1fa8c039487dbbe0469192 | e4666cb9597fc094ce74121cd6c73765718f1aff | /config.py | 66c9ceb0247e43d8cb91aa325006093050f03a3f | [] | no_license | ccuulinay/view-d-data | edbdd0449e2f366e93d3bcb021cc10db6f50cef1 | 5de812c5453ede2fcf163ac8d385e8371aae77b2 | refs/heads/main | 2023-07-14T15:47:24.297668 | 2021-08-28T07:52:55 | 2021-08-28T07:52:55 | 387,171,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | import os
import logging
import json
import pathlib
from datetime import timedelta
################### Get logger ###################
# set up basic config - logging to console (basicConfig attaches its own
# console handler to the root logger)
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s: %(asctime)s: %(name)s: %(message)s',
                    # datefmt='%m-%d %H:%M',
                    # filename='/temp/myapp.log',
                    # filemode='w'
                    )
# Create console handlers
# NOTE(review): c_handler is configured but never added to any logger, so it
# is dead code; console output comes from basicConfig's handler above.
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
c_format = logging.Formatter('%(levelname)s: %(asctime)s: %(name)s: %(message)s')
c_handler.setFormatter(c_format)
# Create file handler: errors go to <APP_LOG_PATH or module dir>/logs/app_flask.log
log_f_path = os.environ.get("APP_LOG_PATH", os.path.dirname(__file__))
log_f_p = pathlib.Path(log_f_path) / "logs"
log_f_p.mkdir(parents=True, exist_ok=True)
log_f = log_f_p / "app_flask.log"
f_handler = logging.FileHandler(log_f)
f_handler.setLevel(logging.ERROR)
f_format = logging.Formatter('%(levelname)s: %(asctime)s: %(name)s: %(message)s')
f_handler.setFormatter(f_format)
# Add the file handler to the root logger so ERROR+ records are persisted.
logging.getLogger('').addHandler(f_handler)
# Create a custom logger for the application modules to share.
logger = logging.getLogger("DEFAULT API LOGGER")
logger.setLevel(logging.INFO)
class DBConfig(object):
HOST = os.getenv("DB_HOST", '127.0.0.1')
USER = os.getenv("DB_USER", "root")
PASSWORD = os.getenv("DB_PASSWORD")
SCHEMA = os.getenv("DB_SCHEMA", "MOCKIDP")
| [
"ccuulinay@gmail.com"
] | ccuulinay@gmail.com |
d320e4e866cc7cda8f8204b7e6ca8aec4202bfb9 | e2426d7c01500ca4a2df4e4555f217f957baf957 | /cows/service/imps/wms_gdal.py | 624e30293f59af81a8d6f05c3ab93789be0978fe | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | cedadev/cows | 959a5e1ad220cfe0cce48a2131d6971106c765aa | db9ed729c886b271ce85355b97e39243081e8246 | refs/heads/master | 2020-03-16T15:17:45.710584 | 2018-05-09T10:35:47 | 2018-05-09T10:36:37 | 132,736,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,312 | py | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
"""
An implementation of cows.service.wms_iface that uses GDAL to support
warping between multiple coordinate reference systems. This implementation
relies on a further interface, IGDALDataSource, to provide the data.
:todo: The source is a little confused about the difference between
Dataset (i.e. a GDAL Dataset object) and DataSource (A wrapper
around a GDAL dataset defined in IGDALDataSource). Fix this.
"""
from cows.service.wxs_iface import ILayerMapper
from cows.service.wms_iface import IwmsLayer, IwmsDimension, IwmsLayerSlab
from cows.bbox_util import geoToPixel
from osgeo import osr, gdal
try:
from PIL import Image
except ImportError:
import Image
import logging
log = logging.getLogger(__name__)
class IGDALDataSource(object):
"""
This interface is very similar to ILayer except that it returns GDAL
datasets rather than PIL images. It also doesn't try to handle multiple
CRSs as this is handled by GDALLayer.
:ivar title: The layer title. As seen in the Capabilities document.
:ivar abstract: Abstract as seen in the Capabilities document.
:ivar dimensions: A mapping of dimension names to IDimension objects.
:ivar units: A string describing the units.
:ivar crs: The CRS that GDAL datasets will be returned in by
self.getDataset()
:todo: Legend plotting needs support but should probably be done in a
seperate interface.
"""
dimensions = NotImplemented
def getWKT(self):
"""
Because mapping between CRS codes and WKT format can be flaky in GDAL
this function allows the problem to be solved on a case-by-case basis.
:return: the description of self.crs in GDAL well known text format.
"""
def getBBox(self):
"""
:return: the bounding box (llx, lly, urx, ury) in self.crs.
"""
def getDataset(self, dimValues=None, renderOpts={}):
"""
Create the equivilent of ILayerSlab as a GDAL dataset. The dataset
could have 1,3 or 4 bands representing PIL modes 'L', 'RGB' or 'RGBA'.
@param dimValues: A mapping of dimension names to dimension values
as specified in the IDimension.extent.
@param renderOpts: A generic mapping object for passing rendering
options.
:return: A GDAL Dataset object for this horizontal slice.
"""
class GDALLayer(IwmsLayer):
"""
This implementation of IwmsLayer can warp images from a source CRS to
various other CRSs.
:ivar sourceCRS: The CRS of the data source.
:ivar warpCRS: A mapping of CRS identifiers to WKT descriptions of
CRSs that are supported for this ILayer via warping.
"""
def __init__(self, dataSource):
"""
@param dataSource: A IGDALDataSource implementation.
"""
self._ds = dataSource
self.warpCRS = {}
self.sourceCRS = dataSource.crs
self.title = dataSource.title
self.abstract = dataSource.abstract
self.dimensions = dataSource.dimensions
self.units = dataSource.units
#!NOTE: self.crss is implemented as property
def _getCRSs(self):
return [self._ds.crs] + self.warpCRS.keys()
crss = property(_getCRSs)
def getBBox(self, crs):
src_bb = self._ds.getBBox()
if crs == self.sourceCRS:
return src_bb
sr_src = osr.SpatialReference(self._ds.getWKT())
sr_dst = osr.SpatialReference(self.warpCRS[crs])
ct = osr.CoordinateTransformation(sr_src, sr_dst)
llx, lly = ct.TransformPoint(float(src_bb[0]), float(src_bb[1]))[:2]
urx, ury = ct.TransformPoint(float(src_bb[2]), float(src_bb[3]))[:2]
return (llx, lly, urx, ury)
def getSlab(self, crs, dimValues=None, renderOpts={}):
return GDALLayerSlab(self, crs, dimValues=dimValues,
renderOpts=renderOpts)
def getCacheKey(self, crs, dimValues=None, renderOpts={}):
"""
A fairly sane cache key generation algorithm.
"""
if dimValues is None:
x = None
else:
x = dimValues.items()
x.sort()
y = renderOpts.items(); y.sort()
return str((x, y))
class GDALLayerSlab(IwmsLayerSlab):
def __init__(self, layer, crs, dimValues=None, renderOpts={}):
self.layer = layer
self.crs = crs
self.dimValues = dimValues
self.rendOpts = renderOpts
self.bbox = layer.getBBox(crs)
if crs == layer.sourceCRS:
self._data = layer._ds.getDataset()
else:
self._data = warpDataset(layer._ds, layer.warpCRS[crs])
def getImage(self, bbox, width, height):
# Calculate the pixel coordinates of bbox within self.bbox
w, h = self._data.RasterXSize, self._data.RasterYSize
llx, lly = geoToPixel(bbox[0], bbox[1], self.bbox, w, h)
urx, ury = geoToPixel(bbox[2], bbox[3], self.bbox, w, h)
xoff, yoff = llx, ury
xsize, ysize = urx-llx, lly-ury
img = datasetToImage(self._data, xoff, yoff, xsize, ysize)
return img.resize((width, height))
#-----------------------------------------------------------------------------
# Utility functions
def datasetToImage(ds, xoff, yoff, xsize, ysize):
"""
Convert a GDAL dataset into a PIL image with cropping.
"""
bandImages = []
for iband in range(1, ds.RasterCount+1):
band = ds.GetRasterBand(iband)
bandImages.append(Image.fromstring('L', (xsize, ysize),
band.ReadRaster(xoff, yoff,
xsize, ysize)))
return Image.merge('RGBA', bandImages)
def warpDataset(dataSource, wkt, driverName='MEM', datasetName=''):
"""
Warp a GDAL dataset from one CRS to another.
@param dataset: An object implementing IGDALDataSource.
@param wkt: The Well Known Text string of the destination CRS
@param driverName: The GDAL driver to use for the new dataset.
This driver must support the Create() method.
@param datasetName: The name to give the dataset (i.e. the filename if
a file-based driver)
@param return: A GDAL dataset in the new CRS.
"""
ds = dataSource.getDataset()
sr_src = osr.SpatialReference(dataSource.GetProjection())
sr_dst = osr.SpatialReference(wkt)
ct = osr.CoordinateTransform(sr_src, sr_dst)
# What is a reasonable resolution? We should transform the resolution
# of the source image into the new coordinates
T = ds.GetGeoTransform()
#!TODO ...
dr = gdal.GetDriverByName(driverName)
#!TODO: How big should the image be? Just fudge the issue for now.
dsOut = dr.Create(datasetName, width, height, 4,
gdal.GDT_Byte)
dsOut.SetProjection(wkt)
# We need to calculate the warped GeoTransform
topLeft = ct(T[0], T[3])
gdal.ReprojectImage(ds, dsOut, ds.GetProjection(), wkt)
return dsOut
| [
"ag.stephens@stfc.ac.uk"
] | ag.stephens@stfc.ac.uk |
a68052900e87d22e20f286a50536c416392d3dcf | 43cdd7cb26fe44b1ed7de6a46f8b5e680c9b1372 | /openpeerpower/components/sms/__init__.py | 328aa2a919bc4a9522a555d6536f83b3d31420c0 | [
"Apache-2.0"
] | permissive | OpenPeerPower/Open-Peer-Power | 02ec5c133564b47c6f72f669e844a666643cacd6 | 940a04a88e8f78e2d010dc912ad6905ae363503c | refs/heads/master | 2022-08-16T09:38:49.994009 | 2021-05-29T03:54:13 | 2021-05-29T03:54:13 | 183,174,237 | 1 | 0 | Apache-2.0 | 2022-07-15T18:43:02 | 2019-04-24T07:35:47 | Python | UTF-8 | Python | false | false | 930 | py | """The sms component."""
import logging
import gammu # pylint: disable=import-error, no-member
import voluptuous as vol
from openpeerpower.const import CONF_DEVICE
from openpeerpower.helpers import config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_DEVICE): cv.isdevice})},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(opp, config):
"""Configure Gammu state machine."""
conf = config[DOMAIN]
device = conf.get(CONF_DEVICE)
gateway = gammu.StateMachine() # pylint: disable=no-member
try:
gateway.SetConfig(0, dict(Device=device, Connection="at"))
gateway.Init()
except gammu.GSMError as exc: # pylint: disable=no-member
_LOGGER.error("Failed to initialize, error %s", exc)
return False
else:
opp.data[DOMAIN] = gateway
return True
| [
"pcaston@arach.net.au"
] | pcaston@arach.net.au |
939e7d058fe04d55211e0d4fcea6573e3821c895 | 33eb8ed192179a8104b9e04008f8890c2255d7f3 | /A2/hw2/test.py | 94331b3dd86b61ea7fffc7b979bc5c0be3466bed | [] | no_license | ayuj6/CS540-AI | f5e0d7a256fab8be1b35429c45d37ec0957e3bf6 | deaea3f996fd66a3ec9317805b79aa31c901d679 | refs/heads/main | 2023-05-05T06:51:39.190006 | 2021-05-28T19:33:09 | 2021-05-28T19:33:09 | 371,794,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,264 | py |
'''
HW4 is to be written in a file called classify.py with the following interface:
create_vocabulary(training_directory: str, cutoff: int)
create_bow(vocab: dict, filepath: str)
load_training_data(vocab: list, directory: str)
prior(training_data: list, label_list: list)
p_word_given_label(vocab: list, training_data: list, label: str)
train(training_directory: str, cutoff: int)
classify(model: dict, filepath: str)
'''
__author__ = 'cs540-testers'
__credits__ = ['Saurabh Kulkarni', 'Alex Moon', 'Stephen Jasina',
'Harrison Clark']
version = 'V1.1.2'
from classify import train, create_bow, load_training_data, prior, \
p_word_given_label, classify, create_vocabulary
import unittest
class TestClassify(unittest.TestCase):
def compare_dicts(self, a, b):
'''Compares two dicts that map strings to other (non-container) data'''
# Check that all elements of a are in b
for k in a:
self.assertIn(k, b)
if isinstance(a[k], float):
self.assertAlmostEqual(a[k], b[k])
elif isinstance(a[k], dict):
self.compare_dicts(a[k], b[k])
else:
self.assertEqual(a[k], b[k])
# Check if b has unexpected extra entries
for k in b:
self.assertIn(k, a)
# create_vocabulary(training_directory: str, cutoff: int)
# returns a list
def test_create_vocabulary(self):
vocab = create_vocabulary('./EasyFiles/', 1)
expected_vocab = [',', '.', '19', '2020', 'a', 'cat', 'chases', 'dog',
'february', 'hello', 'is', 'it', 'world']
self.assertEqual(vocab, expected_vocab)
vocab = create_vocabulary('./EasyFiles/', 2)
expected_vocab = ['.', 'a']
self.assertEqual(vocab, expected_vocab)
# create_bow(vocab: dict, filepath: str)
# returns a dict
def test_create_bow(self):
vocab = create_vocabulary('./EasyFiles/', 1)
bow = create_bow(vocab, './EasyFiles/2016/1.txt')
expected_bow = {'a': 2, 'dog': 1, 'chases': 1, 'cat': 1, '.': 1}
self.assertEqual(bow, expected_bow)
bow = create_bow(vocab, './EasyFiles/2020/2.txt')
expected_bow = {'it': 1, 'is': 1, 'february': 1, '19': 1, ',': 1,
'2020': 1, '.': 1}
self.assertEqual(bow, expected_bow)
vocab = create_vocabulary('./EasyFiles/', 2)
bow = create_bow(vocab, './EasyFiles/2016/1.txt')
expected_bow = {'a': 2, None: 3, '.': 1}
self.assertEqual(bow, expected_bow)
# load_training_data(vocab: list, directory: str)
# returns a list of dicts
def test_load_training_data(self):
vocab = create_vocabulary('./EasyFiles/', 1)
training_data = load_training_data(vocab, './EasyFiles/')
expected_training_data = [
{
'label': '2020',
'bow': {'it': 1, 'is': 1, 'february': 1, '19': 1, ',': 1,
'2020': 1, '.': 1}
},
{
'label': '2016',
'bow': {'hello': 1, 'world': 1}
},
{
'label': '2016',
'bow': {'a': 2, 'dog': 1, 'chases': 1, 'cat': 1, '.': 1}
}
]
self.assertCountEqual(training_data, expected_training_data)
# prior(training_data: list, label_list: list)
# returns a dict mapping labels to floats
# assertAlmostEqual(a, b) can be handy here
def test_prior(self):
vocab = create_vocabulary('./corpus/training/', 2)
training_data = load_training_data(vocab, './corpus/training/')
log_probabilities = prior(training_data, ['2020', '2016'])
expected_log_probabilities = {'2020': -0.32171182103809226,
'2016': -1.2906462863976689}
self.compare_dicts(log_probabilities, expected_log_probabilities)
# p_word_given_label(vocab: list, training_data: list, label: str)
# returns a dict mapping words to floats
# assertAlmostEqual(a, b) can be handy here
def test_p_word_given_label_2020(self):
vocab = create_vocabulary('./EasyFiles/', 1)
training_data = load_training_data(vocab, './EasyFiles/')
log_probabilities = p_word_given_label(vocab, training_data, '2020')
expected_log_probabilities = {',': -2.3513752571634776,
'.': -2.3513752571634776, '19': -2.3513752571634776,
'2020': -2.3513752571634776, 'a': -3.044522437723423,
'cat': -3.044522437723423, 'chases': -3.044522437723423,
'dog': -3.044522437723423, 'february': -2.3513752571634776,
'hello': -3.044522437723423, 'is': -2.3513752571634776,
'it': -2.3513752571634776, 'world': -3.044522437723423,
None: -3.044522437723423}
self.compare_dicts(log_probabilities, expected_log_probabilities)
vocab = create_vocabulary('./EasyFiles/', 2)
training_data = load_training_data(vocab, './EasyFiles/')
log_probabilities = p_word_given_label(vocab, training_data, '2020')
expected_log_probabilities = {'.': -1.6094379124341005,
'a': -2.302585092994046, None: -0.35667494393873267}
self.compare_dicts(log_probabilities, expected_log_probabilities)
def test_p_word_given_label_2016(self):
vocab = create_vocabulary('./EasyFiles/', 1)
training_data = load_training_data(vocab, './EasyFiles/')
log_probabilities = p_word_given_label(vocab, training_data, '2016')
expected_log_probabilities = {',': -3.091042453358316,
'.': -2.3978952727983707, '19': -3.091042453358316,
'2020': -3.091042453358316, 'a': -1.9924301646902063,
'cat': -2.3978952727983707, 'chases': -2.3978952727983707,
'dog': -2.3978952727983707, 'february': -3.091042453358316,
'hello': -2.3978952727983707, 'is': -3.091042453358316,
'it': -3.091042453358316, 'world': -2.3978952727983707,
None: -3.091042453358316}
self.compare_dicts(log_probabilities, expected_log_probabilities)
vocab = create_vocabulary('./EasyFiles/', 2)
training_data = load_training_data(vocab, './EasyFiles/')
log_probabilities = p_word_given_label(vocab, training_data, '2016')
expected_log_probabilities = {'.': -1.7047480922384253,
'a': -1.2992829841302609, None: -0.6061358035703157}
self.compare_dicts(log_probabilities, expected_log_probabilities)
# train(training_directory: str, cutoff: int)
# returns a dict
def test_train(self):
model = train('./EasyFiles/', 2)
expected_model = {
'vocabulary': ['.', 'a'],
'log prior': {
'2020': -0.916290731874155,
'2016': -0.5108256237659905
},
'log p(w|y=2020)': {
'.': -1.6094379124341005,
'a': -2.302585092994046,
None: -0.35667494393873267
},
'log p(w|y=2016)': {
'.': -1.7047480922384253,
'a': -1.2992829841302609,
None: -0.6061358035703157
}
}
self.compare_dicts(model, expected_model)
# classify(model: dict, filepath: str)
# returns a dict
def test_classify_2020(self):
model = train('./corpus/training/', 2)
classification = classify(model, './corpus/test/2016/0.txt')
expected_classification = {
'log p(y=2020|x)': -3906.351945884105,
'log p(y=2016|x)': -3916.458747858926,
'predicted y': '2020'
}
self.compare_dicts(classification, expected_classification)
def test_classify_2016(self):
model = train('./corpus/training/', 2)
classification = classify(model, './corpus/test/2016/19.txt')
expected_classification = {
'log p(y=2016|x)': -3800.4027665365134,
'log p(y=2020|x)': -3805.776535552692,
'predicted y': '2016'
}
self.compare_dicts(classification, expected_classification)
if __name__ == '__main__':
print('Tester %s' % version)
unittest.main()
| [
"ayujprasad1999@gmail.com"
] | ayujprasad1999@gmail.com |
b436c1884518402d04f67b999bb6ece4df3fb38d | 930fa404212252c6e925c38ed2e6cae614ce4ec8 | /sdne-dec.py | 48694035afbe7cc74b80a012bc28e44dddaa2465 | [] | no_license | klovbe/graph_clustering | 0b778d3055a91bb2cd6d049c7a975a6679173a9b | 15fd0296631b9b0e21455f0f51e40ed65319bc0d | refs/heads/master | 2020-04-07T04:59:40.752665 | 2018-12-11T15:05:40 | 2018-12-11T15:05:40 | 158,079,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,360 | py | disp_avlbl = True
import os
if 'DISPLAY' not in os.environ:
disp_avlbl = False
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import networkx as nx
import sys
sys.path.append('./')
sys.path.append(os.path.realpath(__file__))
from keras.engine.topology import Layer, InputSpec
from keras.layers import Input, Dense, Lambda, Subtract, merge, Dropout, BatchNormalization, Activation
from keras.models import Model, model_from_json
import keras.regularizers as Reg
from keras.optimizers import SGD, Adam
from keras import backend as K
from time import time
from datasets import *
from sklearn.cluster import KMeans
import metrics
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
def get_encoder(node_num, d, K, n_units, nu1, nu2, activation_fn):
    """Build the SDNE encoder: node_num-dim adjacency row -> d-dim embedding.

    Args:
        node_num: number of nodes in the graph (input dimension).
        d: embedding dimension.
        K: number of encoder layers (K-1 hidden layers plus the embedding layer).
        n_units: sizes of the K-1 hidden layers.
        nu1: L1 regularization coefficient.
        nu2: L2 regularization coefficient.
        activation_fn: activation used for every layer.

    Returns:
        A keras Model mapping the input row to its embedding.

    Note: rewritten with the Keras 2 API (kernel_regularizer,
    Model(inputs=..., outputs=...)) for consistency with autoencoder() /
    autoencoder_bn() below; the Keras 1 names (W_regularizer, input=/output=)
    are rejected by modern Keras.
    """
    # Input
    x = Input(shape=(node_num,))
    # Encoder layers: y[0] is the input, y[K] the embedding
    y = [None] * (K + 1)
    y[0] = x
    for i in range(K - 1):
        y[i + 1] = Dense(n_units[i], activation=activation_fn,
                         kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[i])
    y[K] = Dense(d, activation=activation_fn,
                 kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[K - 1])
    # Encoder model
    encoder = Model(inputs=x, outputs=y[K])
    return encoder
def get_decoder(node_num, d, K,
                n_units, nu1, nu2,
                activation_fn):
    """Build the SDNE decoder: d-dim embedding -> node_num-dim reconstruction.

    Mirrors get_encoder(): K-1 hidden layers whose sizes are n_units applied
    in reverse, followed by the node_num-dim output layer.

    Args:
        node_num: number of nodes in the graph (output dimension).
        d: embedding dimension (input dimension).
        K: number of decoder layers.
        n_units: sizes of the K-1 hidden layers (same list as the encoder's).
        nu1: L1 regularization coefficient.
        nu2: L2 regularization coefficient.
        activation_fn: activation used for every layer.

    Returns:
        A keras Model mapping an embedding to a reconstructed adjacency row.

    Note: rewritten with the Keras 2 API (kernel_regularizer,
    Model(inputs=..., outputs=...)) for consistency with autoencoder() /
    autoencoder_bn() below.
    """
    # Input: embedding vector
    y = Input(shape=(d,))
    # Decoder layers, built from the embedding outwards (y_hat[K] -> y_hat[0])
    y_hat = [None] * (K + 1)
    y_hat[K] = y
    for i in range(K - 1, 0, -1):
        y_hat[i] = Dense(n_units[i - 1],
                         activation=activation_fn,
                         kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[i + 1])
    y_hat[0] = Dense(node_num, activation=activation_fn,
                     kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[1])
    # Output: reconstructed adjacency row
    x_hat = y_hat[0]
    # Decoder Model
    decoder = Model(inputs=y, outputs=x_hat)
    return decoder
def get_autoencoder(encoder, decoder):
    """Compose an encoder and a decoder into a single autoencoder model.

    Args:
        encoder: keras Model mapping input -> embedding.
        decoder: keras Model mapping embedding -> reconstruction.

    Returns:
        A keras Model whose outputs are [reconstruction, embedding].

    Note: uses Model(inputs=..., outputs=...) (Keras 2 API) for consistency
    with the rest of the file.
    """
    # Input shape is taken from the encoder's own input layer
    x = Input(shape=(encoder.layers[0].input_shape[1],))
    # Generate embedding
    y = encoder(x)
    # Generate reconstruction
    x_hat = decoder(y)
    # Autoencoder Model
    autoencoder = Model(inputs=x, outputs=[x_hat, y])
    return autoencoder
def autoencoder(dims, nu1, nu2, act='relu', init='glorot_uniform'):
    """
    Symmetric fully connected auto-encoder.

    Arguments:
        dims: units per encoder layer; dims[0] is the input dimension,
            dims[-1] the embedding dimension. The decoder mirrors the
            encoder, so the full network has 2*len(dims)-1 layers.
        nu1: L1 regularization coefficient applied to every Dense layer.
        nu2: L2 regularization coefficient applied to every Dense layer.
        act: activation for hidden layers (not the embedding layer; the
            output layer uses sigmoid).
        init: kernel initializer name.
    return:
        (ae_model, encoder_model): the full autoencoder and the encoder half.
    """
    depth = len(dims) - 1

    def _reg():
        # fresh regularizer instance per layer, as elsewhere in this file
        return Reg.l1_l2(l1=nu1, l2=nu2)

    inp = Input(shape=(dims[0],), name='input')

    # --- encoder: hidden layers with activation, linear embedding layer ---
    hidden = inp
    for idx in range(depth - 1):
        hidden = Dense(dims[idx + 1], activation=act, kernel_initializer=init,
                       kernel_regularizer=_reg(),
                       name='encoder_%d' % idx)(hidden)
    # embedding layer: no activation; features are extracted from here
    hidden = Dense(dims[-1], kernel_initializer=init, kernel_regularizer=_reg(),
                   name='encoder_%d' % (depth - 1))(hidden)

    # --- decoder: mirror of the encoder ---
    out = hidden
    for idx in range(depth - 1, 0, -1):
        out = Dense(dims[idx], activation=act, kernel_initializer=init,
                    kernel_regularizer=_reg(),
                    name='decoder_%d' % idx)(out)
    # sigmoid output to reconstruct the (normalized) input
    out = Dense(dims[0], activation='sigmoid', kernel_initializer=init,
                kernel_regularizer=_reg(), name='decoder_0')(out)

    return (Model(inputs=inp, outputs=out, name='AE'),
            Model(inputs=inp, outputs=hidden, name='encoder'))
def autoencoder_bn(dims, nu1, nu2, act='relu', init='glorot_uniform'):
    """
    Symmetric fully connected auto-encoder with batch normalization.

    Same layout as autoencoder(), but every hidden Dense layer is followed
    by BatchNormalization and then the activation.

    Arguments:
        dims: units per encoder layer; dims[0] is the input dimension,
            dims[-1] the embedding dimension.
        nu1: L1 regularization coefficient applied to every Dense layer.
        nu2: L2 regularization coefficient applied to every Dense layer.
        act: activation for hidden layers.
        init: kernel initializer name.
    return:
        (ae_model, encoder_model): the full autoencoder and the encoder half.
    """
    depth = len(dims) - 1

    def _reg():
        # fresh regularizer instance per layer
        return Reg.l1_l2(l1=nu1, l2=nu2)

    def _dense_bn_act(tensor, units, layer_name):
        # Dense -> BatchNorm -> activation, the repeated hidden-layer motif
        tensor = Dense(units, kernel_initializer=init, name=layer_name,
                       kernel_regularizer=_reg())(tensor)
        tensor = BatchNormalization()(tensor)
        return Activation(activation=act)(tensor)

    inp = Input(shape=(dims[0],), name='input')

    # --- encoder ---
    hidden = inp
    for idx in range(depth - 1):
        hidden = _dense_bn_act(hidden, dims[idx + 1], 'encoder_%d' % idx)
    # embedding layer: plain Dense, no BN/activation; features come from here
    hidden = Dense(dims[-1], kernel_initializer=init,
                   name='encoder_%d' % (depth - 1),
                   kernel_regularizer=_reg())(hidden)

    # --- decoder ---
    out = hidden
    for idx in range(depth - 1, 0, -1):
        out = _dense_bn_act(out, dims[idx], 'decoder_%d' % idx)
    # sigmoid output to reconstruct the (normalized) input
    out = Dense(dims[0], activation='sigmoid', kernel_initializer=init,
                name='decoder_0', kernel_regularizer=_reg())(out)

    return (Model(inputs=inp, outputs=out, name='AE'),
            Model(inputs=inp, outputs=hidden, name='encoder'))
class ClusteringLayer(Layer):
    """
    Clustering layer converts input sample (feature) to soft label, i.e. a vector that represents the probability of the
    sample belonging to each cluster. The probability is calculated with student's t-distribution (as in DEC).

    # Example
    ```
        model.add(ClusteringLayer(n_clusters=10))
    ```
    # Arguments
        n_clusters: number of clusters.
        weights: list of Numpy array with shape `(n_clusters, n_features)` witch represents the initial cluster centers.
        alpha: parameter in Student's t-distribution. Default to 1.0.
    # Input shape
        2D tensor with shape: `(n_samples, n_features)`.
    # Output shape
        2D tensor with shape: `(n_samples, n_clusters)`.
    """

    def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
        # Allow the old-style `input_dim` kwarg by translating it to `input_shape`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(ClusteringLayer, self).__init__(**kwargs)
        self.n_clusters = n_clusters
        self.alpha = alpha
        # Optional initial cluster centers; applied (then discarded) in build().
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

    def build(self, input_shape):
        # Expects (n_samples, n_features); the feature dim sizes the centers.
        assert len(input_shape) == 2
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
        # Trainable cluster centers, one row per cluster.
        # NOTE(review): add_weight is called with the shape positionally —
        # newer Keras versions require shape= as a keyword; verify against
        # the installed Keras.
        self.clusters = self.add_weight((self.n_clusters, input_dim), initializer='glorot_uniform', name='clusters')
        if self.initial_weights is not None:
            # Overwrite the random centers with the caller-supplied ones.
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, inputs, **kwargs):
        """ student t-distribution, as same as used in t-SNE algorithm.
                 q_ij = 1/(1+dist(x_i, u_j)^2), then normalize it.
        Arguments:
            inputs: the variable containing data, shape=(n_samples, n_features)
        Return:
            q: student's t-distribution, or soft labels for each sample. shape=(n_samples, n_clusters)
        """
        # Squared Euclidean distance of every sample to every center via
        # broadcasting: expand_dims makes inputs (None, 1, dim) against
        # self.clusters (n_clusters, dim).
        q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha))
        #self.clusters:(n_clusters, embedding dim), inputs:(None, embedding dim)
        q **= (self.alpha + 1.0) / 2.0
        # Row-normalize so each sample's soft assignments sum to 1.
        q = K.transpose(K.transpose(q) / K.sum(q, axis=1)) # q:(None,n_clusters)
        return q

    def compute_output_shape(self, input_shape):
        # One soft-assignment column per cluster.
        assert input_shape and len(input_shape) == 2
        return input_shape[0], self.n_clusters

    def get_config(self):
        # Serialize n_clusters alongside the base Layer config so the layer
        # can be reconstructed from a saved model.
        config = {'n_clusters': self.n_clusters}
        base_config = super(ClusteringLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# Objectives
def weighted_mse_x(y_true, y_pred):
    ''' Hack: This fn doesn't accept additional arguments.
        We use y_true to pass them.
        y_pred: Contains x_hat - x (the reconstruction residual)
        y_true: Contains [b, deg] — per-entry penalty weights b in the first
            columns and the node degree in the last column.
    '''
    # Unpack the values smuggled through y_true.
    penalty = y_true[:, :-1]
    degree = y_true[:, -1]
    # Weighted squared reconstruction error, normalized by degree.
    weighted_residual = y_pred * penalty
    return K.sum(K.square(weighted_residual), axis=-1) / degree
def weighted_mse_y(y_true, y_pred):
    ''' Hack: This fn doesn't accept additional arguments.
        We use y_true to pass them.
        y_pred: Contains y2 - y1 (embedding difference of an edge's endpoints)
        y_true: Contains s12 (the edge weight)
    '''
    batch = K.shape(y_true)[0]
    # Squared embedding distance per sample, reshaped to a column so it can
    # be scaled elementwise by the edge weight.
    sq_dist = K.sum(K.square(y_pred), axis=-1)
    return K.reshape(sq_dist, [batch, 1]) * y_true
class SDNE(object):
    """SDNE-style graph autoencoder combined with DEC-style deep clustering.

    Builds a symmetric autoencoder over adjacency rows, trains it with the
    SDNE first/second-order losses (weighted_mse_y / weighted_mse_x), then
    attaches a ClusteringLayer and refines both embedding and cluster
    assignments by minimizing the KL divergence to DEC's target distribution.
    """

    def __init__(self, dims, bn, init, nu1, nu2, alpha, gamma, n_clusters, *hyper_dict, **kwargs):
        ''' Initialize the SDNE class

        Args:
            dims: encoder layer sizes; dims[0] is the number of nodes
                (input dimension), dims[-1] the embedding dimension.
            bn: if truthy, use the batch-normalized autoencoder variant.
            init: keras kernel initializer name.
            nu1: L1-reg hyperparameter
            nu2: L2-reg hyperparameter
            alpha: weighing hyperparameter for the 1st-order (embedding
                difference) objective.
            gamma: weighing hyperparameter for the clustering (KLD) objective.
            n_clusters: number of clusters for the DEC clustering layer.
        '''
        self.alpha = alpha
        self.gamma = gamma
        self.dims = dims
        self.node_num = self.dims[0]
        self.n_stacks = len(self.dims) -1
        self.n_clusters = n_clusters

        # Generate encoder, decoder and autoencoder
        # If cannot use previous step information, initialize new models
        if bn:
            self.autoencoder, self.encoder = autoencoder_bn(self.dims, nu1=nu1, nu2=nu2, init=init)
        else:
            self.autoencoder, self.encoder = autoencoder(self.dims, nu1=nu1, nu2=nu2, init=init)

        # Initialize self.model
        # Input: each training sample packs the adjacency rows of an edge's
        # two endpoints side by side, hence width 2 * node_num.
        x_in = Input(shape=(2 * self.node_num,), name='x_in')
        # Split the packed input back into the two endpoint rows.
        x1 = Lambda(
            lambda x: x[:, 0:self.node_num],
            output_shape=(self.node_num,)
        )(x_in)
        x2 = Lambda(
            lambda x: x[:, self.node_num:2 * self.node_num],
            output_shape=(self.node_num,)
        )(x_in)
        # Process inputs: reconstructions and embeddings for both endpoints.
        x_hat1 = self.autoencoder(x1)
        x_hat2 = self.autoencoder(x2)
        y1 = self.encoder(x1)
        y2 = self.encoder(x2)
        # Outputs: residuals fed to weighted_mse_x (2nd order) and the
        # embedding difference fed to weighted_mse_y (1st order).
        x_diff1 = Subtract()([x_hat1, x1])
        x_diff2 = Subtract()([x_hat2, x2])
        y_diff = Subtract()([y2, y1])
        # One shared clustering layer applied to both endpoint embeddings.
        clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')
        clustering_layer1 = clustering_layer(y1)
        clustering_layer2 = clustering_layer(y2)

        # Model
        # self.pre_model = Model(input=x_in, output=[x_diff1, x_diff2])
        # pre_model: SDNE pretraining (reconstruction + embedding losses).
        # cluster_model: adds the two KLD clustering outputs.
        # predict_model: embedding + soft assignments for inference.
        self.pre_model = Model(input=x_in, output=[x_diff1, x_diff2, y_diff])
        self.cluster_model = Model(input=x_in, output=[x_diff1, x_diff2, y_diff, clustering_layer1, clustering_layer2])
        self.predict_model = Model(input=x_in, output=[y1, clustering_layer1])

    def pretrain(self, x, optimizer='adam', epochs=200, batch_size=256, beta=1):
        """SDNE pretraining: fit pre_model on edge batches from x.

        Args:
            x: adjacency-like training matrix (one row per node).
            optimizer: keras optimizer name or instance.
            epochs: number of passes over the generator.
            batch_size: minibatch size for the edge-batch generator.
            beta: penalty weight for non-zero entries (passed to the
                batch generator; see weighted_mse_x).
        """
        print('...Pretraining...')
        self.pre_model.compile(optimizer=optimizer, loss=[weighted_mse_x, weighted_mse_x, weighted_mse_y])
        # begin pretraining
        t0 = time()
        self.pre_model.fit_generator(generator=batch_generator_sdne(x, batch_size=batch_size, shuffle=True, beta=beta),
                                     epochs=epochs, steps_per_epoch= (self.node_num//batch_size))
        print('Pretraining time: ', time() - t0)
        self.pretrained = True

    @staticmethod
    def target_distribution(q):
        # DEC's auxiliary target distribution: sharpen q by squaring and
        # normalizing per cluster, then re-normalize per sample.
        weight = q ** 2 / q.sum(0)
        return (weight.T / weight.sum(1)).T

    # def fit(self, x, graph=None, edge_f=None,
    #         is_weighted=False, no_python=False):
    #
    #     sgd = SGD(lr=self.xeta, decay=1e-5, momentum=0.99, nesterov=True)
    #     # adam = Adam(lr=self.xeta, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    #     self.model.compile(
    #         optimizer=sgd,
    #         loss=[weighted_mse_x, weighted_mse_x, weighted_mse_y],
    #         loss_weights=[1, 1, self.alpha]
    #     )
    #
    #     self.model.fit_generator(
    #         generator=batch_generator_sdne(S, self.beta, self.n_batch, True),
    #         nb_epoch=self.num_iter,
    #         samples_per_epoch=S.nonzero()[0].shape[0] // self.n_batch,
    #         verbose=1
    #     )
    #     # Get embedding for all points
    #     self.Y = model_batch_predictor(self.autoencoder, S, self.n_batch)
    #     t2 = time()
    #     return self.Y, (t2 - t1)

    def fit(self, x_train, optimizer='adam', beta =1, y = None, epochs=500,
            batch_size=256, update_interval=5, early_stopping=20, tol=0.01):
        """Deep-clustering phase: k-means init, then alternate between
        updating DEC's target distribution p and fitting cluster_model.

        Args:
            x_train: adjacency-like training matrix (one row per node).
            optimizer: keras optimizer for cluster_model.
            beta: penalty weight passed to the batch generator.
            y: optional ground-truth labels, used only for ACC/NMI/ARI
                reporting and t-SNE plots.
            epochs: maximum number of outer iterations.
            batch_size: minibatch size.
            update_interval: iterations between target-distribution updates.
            early_stopping: unused here — TODO confirm intent.
            tol: stop when the fraction of changed assignments drops below it.
        """
        # predict_model expects the packed (row, row) input, so duplicate x.
        double_x = np.append(x_train, x_train, axis=1)
        print('Update interval', update_interval)

        # Step 1: initialize cluster centers using k-means
        t1 = time()
        print('Initializing cluster centers with k-means.')
        kmeans = KMeans(n_clusters=self.n_clusters, n_init=20)
        encoder_out = self.encoder.predict(x_train)
        y_pred = kmeans.fit_predict(encoder_out)
        if y is not None:
            acc = np.round(metrics.acc(y, y_pred), 5)
            nmi = np.round(metrics.nmi(y, y_pred), 5)
            ari = np.round(metrics.ari(y, y_pred), 5)
            print('kmeans : acc = %.5f, nmi = %.5f, ari = %.5f' % (acc, nmi, ari))
            X_embedded = TSNE(n_components=2).fit_transform(encoder_out)
            # fig = plt.gcf()
            # plt.savefig("k-means.png")
            plt.figure(figsize=(12, 10))
            plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y)
            plt.colorbar()
            plt.show()
            # NOTE(review): savefig after plt.show() — with some backends the
            # figure is cleared by show(), so the PNG may be empty; confirm.
            plt.savefig("k-means.png")
        print("K-means result:")
        print(np.bincount(y_pred))
        # y_pred = kmeans.fit_predict(x_train)
        y_pred_last = np.copy(y_pred)
        # Seed the clustering layer with the k-means centers.
        self.cluster_model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_])
        # NOTE(review): uses the module-level `args` instead of self.alpha /
        # self.gamma — this method breaks if the class is imported without
        # running the __main__ block; confirm and prefer the instance fields.
        self.cluster_model.compile(optimizer=optimizer, loss=[weighted_mse_x, weighted_mse_x, weighted_mse_y, 'kld', 'kld'],
                           loss_weights=[1, 1, args.alpha, args.gamma, args.gamma])
        # for ite in range(int(epoch)):
        #     if ite % update_interval == 0:
        #         q,_,_ = self.model.predict(x_train, verbose=0)
        #         p = self.target_distribution(q)  # update the auxiliary target distribution p
        #     y0 = np.zeros_like(x_train)
        #     self.model.fit(x=x_train, y=[p, y0, x_train], batch_size=batch_size)

        # Step 2: deep clustering
        for ite in range(int(epochs)):
            # train on batch
            if ite % update_interval == 0:
                # Refresh soft assignments q and the target distribution p.
                _, q = self.predict_model.predict(double_x, verbose=0)
                p = self.target_distribution(q)  # update the auxiliary target distribution p
                y_pred = q.argmax(1)
                # Fraction of samples whose cluster changed since last check.
                delta_label = np.sum(y_pred != y_pred_last).astype(np.float32) / y_pred.shape[0]
                print("delta label:{}".format(delta_label))
                y_pred_last = np.copy(y_pred)
                if y is not None:
                    acc = np.round(metrics.acc(y, y_pred), 5)
                    nmi = np.round(metrics.nmi(y, y_pred), 5)
                    ari = np.round(metrics.ari(y, y_pred), 5)
                    print('acc = %.5f, nmi = %.5f, ari = %.5f' % (acc, nmi, ari))
                print("predicted bin")
                print(np.bincount(y_pred))
                # Converged: assignments are stable below tol.
                if ite > update_interval and delta_label < tol:
                    print("Early stopping...")
                    break
            print(ite)
            self.cluster_model.fit_generator(
                generator=batch_generator_dec(x_train, p, batch_size=batch_size, shuffle=True, beta=beta),
                shuffle=False, steps_per_epoch=(self.node_num // batch_size)
            )
        print('training time: ', time() - t1)
        # save the trained model
        encoder_out = self.encoder.predict(x_train)
        _, q = self.predict_model.predict(double_x, verbose=0)
        #k-means baseline on the final embedding
        y_pred = kmeans.fit_predict(encoder_out)
        if y is not None:
            acc = np.round(metrics.acc(y, y_pred), 5)
            nmi = np.round(metrics.nmi(y, y_pred), 5)
            ari = np.round(metrics.ari(y, y_pred), 5)
            print('kmeans : acc = %.5f, nmi = %.5f, ari = %.5f' % (acc, nmi, ari))
            print(np.bincount(y_pred))
        #this method: DEC assignments from the clustering layer
        y_pred = q.argmax(1)
        if y is not None:
            print("orginal cluster proportion: {}".format(np.bincount(y)))
            acc = np.round(metrics.acc(y, y_pred), 5)
            nmi = np.round(metrics.nmi(y, y_pred), 5)
            ari = np.round(metrics.ari(y, y_pred), 5)
            print('dec : acc = %.5f, nmi = %.5f, ari = %.5f' % (acc, nmi, ari))
            X_embedded = TSNE(n_components=2).fit_transform(encoder_out)
            # NOTE(review): savefig is called BEFORE the figure is drawn, so
            # "dec.png" likely captures the previous figure — confirm.
            plt.savefig("dec.png")
            plt.figure(figsize=(12, 10))
            plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y)
            plt.colorbar()
            plt.show()
        print(np.bincount(y_pred))
if __name__ == "__main__":
    import argparse

    # ---------------- command-line interface ----------------
    parser = argparse.ArgumentParser(description='train',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--batch_size', default=256, type=int)
    parser.add_argument('--epochs', default=500, type=int)
    parser.add_argument('--pretrain_epochs', default=200, type=int)
    parser.add_argument('--update_interval', default=5, type=int)
    parser.add_argument('--ae_weights', default=None)
    parser.add_argument('--gene_select', default=None, type=int)
    parser.add_argument('--tol', default=0.001, type=float)
    parser.add_argument("--early_stopping", default=20, type=int)
    parser.add_argument("--n_clusters", default=5, type=int)
    parser.add_argument("--train_datapath", default="data/drop80-0-1.train", type=str)
    parser.add_argument("--labelpath", default=None, type=str)
    parser.add_argument("--outDir", default="data/drop80-0-1.train", type=str)
    parser.add_argument("--model_name", default="data/drop80-0-1.train", type=str)
    parser.add_argument("--data_type", default="count", type=str)
    # Paired boolean switches: --flag / --no-flag sharing one dest.
    feature_parser = parser.add_mutually_exclusive_group(required=False)
    feature_parser.add_argument('--trans', dest='trans', action='store_true')
    feature_parser.add_argument('--no-trans', dest='trans', action='store_false')
    parser.set_defaults(trans=True)
    feature_parser = parser.add_mutually_exclusive_group(required=False)
    feature_parser.add_argument('--bn', dest='bn', action='store_true')
    feature_parser.add_argument('--no-bn', dest='bn', action='store_false')
    parser.set_defaults(bn=True)
    feature_parser = parser.add_mutually_exclusive_group(required=False)
    feature_parser.add_argument('--gene_scale', dest='gene_scale', action='store_true')
    feature_parser.add_argument('--no-gene_scale', dest='gene_scale', action='store_false')
    parser.set_defaults(gene_scale=True)
    parser.add_argument("--metric", default="pearson", type=str)
    parser.add_argument('--gamma', default=0.1, type=float)
    parser.add_argument('--nu1', default=1e-6, type=float)
    parser.add_argument('--nu2', default=1e-6, type=float)
    parser.add_argument('--beta', default=1.0, type=float)
    parser.add_argument('--alpha', default=0.1, type=float)
    args = parser.parse_args()
    print(args)
    # ---------------- load dataset ----------------
    t0 = time()
    edges, graph = load_newdata(args)
    print("")
    # BUG FIX: n_clusters was previously only assigned inside the labelpath
    # branch, so running without --labelpath crashed with NameError at the
    # SDNE(...) call below.  Default it from the (previously unused) CLI flag.
    n_clusters = args.n_clusters
    # Optional ground-truth labels; used only for evaluation/reporting.
    y = None
    if args.labelpath is not None:
        from sklearn.preprocessing import LabelEncoder
        labeldf = pd.read_csv(args.labelpath, header=0, index_col=0)
        y = labeldf.values
        y = y.transpose()
        y = np.squeeze(y)
        # NOTE(review): y is an ndarray here, so this isinstance test is always
        # True and LabelEncoder is always applied — presumably the intent was to
        # test the element type; confirm.
        if not isinstance(y, (int, float)):
            y = LabelEncoder().fit_transform(y)
        n_clusters = len(np.unique(y))
        print("has {} clusters:".format(n_clusters))
        print("orginal cluster proportion: {}".format(np.bincount(y)))
        # Baseline for comparison: PCA -> t-SNE -> k-means on the raw graph.
        X = PCA(n_components=50).fit_transform(graph)
        X_embedded = TSNE(n_components=2).fit_transform(X)
        y_pred = KMeans(n_clusters=n_clusters, n_init=40).fit_predict(X_embedded)
        acc = np.round(metrics.acc(y, y_pred), 5)
        nmi = np.round(metrics.nmi(y, y_pred), 5)
        ari = np.round(metrics.ari(y, y_pred), 5)
        print('acc = %.5f, nmi = %.5f, ari = %.5f' % (acc, nmi, ari))
        # BUG FIX: savefig() was originally called before the figure was drawn
        # (saving an empty canvas) and again after plt.show() (which can leave a
        # blank figure on some backends).  Draw first, save once, then show.
        plt.figure(figsize=(12, 10))
        plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y)
        plt.colorbar()
        plt.savefig("tsne.png")
        plt.show()
    # ---------------- build and train the model ----------------
    init = 'glorot_uniform'
    optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.)
    # prepare the DEC model
    Sdne = SDNE(dims=[edges, 1000, 300, 20], bn=args.bn, init=init, nu1=args.nu1, nu2=args.nu2, alpha=args.alpha,
                gamma=args.gamma, n_clusters=n_clusters)
    Sdne.pre_model.summary()
    # Pretrain the autoencoder unless saved weights are supplied.
    if args.ae_weights is None:
        Sdne.pretrain(x=graph, optimizer=optimizer, epochs=args.pretrain_epochs, batch_size=args.batch_size, beta=args.beta)
    else:
        Sdne.autoencoder.load_weights(args.ae_weights)
    Sdne.cluster_model.summary()
    Sdne.fit(graph, optimizer=optimizer, beta=1, y=y,
             epochs=args.epochs, batch_size=args.batch_size,
             update_interval=args.update_interval, early_stopping=args.early_stopping, tol=args.tol)
"klovbe@gmail.com"
] | klovbe@gmail.com |
057ade53206a1e43f6ff9b28dc78a967904c96bf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/480/usersdata/334/110622/submittedfiles/Av2_Parte2.py | e11e2a6b49056c7a5f5f6a12af476f55d4ff8dad | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # -*- coding: utf-8 -*-
# Read an integer and print the sum of its decimal digits.
numero = int(input('Digite um número: '))
soma = 0
while numero != 0:
    soma += numero % 10
    numero //= 10
print(soma)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1a71a9961e299b2092ae9063b2a5fbf394b7517d | ab4bf61695c200bc1cad19a1313a0d9e3893cba9 | /programmers/heap-doubleQueue.py | eeadc481f475a4ed0c2c18ba8a002fbe8059a80f | [] | no_license | aibees/algorithm | fc19afd587b71a6d1395826efe813529bbeb999e | 08b923eaa7c7b6e76aad44a1a7cb3ff6bf200380 | refs/heads/master | 2021-08-31T11:53:34.055946 | 2021-08-11T15:29:52 | 2021-08-11T15:29:52 | 164,192,026 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | import heapq
def solution(operations):
    """Simulate a double-ended priority queue.

    Each operation is a string:
      "I n"  -> insert integer n
      "D 1"  -> delete the current maximum (ignored when the queue is empty)
      "D -1" -> delete the current minimum (ignored when the queue is empty)

    Returns [max, min] of the remaining elements, or [0, 0] when empty.

    BUG FIX: the original compared integers with `is` / `is not`
    (``num is 1``, ``len(queue) is not 0``), which only works because CPython
    caches small ints — replaced with value comparisons.  Debug prints removed.
    """
    queue = []
    for oper in operations:
        cmd, value = oper.split(" ")
        num = int(value)
        if cmd == "I":
            queue.append(num)
            queue.sort()  # keep ascending: queue[0] is min, queue[-1] is max
        elif queue:  # "D" on an empty queue is a no-op
            if num == 1:
                queue.pop()      # remove maximum
            elif num == -1:
                queue.pop(0)     # remove minimum
    return [queue.pop(), queue.pop(0)] if queue else [0, 0]
print(solution( ["I -45", "I -642", "I 653", "D 1", "I -642", "I 45", "I 97", "D 1", "D -1", "I 333"])) | [
"aibees1129@naver.com"
] | aibees1129@naver.com |
29865e4eea4bce9834d13137459eed5c0cd0acc9 | 18f8bbe0e32ab2964b779091e84054a3c4bad69e | /lame_sender.py | 5c745caa512bbe87c574812864b4a7aaa9eae001 | [] | no_license | elanwu/lame_sender | b48d7085288248cc448a807025d75434ec7a5bbe | 996f4b36c2221cfad5ccc7654a9cb708719475ad | refs/heads/master | 2022-12-01T23:12:08.305332 | 2020-08-11T06:45:16 | 2020-08-11T06:45:16 | 282,705,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,371 | py | #!/bin/python3
# Not complete Xmodem serial file (e.g. binary mcu firmware) transmiting program.
# Free to use, with your own risk.
# elanwu@yeah.net
import serial
import hashlib
import datetime
import struct
import time
import binascii
import random
## DESIGNATE: 1) BINARY FIRMWARE FILE NAME, 2) SERIAL PORT ##
BIN_FW_NAME = 'a.bin'  # firmware image file to transmit
SER_PORT_NAME_DEFAULT = ('COM11', 'COM7', '/dev/ttyUSB0')[1] # 0,1,2 ... index picks the fallback port
# Xmodem control bytes (see protocol notes at the bottom of this file).
SOH = b'\x01'  # Start Of Header: prefixes every 128-byte data block
NAK = b'\x15'  # negative acknowledge: receiver asks for a (re)send
ACK = b'\x06'  # acknowledge: receiver accepted the block, advance
def find_a_valid_serial_port_name():
    """Probe for an openable serial port and return its name, or '' if none.

    For each index from 32 down to -1, tries "COM{i}" first and then
    "/dev/ttyUSB{i}", returning the first name that opens successfully.
    Prints '.' per failed COM probe and ',' per failed ttyUSB probe.
    """
    top = 32
    idx = -1
    name = ''
    for idx in range(top, -2, -1):
        opened = False
        for pattern, miss_mark in (('COM{}', '.'), ('/dev/ttyUSB{}', ',')):
            name = pattern.format(idx)
            try:
                probe = serial.Serial(name)
                probe.close()
                opened = True
                break
            except Exception:
                print(miss_mark, end='')
        if opened:
            break
    if idx < 0:
        # Exhausted every index without finding a port.
        name = ''
    return name
def calc_file_sha256_str(fn: str) -> str:
    """Identify a file by the lowercase hex SHA-256 digest of its content."""
    with open(fn, "rb") as fh:
        return hashlib.sha256(fh.read()).hexdigest()
def slice_file_into_128_bytes_blocks(fn: str) -> list:
    """Slice the content of file `fn` into 128-byte blocks.

    The last block, when shorter than 128 bytes, is padded with 0x1A (SUB) as
    the Xmodem protocol requires.  Returns a list of `bytes` objects; an empty
    file yields an empty list.

    BUG FIX: the original unconditionally indexed ``blocks[-1]`` and therefore
    raised IndexError on an empty file.
    """
    blocks = []
    with open(fn, "rb") as f:
        while True:
            chunk = f.read(128)
            if not chunk:  # read() returns b'' on EOF
                break
            blocks.append(chunk)
    # Pad only when there is a last block and it is short.
    if blocks and len(blocks[-1]) < 128:
        blocks[-1] += b'\x1A' * (128 - len(blocks[-1]))
    return blocks
def calc_xmodem_crc_byte(byts: bytes) -> bytes:
    """Return the one-byte Xmodem checksum: sum of the data bytes modulo 256."""
    return struct.pack('B', sum(byts) & 0xff)
def xmodem_transive(blks: [bytes], ser: serial.Serial):
    """Send the 128-byte blocks in `blks` over serial port `ser` via Xmodem.

    Waits for the receiver's NAK/ACK one byte at a time (2.7 s read timeout),
    frames each block as <SOH><blk#><255-blk#><128 data bytes><cksum>, resends
    on NAK or timeout with randomized backoff, and gives up after 10
    consecutive retries on one block.  No EOT is sent at the end; the receiver
    is expected to time out (see note below).
    """
    ser.apply_settings({'timeout': 2.7})
    tp1 = datetime.datetime.now()
    i = 0        # index of the block currently being sent
    rpt = 0      # consecutive retry count for the current block
    unit = b''
    while True:
        # Each block of the transfer looks like:
        # <SOH><blk #><255-blk #><--128 data bytes--><cksum>
        xmd_idx = (i + 1) & 0xff
        blk = blks[i]
        unit = SOH + struct.pack('BB', xmd_idx, 255-xmd_idx) + blk + calc_xmodem_crc_byte(blk)
        #incoming = ser.read_all()
        incoming = ser.read(1)
        # (reading) timeout expired, ACK/NAK got garbaged?
        # or NAK received: resend in both situations.
        if incoming == b'' or NAK in incoming:
            rpt += 1
            # NOTE(review): these progress prints use the module-level global
            # `blocks` (assigned in __main__), not the `blks` parameter — this
            # only works when called from this script; confirm before reuse.
            print("{:03d}/{:03d} blocks repeating {}\r".format(i, len(blocks), '.' * rpt), end='')
            # Randomized backoff that grows with the retry count.
            time.sleep(0.1 + (rpt / 70) * random.random())
        # proceed to the next block on ACK.
        elif ACK in incoming:
            i+=1
            rpt = 0
            print("{:03d}/{:03d} blocks transmited".format(i, len(blocks)))
            time.sleep(0.03)
        # quit loop if retried too much or finished.
        if 10 < rpt or i == len(blks):
            break
        ser.reset_input_buffer()
        ser.write(unit)
    # Note: no SENDER EOT here, as the RECEIVER will time out on its own.
    time.sleep(3)
    response = ser.read_all()
    tp2 = datetime.datetime.now()
    # TODO: failed when i != len(blocks)?
    # TODO: check XIC in response against the calculated one, whether they match.
    print("{:03d}/{:03d} blocks transmited in {} seconds".format(i, len(blocks), (tp2-tp1).seconds))
    print("response:<{}>".format(response))
if __name__ == "__main__":
    # Entry point: locate a serial port, hash and slice the firmware image,
    # then push it to the receiver over Xmodem.
    print('lame_sender V0.1.2')
    serial_port_name = find_a_valid_serial_port_name()
    print('serial_port_found:<{}>'.format(serial_port_name))
    # Fall back to the hard-coded default when auto-detection found nothing.
    SER_PORT_NAME = SER_PORT_NAME_DEFAULT if serial_port_name == '' else serial_port_name
    print('bin_firmware_file:<{}>, serial_port:<{}>'.format(BIN_FW_NAME, SER_PORT_NAME))
    # Only the first 5 hex chars of the digest are shown, as a short file id.
    dgst = calc_file_sha256_str(fn=BIN_FW_NAME)
    print("bin_firware_file:<{}> is found, with sha256 digest:<{}>.".format(BIN_FW_NAME, dgst[0:5]))
    ser = serial.Serial(SER_PORT_NAME, 9600) # open serial port.
    print("serial_port:<{}> is opened, with init config:<{}>, 9600,8N1 is expected.".format(SER_PORT_NAME, ser.get_settings()))
    # NOTE: this module-level `blocks` is also read by xmodem_transive()'s
    # progress prints.
    blocks = slice_file_into_128_bytes_blocks(fn=BIN_FW_NAME)
    xmodem_transive(blks=blocks, ser=ser)
    print('Done')
    input('Press any key to exit ...')
'''
XIC: printf("XIC:<%C%C>", ('H' + (sum >> 4)), ('H' + (sum & 0x0f)));
http://techheap.packetizer.com/communication/modems/xmodem.html
-------- 1. DEFINITIONS.
<soh> 01H
<eot> 04H
<ack> 06H
<nak> 15H
<can> 18H
-------- 3. MESSAGE BLOCK LEVEL PROTOCOL
Each block of the transfer looks like:
<SOH><blk #><255-blk #><--128 data bytes--><cksum>
in which:
<SOH> = 01 hex
<blk #> = binary number, starts at 01 increments by 1, and
wraps 0FFH to 00H (not to 01)
<255-blk #> = blk # after going thru 8080 "CMA" instr.
Formally, this is the "ones complement".
<cksum> = the sum of the data bytes only. Toss any carry.
-------- 5. DATA FLOW EXAMPLE INCLUDING ERROR RECOVERY
Here is a sample of the data flow, sending a 3-block message.
It includes the two most common line hits - a garbaged block,
and an <ack> reply getting garbaged. <xx> represents the
checksum byte.
SENDER RECIEVER
Times out after 10 seconds,
<--- <nak>
<soh> 01 FE -data- <xx> --->
<--- <ack>
<soh> 02 FD -data- <xx> ---> (data gets line hit)
<--- <nak>
<soh> 02 FD -data- <xx> --->
<--- <ack>
<soh> 03 FC -data- <xx> --->
(ack gets garbaged) <--- <ack>
<soh> 03 FC -data- <xx> --->
<--- <ack>
<eot> --->
<--- <ack>
'''
| [
"elanwu@yeah.net"
] | elanwu@yeah.net |
503f4adc6d38797e5af48405cb4b1924392660b6 | 5af21bf4f558001dbaecb4bc55d5d508a85cb761 | /photoData.py | 0d66dcf8743d11daa5056cafe06b774d659144e3 | [] | no_license | zhxjnkjbh/imgDetecter | 14314cac4e4d3209bf945e0382499300040e7ab5 | dee88846b4e22bf84814d12db090732eea0aecf6 | refs/heads/master | 2020-07-27T21:56:42.419808 | 2019-10-09T03:34:07 | 2019-10-09T03:34:07 | 209,226,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,348 | py | # -*- coding: utf-8 -*-
import cv2
import numpy as np
from PyQt5 import QtGui
class PhotoData:
    """Holds one loaded photo plus derived views (RGB, gray, per-channel,
    binary) and converts the selected view to a QImage for display in a
    Qt UI.  All views are fixed at 1024x768 after read()."""
    def __init__(self):
        # Image buffers; all populated by read().
        self.img_raw = None
        self.img_rgb = None
        self.img_gray = None
        self.img_r = None
        self.img_g = None
        self.img_b = None
        self.img_show = None
        self.img_binary = None
        # Placeholder QImage shown before any photo is loaded.
        self.qimg_show = QtGui.QImage(1024, 768, QtGui.QImage.Format_RGB888)
        self.width = 0
        self.height = 0
        self.img_list = ['001.png', '003.jpg']
        #self.read('3.jpg')
        self.img_name = ''#self.img_list[0]
        self.cur_index = 0 # current processing step: 0:grayParam, 1:openClose, 2:thresh, 3:filter
        # Original (pre-resize) image dimensions.
        self.raw_width = 0
        self.raw_height = 0
        print('after read:', self.width, '*', self.height)
    def read(self, img_name):
        """Load the image at img_name, resize to 1024x768, and build the
        RGB/gray/channel/binary views.  Returns True on success, False when
        the file could not be loaded."""
        print(img_name)
        # The encode('gbk').decode() round-trip is presumably a workaround for
        # non-ASCII Windows paths passed to cv2.imread — TODO confirm.
        self.img_raw = cv2.imread(img_name.encode('gbk').decode(), 1)
        if self.check():
            self.img_name = img_name
            self.raw_width = self.img_raw.shape[1]
            self.raw_height = self.img_raw.shape[0]
            self.img_raw = cv2.resize(self.img_raw, (1024, 768))
            # img_raw now holds RGB data (converted from OpenCV's BGR order).
            self.img_raw = cv2.cvtColor(self.img_raw, cv2.COLOR_BGR2RGB)
            self.img_rgb = self.img_raw.copy()
            self.img_show = self.img_rgb.copy()
            self.toGray()
            # NOTE(review): the three manual slices below are immediately
            # overwritten by cv2.split(); also img_raw is RGB at this point,
            # so the b/g/r labels on these slice indices look inconsistent —
            # confirm which assignment was intended.
            self.img_b = self.img_raw[:, :, 0]
            self.img_g = self.img_raw[:, :, 1]
            self.img_r = self.img_raw[:, :, 2]
            self.img_r, self.img_g, self.img_b = cv2.split(self.img_raw) # split channels
            self.width = self.img_raw.shape[1]
            self.height = self.img_raw.shape[0]
            # Blank single-channel buffer for later thresholding results.
            self.img_binary = np.zeros((self.height, self.width), dtype=np.uint8)
            #print(type(self.img_raw))
            #print(self.img_raw.shape[0], '*', self.width)
            return True
        else:
            print('check fail!')
            return False
        print('raw:', self.width, '*', self.height)  # unreachable: both branches return above
    def check(self):
        # True iff an image has been successfully loaded.
        return (not (self.img_raw is None))
    def toGray(self):
        # NOTE(review): img_rgb already holds RGB data, but COLOR_BGR2GRAY is
        # applied, so the R and B luminance weights are swapped — confirm.
        self.img_gray = cv2.cvtColor(self.img_rgb, cv2.COLOR_BGR2GRAY)
    def show(self, index):
        """Select a view by index and return it as a QImage:
        0 = color (RGB), 5 = raw, 1 = red channel, 2 = green, 3 = blue,
        anything else = grayscale."""
        #print('show:', self.qimg_show.width(), '*', self.height)
        img_show = self.img_rgb
        if 0 == index:
            # color
            img_show = self.img_rgb
            #self.qimg_show = QtGui.QImage(self.img_rgb, self.width, self.height, self.width * 3, QtGui.QImage.Format_RGB888)
        elif 5 == index:
            #raw
            img_show = self.img_raw
            #self.qimg_show = QtGui.QImage(self.img_raw, self.width, self.height, self.width * 3, QtGui.QImage.Format_RGB888)
        else:
            '''
            if 0 <= index and 4 > index:
                img_data = None
                if 0 == index:
                    # raw
                    self.img_show = self.img_raw
                    print('raw:')
                    #self.img_show = QtGui.QImage(self.img_raw, self.width, self.height, self.width * 3, QtGui.QImage.Format_RGB888)
                    print('img_raw:', self.qimg_show.width(), '*', self.qimg_show.height())
            '''
            if 1 == index:
                # r
                img_show = self.img_r
                print('r')
            elif 2 == index:
                # g
                img_show = self.img_g
                print('g')
            elif 3 == index:
                # b
                img_show = self.img_b
                print('b')
            else:
                # gray
                img_show = self.img_gray
            #self.qimg_show = QtGui.QImage(self.img_show, self.width, self.height, QtGui.QImage.Format_Grayscale8)
            #print('raw0:', self.img_show.width(), '*', self.img_show.height())
            #self.qimg_show = QtGui.QImage(self.img_show, self.width, self.height, self.width * 3, QtGui.QImage.Format_RGB888)
            #print('raw1:', self.qimg_show.width(), '*', self.qimg_show.height())
        # 3-dim array => 3-channel RGB image, otherwise single-channel gray.
        if 3 == len(img_show.shape):
            self.qimg_show = QtGui.QImage(img_show, self.width, self.height, self.width * 3, QtGui.QImage.Format_RGB888)
        else:
            self.qimg_show = QtGui.QImage(img_show, self.width, self.height, QtGui.QImage.Format_Grayscale8)
        #print('raw2:', self.qimg_show.width(), '*', self.qimg_show.height())
        print('img_show:', self.width, '*', self.qimg_show.height())
        return self.qimg_show
#data = PhotoData()
#print(type(data.img_raw))
    def showProcessedImg(self, is_binary = False):
        """Return the current processing result (img_show, or img_binary when
        is_binary is True) wrapped in a QImage."""
        img = self.img_show
        print('photoData::showProcessedImg:', self.width, '*', self.height)
        if is_binary:
            img = self.img_binary
        '''
        self.qimg_show = QtGui.QImage(img, self.width, self.height, QtGui.QImage.Format_Grayscale8)
        else:
            self.qimg_show = QtGui.QImage(img, self.width, self.height, width,
                                          QtGui.QImage.Format_RGB888)
        '''
        # 3-dim array => RGB image, otherwise single-channel gray.
        if 3 == len(img.shape):
            self.qimg_show = QtGui.QImage(img, self.width, self.height, self.width * 3, QtGui.QImage.Format_RGB888)
        else:
            self.qimg_show = QtGui.QImage(img, self.width, self.height, QtGui.QImage.Format_Grayscale8)
return self.qimg_show | [
"zhxjnkjbh@163.com"
] | zhxjnkjbh@163.com |
93843ecba39e6406a7648888f5cdf768deb2e2b6 | dac49880ab446c0d5e4bd7f216fd27ca900ae03d | /Python_CGI/App_3/cgi-bin/sentimentAnalysis.py | 256e46003ad9b81dad0380b03aa5b6b2ff3ff083 | [] | no_license | AkhileshPandeyji/Python_All | bf961cc7c775d58ff0856a9b8d5ef656c1f8756e | d2f33f0fe37fcc8d9864d886c46391c355d716af | refs/heads/master | 2020-06-13T04:38:01.637083 | 2019-07-11T17:03:05 | 2019-07-11T17:03:05 | 194,537,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,692 | py | import pandas as pd
import numpy as np
from nltk.tokenize import word_tokenize,sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer,WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer,TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score,confusion_matrix
from sklearn.model_selection import train_test_split
def train(data_dir="C:\\Users\\Akhilesh Kr. Pandey\\Desktop\\WebML\\App_3\\cgi-bin\\SentimentAnalysis"):
    """Train a bag-of-words logistic-regression sentiment classifier.

    Reads the three "sentiment labelled sentences" files (imdb / amazon /
    yelp) from `data_dir`, lowercases the text, removes stop words and common
    punctuation, lemmatizes verbs, then fits CountVectorizer +
    LogisticRegression on a 75/25 train/test split.

    Parameters
    ----------
    data_dir : str
        Directory containing the three *_labelled.txt files.  Defaults to the
        original hard-coded location, so existing callers are unaffected.

    Returns
    -------
    tuple
        (cv, reg, stopwords_list, wnet): the fitted vectorizer, the fitted
        classifier, the stop-word list, and the lemmatizer — exactly the
        objects test() expects.
    """
    import os

    # Load and concatenate the three labelled datasets (tab-separated,
    # two columns: review text, 0/1 label).
    frames = [
        pd.read_csv(os.path.join(data_dir, fname), sep='\t', header=None)
        for fname in ("imdb_labelled.txt", "amazon_cells_labelled.txt", "yelp_labelled.txt")
    ]
    df = pd.concat(frames, ignore_index=True)
    df.columns = ['Review', 'Sentiment']

    # Stop words plus common punctuation.  The list is returned (test() uses
    # it), but membership checks go through a set for O(1) lookups.
    stopwords_list = stopwords.words('english')
    stopwords_list.extend([',', '.', '-', '!', "(", ")"])
    stop_set = set(stopwords_list)

    wnet = WordNetLemmatizer()

    # Tokenize / filter / lemmatize each review and re-join into one string.
    # BUG FIX: the original passed the ragged token lists through np.asarray,
    # which raises on NumPy >= 1.24; plain Python lists behave identically.
    docs = []
    for review in df['Review']:
        words = [w.lower() for w in word_tokenize(review)]
        words = [wnet.lemmatize(w, pos='v') for w in words if w not in stop_set]
        docs.append(' '.join(words))

    cv = CountVectorizer()
    vector = cv.fit_transform(docs)
    y = df['Sentiment'].values
    xtrain, xtest, ytrain, ytest = train_test_split(vector, y, test_size=0.25)
    reg = LogisticRegression()
    reg.fit(xtrain, ytrain)
    # NOTE: the original computed (and discarded) predictions on xtest; hold
    # xtest/ytest here if evaluation is added later.
    return cv, reg, stopwords_list, wnet
def test(msg, cv, reg, stopwords_list, wnet):
    """Predict the sentiment label of a single message.

    Applies the same preprocessing pipeline used in train() — lowercase,
    stop-word/punctuation removal, verb lemmatization — then vectorizes and
    classifies.  The parameters mirror train()'s return values.

    Returns the predicted label for msg (the first element of predict()'s
    one-element array; presumably 0/1 per the Sentiment column — confirm
    against the training data).

    Cleanup: removed the dead `rev = msg` indirection and the `sent = []`
    assignment that was immediately overwritten in the original.
    """
    words = [w.lower() for w in word_tokenize(msg)]
    words = [wnet.lemmatize(w, pos='v') for w in words if w not in stopwords_list]
    vect = cv.transform([' '.join(words)])
    return reg.predict(vect)[0]
# for testing purpose
if __name__ == '__main__':
    # Train once, then classify a single sample message and print the label.
    vectorizer, model, stop_words, lemmatizer = train()
    label = test("Bad Movie", vectorizer, model, stop_words, lemmatizer)
    print(label)
| [
"pandeyakhilesh5372@gmail.com"
] | pandeyakhilesh5372@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.