blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7bae5f6dbc68ac4b1343bd97117e35335e733b41 | bf6b1e77e548d90c9a0e456d7cd1b6555c7f32ba | /Zookeeper.py | 1187b6819fe60c8787614b23b21c6e078eab2e3c | [] | no_license | saiganeshrajakommuri/ZookeeperProject | b3d05aa0f2f9e5b79d7c1baa9cc1690b37bcebbb | b34828a35405ffae23f7b02e388dbacb9c354ad5 | refs/heads/master | 2022-11-18T22:57:09.259111 | 2020-07-18T12:22:39 | 2020-07-18T12:22:39 | 272,244,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | camel = r"""
Switching on camera from habitat with camels...
___.-''''-.
/___ @ |
',,,,. | _.'''''''._
' | / \
| \ _.-' \
| '.-' '-.
| ',
| '',
',,-, ':;
',,| ;,, ,' ;;
! ; !'',,,',',,,,'! ; ;:
: ; ! ! ! ! ; ; :;
; ; ! ! ! ! ; ; ;,
; ; ! ! ! ! ; ;
; ; ! ! ! ! ; ;
;,, !,! !,! ;,;
/_I L_I L_I /_I
Yey, our little camel is sunbathing!"""
lion = r"""
Switching on camera from habitat with lions...
,w.
,YWMMw ,M ,
_.---.._ __..---._.'MMMMMw,wMWmW,
_.-"" ''' YP"WMMMMMMMMMb,
.-' __.' .' MMMMW^WMMMM;
_, .'.-'"; `, /` .--"" :MMM[==MWMW^;
,mM^" ,-'.' / ; ; / , MMMMb_wMW" @\
,MM:. .'.-' .' ; `\ ; `, MMMMMMMW `"=./`-,
WMMm__,-'.' / _.\ F'''-+,, ;_,_.dMMMMMMMM[,_ / `=_}
"^MP__.-' ,-' _.--"" `-, ; \ ; ;MMMMMMMMMMW^``; __|
/ .' ; ; ) )`{ \ `"^W^`, \ :
/ .' / ( .' / Ww._ `. `"
/ Y, `, `-,=,_{ ; MMMP`""-, `-._.-,
(--, ) `,_ / `) \/"") ^" `-, -;"\:
The lion is croaking!"""
deer = r"""
Switching on camera from habitat with deers...
/| |\
`__\\ //__'
|| ||
\__`\ |'__/
`_\\ //_'
_.,:---;,._
\_: :_/
|@. .@|
| |
,\.-./ \
;;`-' `---__________-----.-.
;;; \_\
';;; |
; | ;
\ \ \ | /
\_, \ / \ |\
|';| |,,,,,,,,/ \ \ \_
| | | \ / |
\ \ | | / \ |
| || | | | | |
| || | | | | |
| || | | | | |
|_||_| |_| |_|
/_//_/ /_/ /_/
Our 'Bambi' looks hungry. Let's go to feed it!"""
goose = r"""
Switching on camera from habitat with lovely goose...
_
,-"" "".
,' ____ `.
,' ,' `. `._
(`. _..--.._ ,' ,' \ \
(`-.\ .-"" ""' / ( d _b
(`._ `-"" ,._ ( `-( \
<_ ` ( <`< \ `-._\
<`- (__< < :
(__ (_<_< ;
`------------------------------------------
This bird stares intently at you... (Maybe it's time to change the channel?)"""
bat = r"""
Switching on camera from habitat with bats...
_________________ _________________
~-. \ |\___/| / .-~
~-. \ / o o \ / .-~
> \\ W // <
/ /~---~\ \
/_ | | _\
~-. | | .-~
; \ / i
/___ /\ /\ ___\
~-. / \_/ \ .-~
V V
It looks like this bat is fine."""
rabbit = r"""
Switching on camera from habitat with rabbits...
,
/| __
/ | ,-~ /
Y :| // /
| jj /( .^
>-"~"-v"
/ Y
jo o |
( ~T~ j
>._-' _./
/ "~" |
Y _, |
/| ;-"~ _ l
/ l/ ,-"~ \
\//\/ .- \
Y / Y
l I !
]\ _\ /"\
(" ~----( ~ Y. )
It seems there will be more rabbits soon!"""
# Habitat index -> ASCII-art camera feed, in menu order.
animals = [camel, lion, deer, goose, bat, rabbit]

# Simple REPL: show the requested habitat until the user types "exit".
while True:
    n = input("Which habitat # do you need?")
    if n == "exit":
        print("See you!")
        break
    try:
        # int() raises ValueError on non-numeric input, and the index may
        # fall outside the list, so guard both instead of crashing.
        print(animals[int(n)])
    except (ValueError, IndexError):
        print("Please enter a habitat number between 0 and %d, or 'exit'." % (len(animals) - 1))
| [
"noreply@github.com"
] | noreply@github.com |
6890538b1bc2e5cf2c76206fe2ff95cbc574e301 | 40ba7074036d7834543764ae5e1c37985ffd4335 | /main_blindPnP.py | e76c6ebe82ab7170fede3278d8dedffb6434465c | [
"MIT"
] | permissive | Liumouliu/Deep_blind_PnP | 47d54c09db4f5e3ebff6875507e2a0b42430fc0d | 0e3e679ffa58c4d0824abf138d0254744dce37da | refs/heads/master | 2022-06-08T03:02:25.109027 | 2022-05-11T12:34:11 | 2022-05-11T12:34:11 | 221,364,258 | 129 | 18 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | import os
import random
from easydict import EasyDict as edict
import json
import logging
import sys
import torch.backends.cudnn as cudnn
import torch.utils.data
from config import get_config
from lib.data_loaders import make_data_loader
from trainer import BlindPnPTrainer
# logging
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format='%(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch])
logging.basicConfig(level=logging.INFO, format="")
# main function
def main(configs):
    """Build the train/validation data loaders and run the trainer."""
    # Training data is shuffled and batched; validation is evaluated one
    # sample at a time in a fixed order.
    loader_train = make_data_loader(configs, "train", configs.train_batch_size,
                                    num_threads=configs.train_num_thread, shuffle=True)
    loader_valid = make_data_loader(configs, "valid", 1,
                                    num_threads=configs.val_num_thread, shuffle=False)
    BlindPnPTrainer(configs, loader_train, loader_valid).train()
if __name__ == '__main__':
    configs = get_config()
    # -------------------------------------------------------------
    """You can change the configurations here or in the file config.py"""
    # dataset dir
    # configs.data_dir = "/media/liu/data"
    # dataset used
    # "megaDepth", "modelnet40", "nyu_non_overlap"
    configs.dataset = "megaDepth"
    # 1e-3 for megaDepth; 1e-4 for modelnet40; 1e-4 for nyu_non_overlap
    configs.train_lr = 1e-3
    # select which GPU to be used
    configs.gpu_inds = 0
    # This is a debug number, set it to whatever you want
    configs.debug_nb = "preTrained"
    # training batch size
    configs.train_batch_size = 12
    # if your training is terminated unexpectedly, uncomment the following line and set the resume_dir to continue
    # configs.resume_dir = 'output'
    # -------------------------------------------------------------
    dconfig = vars(configs)
    if configs.resume_dir:
        # Resuming: overwrite every matching key with the values stored in
        # the previous run's config.json, then point at its checkpoint.
        resume_config = json.load(open(configs.resume_dir + "/" + configs.dataset + "/" + configs.debug_nb + '/config.json', 'r'))
        for k in dconfig:
            if k in resume_config:
                dconfig[k] = resume_config[k]
        dconfig['resume'] = os.path.join(resume_config['out_dir'], resume_config['dataset'], configs.debug_nb) + '/checkpoint.pth'
    else:
        dconfig['resume'] = None
    # print the configurations
    logging.info('===> Configurations')
    for k in dconfig:
        logging.info('  {}: {}'.format(k, dconfig[k]))
    # Convert to dict
    configs = edict(dconfig)
    # set the seeds (Python, CPU torch, and CUDA) for reproducibility
    if configs.train_seed is not None:
        random.seed(configs.train_seed)
        torch.manual_seed(configs.train_seed)
        torch.cuda.manual_seed(configs.train_seed)
        cudnn.deterministic = True
    main(configs)
| [
"panpanfei@outlook.com"
] | panpanfei@outlook.com |
0fa10e1e12f2045d34d5cda669f06aa86c592aa5 | 4c1f87b0830debf0363e12c3c97c9d3b75b645e3 | /day8/day8.py | 6aeeac631bfbd3f1b13b534b3d5f55b40f73743c | [] | no_license | EricRoe/advent_of_code_2018 | 06e2777c78b60a08ae90147695447f660434ab3c | f12feccdc38446bd0eaa70171800f4479c6e6da4 | refs/heads/master | 2020-04-09T10:46:19.520667 | 2019-07-25T05:47:40 | 2019-07-25T05:47:40 | 160,282,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | with open('./input.txt') as f:
d = f.readline().split()
d = [int(i) for i in d]
#test = [2,3,0,3,10,11,12,1,1,0,1,99,2,1,1,2]
def get(tree):
    """Parse one node of the license tree (Advent of Code 2018, day 8).

    A node is laid out as: [child count, metadata count, child nodes...,
    metadata entries...]. Returns a tuple of (total entries consumed,
    resolved metadata list). For a leaf the metadata is taken verbatim;
    for an inner node each metadata entry is a 1-based reference to a
    child, and out-of-range or zero references contribute nothing.
    """
    child_count, meta_count = tree[0], tree[1]
    if not child_count:
        # Leaf: header (2 entries) plus its own metadata.
        return (2 + meta_count, tree[2:meta_count + 2])
    consumed = 0
    child_meta = []
    for _ in range(child_count):
        used, resolved = get(tree[2 + consumed:])
        consumed += used
        child_meta.append(resolved)
    meta = []
    for ref in tree[consumed + 2:consumed + meta_count + 2]:
        if ref and ref <= len(child_meta):
            meta += child_meta[ref - 1]
    return (consumed + meta_count + 2, meta)
_, meta = get(d)  # metadata resolved at the root via the referenced-child rule
print(sum(meta))
| [
"eric.roe@outlook.com"
] | eric.roe@outlook.com |
aef44472f047aceb84b2c449b00ba97e0e4eeca1 | 6f160f5bb400a3a8cf01079320c8a4e5290f21e0 | /detect_stutter.py | 8e47ebe6c6c99df3adbb7b1e7392cc310e9b2464 | [] | no_license | mansi-k/Stutter-Therapy | 96729729028164124a98fb4dc6c4c218f11b6d48 | 4339bbe4a2c01b340548f8f3d58190ea358651a3 | refs/heads/master | 2021-08-28T11:36:16.945067 | 2021-08-20T14:23:09 | 2021-08-20T14:23:09 | 240,598,327 | 15 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | import librosa
import numpy as np
import tensorflow as tf
from pydub import AudioSegment
from keras.models import load_model
import os
model_rep = load_model('/home/mansi/anaconda3/beproject/stutter_det/models/best_model_rep.h5')
model_pro = load_model('/home/mansi/anaconda3/beproject/stutter_det/models/best_model_pro.h5')
def detect_prolongation(mfcc):
    """Return a prolongation severity percentage over the MFCC windows.

    Each window is reshaped to (1, 2, 44, 1) for the prolongation model
    (the caller builds windows from MFCC rows 0 and 12). Confident
    predictions (> 0.5) are accumulated and averaged over all windows.
    """
    total = 0
    for window in mfcc:
        score = np.around(model_pro.predict(window.reshape(1, 2, 44, 1), batch_size=1),
                          decimals=2)[0][0]
        if score > 0.5:
            total += score
    return total / len(mfcc) * 100
def detect_repetition(mfcc):
    """Return a repetition severity percentage over the MFCC windows.

    Each window is reshaped to (1, 13, 44, 1) for the repetition model.
    Confident predictions (> 0.5) are accumulated and averaged over all
    windows.
    """
    scores = [np.around(model_rep.predict(m.reshape(1, 13, 44, 1), batch_size=1),
                        decimals=2)[0][0]
              for m in mfcc]
    confident = sum(score for score in scores if score > 0.5)
    return confident / len(mfcc) * 100
def detect_stutter(audio):
    """Split a WAV file into 1-second chunks and score stutter severity.

    Parameters
    ----------
    audio : str
        Path to a .wav file.

    Returns
    -------
    tuple
        (prolongation %, repetition %, overall %) as computed by the two
        chunk-level models.
    """
    sound_file = AudioSegment.from_wav(audio)
    # pydub slices in milliseconds, so [::1000] yields 1-second chunks.
    audio_chunks = sound_file[::1000]
    mfcc_arr_p = []
    mfcc_arr_r = []
    for i, chunk in enumerate(audio_chunks):
        # librosa cannot read pydub segments directly, so round-trip each
        # chunk through a file on disk.
        chunkfile = "chunks_test/chunk{0}.wav".format(i)
        chunk.export(chunkfile, format="wav")
        y, sr = librosa.load(chunkfile)
        mfcc = np.array(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13))
        # Keep only full-length windows: 13 coefficients x 44 frames.
        if mfcc.shape[0] == 13 and mfcc.shape[1] == 44:
            mfcc_arr_r.append([mfcc])
            # The prolongation model consumes only the first and last rows.
            mfcc_arr_p.append([mfcc[0], mfcc[12]])
    mfcc_arr_r = np.array(mfcc_arr_r)
    mfcc_arr_p = np.array(mfcc_arr_p)
    # NOTE: the original code called mfcc_arr_*.reshape(...) here and
    # discarded the result; ndarray.reshape returns a new array and is not
    # in-place, so those calls were no-ops and have been removed. The
    # detect_* helpers reshape each window themselves before predicting.
    p_sev = detect_prolongation(mfcc_arr_p)
    r_sev = detect_repetition(mfcc_arr_r)
    o_sev = (p_sev + r_sev) / 2
    return p_sev, r_sev, o_sev
if __name__== "__main__":
common = '/home/mansi/anaconda3/beproject/stutter_det/demo_audios'
arr1 = os.listdir(common)
for a in arr1:
print('\n'+a)
arr2 = os.listdir(common+'/'+a)
for b in arr2:
if b.endswith('.wav'):
print('\n'+b)
p_sev, r_sev, o_sev = detect_stutter(common+'/'+a+'/'+b)
print('Prolongation % : '+str(p_sev))
print('Repetition % : '+str(r_sev))
print('Overall stutter % : '+str(o_sev))
| [
"noreply@github.com"
] | noreply@github.com |
f37779ca9fecf3ff431fdb243d8bdb30859f1a21 | e70cc9ce5bf94d11d7c5d030abed5a0de26debb3 | /biocellion_frontend/bin/biocell/solver.py | 8c03f467ea6fa0093144599bfdf8361d90ba2d74 | [] | no_license | boaguilar/biocellion_python | 1c9f200142ee8892825fcdab90c68803da987742 | 0b3b48480436f21d9b46d07bd8f4da63b3e7a950 | refs/heads/master | 2021-01-11T18:44:51.924718 | 2018-05-02T21:15:24 | 2018-05-02T21:15:24 | 79,614,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,157 | py | from biocell import *
class Solver( ParamHolder ):
    """One <solver> element of the model: holds its XML attributes/params,
    its reactions, and emits the C++ initialization code for Biocellion."""

    def __init__( self, name ):
        # Purpose: register the solver's XML attributes and parameters.
        # The enum token mirrors the name and is emitted into generated C++.
        self.mName = name
        self.mEnumToken = "SOLVER_%s" % ( name, )
        ParamHolder.__init__(self)
        self.setPrefix( "SOLVER" )
        self.addAttribute( Param( "name", "str", "", True ) )
        self.addAttribute( Param( "class", "str", "", True ) )
        self.addAttribute( Param( "domain", "str", "", True ) )
        self.addParam( Param( "active", "bool", False, True ) )
        self.addParam( Param( "preStep", "int", 3, True ) )
        self.addParam( Param( "postStep", "int", 3, True ) )
        self.addParam( Param( "coarseStep", "int", 3, False ) ) # required in cDynoMiCS
        self.addParam( Param( "bottomStep", "int", 3, True ) )
        self.addParam( Param( "nCycles", "int", 5, True ) )
        # biocellion specific
        self.addParam( Param( "pdeSolverType", "str", "SteadyState", False, "", [ "SteadyState", "TimeDependent", ], ) )
        self.addParam( Param( "refineRatio", "int", 2, False ) )
        self.addParam( Param( "AMRLevels", "int", 3, False ) )
        self.addParam( Param( "numTimeSteps", "int", 1, False ) )
        # Hidden params are excluded from generic code generation; the
        # string-hidden ones are set explicitly in getInitializeBioModel.
        self.mPrivateTotallyHiddenParams = [ ]
        self.mPrivateNumberHiddenParams = [ ]
        self.mPrivateBoolHiddenParams = [ ]
        self.mPrivateStringHiddenParams = [ "name", "class", "domain" ]
        self.mPrivateHiddenParams = [ "pdeSolverType" ] + self.mPrivateTotallyHiddenParams + self.mPrivateNumberHiddenParams + self.mPrivateBoolHiddenParams + self.mPrivateStringHiddenParams
        self.mHiddenParams = self.mHiddenParams + self.mPrivateHiddenParams
        self.mReactions = AllSolverReactions( )
        return
    def isSteadyState( self ):
        """True when the configured pdeSolverType is 'SteadyState'."""
        return self.getParam( 'pdeSolverType' ).getValue( ) == 'SteadyState'
    def getName(self):
        """Return the solver's name as given in the XML."""
        return self.mName
    def getEnumToken(self):
        """Return the C++ enum token (SOLVER_<name>)."""
        return self.mEnumToken
    def getReactions( self ):
        """Return the AllSolverReactions container for this solver."""
        return self.mReactions
    def getRefineRatio( self ):
        """Return the configured AMR refine ratio."""
        refine_ratio = self.getParam( 'refineRatio' ).getValue( )
        return refine_ratio
    def getInitializeBioModel(self, indent, depth):
        """Emit the C++ snippet that constructs and registers this solver.

        `indent` is the indent string, `depth` the starting nesting level.
        Raises Exception when pdeSolverType is not a recognized value.
        """
        varname = "solver"
        lines = []
        lines.append( (depth*indent) + "{" )
        depth += 1
        lines.append( (depth*indent) + "Solver *%s = new Solver( );" % ( varname, ) )
        lines.append( (depth*indent) + "%s->setSolverIdx( %s );" % ( varname, self.getEnumToken( ) ) )
        s = ParamHolder.getInitializeBioModel( self, varname, indent, depth )
        if s:
            lines.append( s )
        s = self.getInitializeBioModelSetDataMembers( varname, "->", indent, depth,
                                                      self.mPrivateBoolHiddenParams,
                                                      self.mPrivateNumberHiddenParams,
                                                      self.mPrivateStringHiddenParams )
        lines.append( s )
        # Map the XML pdeSolverType onto the Biocellion PDE type constant.
        solver_type = self.getParam( 'pdeSolverType' ).getValue( )
        if solver_type == 'SteadyState':
            solver_type_name = "PDE_TYPE_REACTION_DIFFUSION_STEADY_STATE_LINEAR"
        elif solver_type == 'TimeDependent':
            solver_type_name = "PDE_TYPE_REACTION_DIFFUSION_TIME_DEPENDENT_LINEAR"
        else:
            raise Exception( "Unexpected pdeSolverType: " + str( solver_type ) )
        lines.append( (depth*indent) + "%s->setPDEType( %s );" % ( varname, solver_type_name, ) )
        container_name = "%s->getReactions()" % ( varname, )
        s = self.mReactions.getInitializeBioModel( container_name, indent, depth )
        if s:
            lines.append( s )
        lines.append( (depth*indent) + "gBioModelRW->getSolvers( ).push_back( %s );" % ( varname, ) )
        depth -= 1;
        lines.append( (depth*indent) + "}" )
        return "\n".join( lines )
    def __str__(self):
        # XML-style rendering: attributes, params, then nested reactions.
        s = "<solver" + self.formatAttributes() + ">\n"
        s += ParamHolder.__str__( self )
        s += str( self.mReactions )
        s += "</solver>\n"
        return s
    def __repr__(self):
        return str(self)
class AllSolvers( ItemHolder ):
    """Ordered container of Solver objects; generates the shared C++
    enum/header code and validates cross-solver constraints."""
    def __init__( self, model ):
        ItemHolder.__init__( self, Solver )
        self.mModel = model
        return
    def getRefineRatio( self ):
        """Return the single refine ratio shared by all solvers.

        Raises Exception when no solver exists or when solvers disagree:
        Biocellion requires one global AMR refine ratio.
        """
        if len( self.mOrder ) < 1:
            raise Exception( "ERROR: At least one <solver> must be specified." )
        refine_ratios = set( )
        for name in self.mOrder:
            refine_ratios.add( self.mItems[ name ].getRefineRatio( ) )
        if len( refine_ratios ) > 1:
            raise Exception( "ERROR: All refine ratios must be the same.  These were found: " + str( refine_ratios ) )
        if len( refine_ratios ) < 1:
            raise Exception( "ERROR: All refine ratios must be the same.  None were found" )
        return refine_ratios.pop( )
    def getBioModelH( self, indent, depth ):
        """Emit the header-file section: param names, blank line, enum."""
        lines = [ ]
        lines.append( self.getAllParamNames( indent, depth ) )
        lines.append( "" )
        lines.append( self.getSolversEnum( indent, depth ) )
        return "\n".join( lines )
    def getSolversEnum(self, indent, depth):
        """Emit a C typedef enum with one token per solver plus NUM_SOLVERS."""
        lines = []
        lines.append( (depth*indent) + "typedef enum _solver_type_e {" )
        depth += 1
        for name in self.mOrder:
            s = (depth*indent) + "%s," % (self.mItems[ name ].getEnumToken(), )
            lines.append( s )
        s = (depth*indent) + "NUM_SOLVERS"
        lines.append( s )
        depth -= 1
        lines.append( (depth*indent) + "} solver_type_e;" )
        return "\n".join( lines )
    def getInitializeBioModel( self, indent, depth ):
        """Concatenate every solver's C++ initialization snippet, in order."""
        lines = []
        for name in self.mOrder:
            lines.append( self.mItems[ name ].getInitializeBioModel( indent, depth ) )
        return "\n".join( lines )
    def addItem( self, name, item=None ):
        """Add a solver; constructs a new Solver(name) when none is given."""
        if item is None:
            item = self.mItemClass( name )
        return ItemHolder.addItem( self, name, item )
def main():
    """Stub entry point: this module ships without a standalone tester."""
    print("FIXME: no tester for " + str(__file__))
if __name__ == "__main__":
main()
| [
"larsen@dixie.edu"
] | larsen@dixie.edu |
ff904f46d578f25e760593eff96a4f1ec4b5dd35 | cef5a68531e6ebf90803c1ec95c21572ac13c139 | /places/__init__.py | 361279f1f8fe8dad090ef98e9e0ac753dd486822 | [] | no_license | ARAVINDBALAN/django-places | 3b4846a7797283349e15b9947d0db6d9fa544edf | 78c488638ecc639249837d51415075653a6bd6bb | refs/heads/master | 2020-03-27T16:23:25.388574 | 2017-09-10T00:33:24 | 2017-09-10T00:33:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
# Weird encoding python2 hack :'(
if sys.version_info < (3,0):
reload(sys)
sys.setdefaultencoding('utf8')
from decimal import Decimal
default_app_config = 'places.apps.PlacesConfig'
__version__ = '2.0.0'
class Places(object):
    """A named geographic point with Decimal latitude/longitude.

    Numeric coordinates are routed through ``str`` before ``Decimal`` so a
    float keeps its printed value (``Decimal(str(0.1)) == Decimal('0.1')``)
    rather than its binary expansion.
    """

    def __init__(self, place, latitude, longitude):
        if isinstance(latitude, (float, int)):
            latitude = str(latitude)
        if isinstance(longitude, (float, int)):
            longitude = str(longitude)
        self.place = place
        self.latitude = Decimal(latitude)
        self.longitude = Decimal(longitude)

    def __str__(self):
        return "%s, %s, %s" % (self.place, self.latitude, self.longitude)

    def __repr__(self):
        return "Places(%s)" % str(self)

    def __len__(self):
        return len(str(self))

    def __eq__(self, other):
        # Equality compares coordinates only; the place name is a label.
        return (isinstance(other, Places)
                and self.latitude == other.latitude
                and self.longitude == other.longitude)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Defined alongside __eq__ (defining __eq__ alone made instances
        # unhashable); hashes on exactly the fields equality compares.
        return hash((self.latitude, self.longitude))
| [
"om.cortez.2010@gmail.com"
] | om.cortez.2010@gmail.com |
35fe46be42a9c480e91d066a081e4accf192032f | 423a41dadbacb77339a45e2741ae27b334d31f4f | /web/contactx_web/settings.py | 518ac011ef49490bf09c5b92d95fc443f69b03ae | [] | no_license | tathagata/contactx | 6594ad84caaa5be2d24a40670d30260fca829e1a | 4f5cbdbd16e77829a6b1ab11542cc020e046e167 | refs/heads/master | 2020-05-30T23:06:01.461539 | 2012-08-22T21:23:33 | 2012-08-22T21:23:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,043 | py | # Django settings for contactx_web project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#ce5($1&kfq&nz$%-sifvk!+q21+75t!nr=now$i=kv_x)5gr7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'contactx_web.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"tathagatadg@gmail.com"
] | tathagatadg@gmail.com |
9a047c55dc1dbee0270501e4c076765c88d3e4a8 | fe9a0558e78666150ca26fe7b8b2781061cf987e | /txtl/components/cds_tetr.py | 0190a5cf7985da94cb3c385eac8860899748ef62 | [
"BSD-3-Clause"
] | permissive | BuildACell/txtlsim-python | 8c3587b1a0356e6617da6b67a8554a7697776e8e | 5284a5477f38bbc8a956dc9608b0264ca672a331 | refs/heads/master | 2018-10-09T18:34:59.520569 | 2018-09-19T01:53:44 | 2018-09-19T01:53:44 | 111,568,117 | 4 | 4 | BSD-3-Clause | 2018-09-19T01:53:45 | 2017-11-21T15:38:53 | Python | UTF-8 | Python | false | false | 559 | py | # cds_tetr.py - TetR protein definition
# RMM, 11 Aug 2018
#
# This file contains the model for the TetR protein.
#
# Copyright (c) 2018, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
from ..dna import ProteinCDS
#! TODO: decide if this should be CDS_tetr
class cds_tetr(ProteinCDS):
    """Coding sequence (DNA) for the TetR repressor protein."""
    def __init__(self, name='TetR', *args, **kwargs):
        # TetR acts as a dimer, so the dimerization flag is always on.
        ProteinCDS.__init__(self, *args, name=name, dimerize=True, **kwargs)
# Define a shorthand version for convenience
tetr = cds_tetr
| [
"murray@cds.caltech.edu"
] | murray@cds.caltech.edu |
5d00ff9e5462a42bd1845fa9ba16d12841903f29 | 3e4912fd783063e5f2c7a8f41e1daf8ddb607670 | /employees/serializers.py | ceff0c92ab6758257c4e3ada1874f6f9625ee453 | [] | no_license | Puja0708/multi-tenant-system | 6adad0993d7c5dc4e295e4a244a8e23506dbcaf1 | 25c325b5e5c9d6eaa31e5d4df516cfa7a23d8ce5 | refs/heads/master | 2020-03-27T22:35:35.115455 | 2018-09-10T11:55:49 | 2018-09-10T11:55:49 | 147,244,530 | 1 | 0 | null | 2018-09-10T05:11:31 | 2018-09-03T19:20:11 | Python | UTF-8 | Python | false | false | 820 | py | from __future__ import unicode_literals, absolute_import
from rest_framework import serializers
from employees.models import EmployeeRoles, Employee
class EmployeeRoleSerializer(serializers.ModelSerializer):
    """Serializes every field of the EmployeeRoles model."""
    class Meta(object):
        model = EmployeeRoles
        fields = '__all__'
class EmployeeViewSerializer(serializers.ModelSerializer):
    """Read/list serializer exposing every field of the Employee model."""
    class Meta(object):
        model = Employee
        fields = '__all__'
class EmployeeViewUpdateSerializer(serializers.ModelSerializer):
    """Employee serializer for updates: the declared fields are optional,
    so partial updates may omit any of them."""
    first_name = serializers.CharField(read_only=False, required=False)
    last_name = serializers.CharField(read_only=False, required=False)
    # NOTE(review): team/age are declared as plain integers here — team is
    # presumably a foreign-key id; confirm against the Employee model.
    team = serializers.IntegerField(required=False)
    age = serializers.IntegerField(required=False)
    class Meta(object):
        model = Employee
        fields = '__all__'
| [
"puja@metarain.com"
] | puja@metarain.com |
8a22a2ebe3c3159834157d2ebcd45a43c447d69a | 1a73c506a7528026f9215ccbf0bba55d4a322346 | /python/mail/SMTP-Header邮件.py | 4110e72e8bd0590d1152a48060c133dc47441fad | [] | no_license | clearLoveKang/myPython | c3b533553067fddaac26d5341e307b521de55944 | 33d437629db498db4e41b48cd233e81a1cb5eaaa | refs/heads/master | 2021-04-09T10:40:08.266133 | 2018-12-10T02:10:37 | 2018-12-10T02:10:37 | 125,455,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # -*- coding:utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.header import Header
# Plain-text message body.
msg = MIMEText('小可爱,起床吃饭饭啦', 'plain', 'utf-8')
# Credentials and recipient are read interactively.
# NOTE(review): the prompt below says 'form' — presumably meant 'from'.
from_addr = input('form')
from_pwd = input('password')
to_addr = input('addr: ')
# Header() RFC-2047-encodes the display names so non-ASCII text survives.
msg['From'] = Header('大佬康<*******@qq.com>', 'utf-8')
msg['To'] = Header('小宝贝<*******@qq.com>', 'utf-8')
msg['Subject'] = Header('康康来看下小宝贝', 'utf-8')
smtp_srv = 'smtp.qq.com'
try:
    # Connect to the mail server over implicit SSL (port 465).
    # NOTE(review): smtp_srv.encode() passes a bytes hostname; smtplib
    # documents a str host — verify this works on the target Python.
    srv = smtplib.SMTP_SSL(smtp_srv.encode(), 465)
    # Echo the full SMTP conversation for debugging.
    srv.set_debuglevel(1)
    # Log in.
    srv.login(from_addr, from_pwd)
    # Send the mail: envelope sender, recipient list, message text.
    srv.sendmail(from_addr, [to_addr], msg.as_string())
    # Close the session.
    srv.quit()
except Exception as e:
    print(e)
| [
"952472388@qq.com"
] | 952472388@qq.com |
d8514be79f7861f9ae195854ba1d64ed810716ea | 3724a1b95e95e611cdd793d1af685f72dfea6b3e | /cloudcafe/networking/lbaas/lbaas_api/listener/client.py | ae712a2a12cef2e37f97dda90670c5fa142ac8e3 | [
"Apache-2.0"
] | permissive | kurhula/cloudcafe | 1b1e41994959cf959a49e19fea5cbda893d9c9df | 7d49cf6bfd7e1a6e5b739e7de52f2e18e5ccf924 | refs/heads/master | 2021-01-20T22:45:27.425724 | 2015-02-20T16:49:35 | 2015-02-20T16:49:35 | 31,156,531 | 0 | 1 | null | 2015-02-22T07:56:08 | 2015-02-22T07:56:07 | null | UTF-8 | Python | false | false | 6,979 | py | """
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.networking.lbaas.common.client import BaseLoadBalancersClient
from cloudcafe.networking.lbaas.lbaas_api.listener.request import \
CreateListener, UpdateListener
from cloudcafe.networking.lbaas.lbaas_api.listener.response import \
Listener, Listeners
class ListenersClient(BaseLoadBalancersClient):
    """
    Listeners Client
    @summary: Listeners represent a single listening port and can optionally
        provide TLS termination.
    """
    # URL templates; base_url is supplied by BaseLoadBalancersClient.
    _LISTENERS_URL = "{base_url}/listeners"
    _LISTENER_URL = "{base_url}/listeners/{listener_id}"
    def create_listener(self, name, load_balancer_id, tenant_id,
                        default_pool_id, protocol, protocol_port,
                        description=None, connection_limit=None,
                        admin_state_up=None, requestslib_kwargs=None):
        """Create Listener
        @summary: Creates an instance of a listener given the
            provided parameters
        @param name: Name of the listener that will be created
        @type name: String
        @param load_balancer_id: ID of a load balancer.
        @type load_balancer_id: String
        @param tenant_id: Tenant that will own the listener.
        @type tenant_id: String
        @param default_pool_id: ID of default pool.  Must have compatible
            protocol with listener.
        @type default_pool_id: String
        @param protocol: Protocol to load balance: HTTP, HTTPS, TCP, UDP
        @type protocol: String
        @param protocol_port: TCP (or UDP) port to listen on.
        @type protocol_port: Integer
        @param description: Detailed description of the listener.
        @type description: String
        @param connection_limit:  Maximum connections the load balancer can
            have.  Default is infinite.
        @type connection_limit: Integer
        @param admin_state_up: If set to false, listener will be created in an
            administratively down state
        @type admin_state_up: Boolean
        @return: Response Object containing response code and the
            listener domain object
        @rtype: Requests.response
        """
        full_url = self._LISTENERS_URL.format(base_url=self.url)
        # Assemble the JSON request body from the given attributes.
        listener_request_object = CreateListener(
            name=name, load_balancer_id=load_balancer_id, tenant_id=tenant_id,
            default_pool_id=default_pool_id, protocol=protocol,
            protocol_port=protocol_port, description=description,
            connection_limit=connection_limit, admin_state_up=admin_state_up)
        return self.request('POST', full_url,
                            response_entity_type=Listener,
                            request_entity=listener_request_object,
                            requestslib_kwargs=requestslib_kwargs)
    def list_listeners(self, requestslib_kwargs=None):
        """List Listeners
        @summary: List all listeners configured for the account.
        @rtype: Requests.response
        @note: This operation does not require a request body.
        """
        full_url = self._LISTENERS_URL.format(base_url=self.url)
        return self.request('GET', full_url,
                            response_entity_type=Listeners,
                            requestslib_kwargs=requestslib_kwargs)
    def update_listener(self, listener_id, name=None, description=None,
                        default_pool_id=None, load_balancer_id=None,
                        admin_state_up=None, requestslib_kwargs=None):
        """Update Listener
        @summary: Update the properties of a listener given the
            provided parameters
        @param listener_id: ID of the listener to get details from.
        @type listener_id: str
        @param name: Name of the listener that will be created
        @type name: String
        @param description: Detailed description of the listener.
        @type description: String
        @param default_pool_id: ID of default pool.  Must have compatible
            protocol with listener.
        @type default_pool_id: String
        @param load_balancer_id: ID of a load balancer.
        @type load_balancer_id: String
        @param admin_state_up: If set to false, listener will be created in an
            administratively down state
        @type admin_state_up: Boolean
        @return: Response Object containing response code.
        @rtype: Requests.response
        """
        # Only the provided (non-None) attributes are sent in the body.
        update_listener = UpdateListener(
            name=name, description=description,
            default_pool_id=default_pool_id,
            load_balancer_id=load_balancer_id,
            admin_state_up=admin_state_up)
        full_url = self._LISTENER_URL.format(base_url=self.url,
                                             listener_id=listener_id)
        return self.request('PUT', full_url,
                            request_entity=update_listener,
                            response_entity_type=Listener,
                            requestslib_kwargs=requestslib_kwargs)
    def get_listener(self, listener_id, requestslib_kwargs=None):
        """Get Listener Details
        @summary: List details of the specified listener.
        @param listener_id: ID of the listener to get details from.
        @type listener_id: str
        @return: Response Object containing response code and the
            listener domain object.
        @rtype: Requests.response
        @note: This operation does not require a request body.
        """
        full_url = self._LISTENER_URL.format(base_url=self.url,
                                             listener_id=listener_id)
        return self.request('GET', full_url,
                            response_entity_type=Listener,
                            requestslib_kwargs=requestslib_kwargs)
    def delete_listener(self, listener_id, requestslib_kwargs=None):
        """Delete Listener
        @summary: Remove a listener from the account.
        @param listener_id: ID of the listener to delete.
        @type listener_id: str
        @return: Response Object containing response code.
        @rtype: Requests.response
        @note: Returns an error if it's still in use by any pools.
        """
        full_url = self._LISTENER_URL.format(
            base_url=self.url,
            listener_id=listener_id)
        return self.request('DELETE', full_url,
                            requestslib_kwargs=requestslib_kwargs)
| [
"franklin.naval@RACKSPACE.COM"
] | franklin.naval@RACKSPACE.COM |
1f6bb93da2698a8ef381c0b71b36dd2085b09945 | a8b2e3623533d454e484598024eac7b0d27b34a3 | /cosmoslik/cosmoslik_plugins/models/classyAACF.py | 758fdc5aa0356cfd881d9b25dcf44bb6dad625bd | [] | no_license | andy16777216/FiniteInflation | 087f08762cec62864871682508b14d2b9c97ab8d | 900b402124af30d7b1b5a13af6d1232708f55788 | refs/heads/master | 2016-09-06T01:27:02.289672 | 2015-07-16T18:48:42 | 2015-07-16T18:48:42 | 26,138,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | from __future__ import absolute_import
from cosmoslik import SlikPlugin
from numpy import arange, pi
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco

    Wraps the CLASS Boltzmann code (via its 'classy' python wrapper) so it
    can be called from a cosmoslik chain: __call__ computes lensed CMB
    spectra for a given parameter set, get_bao_observables returns BAO
    quantities from the last computed model.
    """
    #{cosmoslik name : class name} - This needs to be done even for variables with the same name (because of for loop in self.model.set)!
    # __call__ looks its own locals() up in this table when building the
    # dict of CLASS input parameters; anything not listed here is ignored.
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'k_c':'k_c',
                    'alpha_exp':'alpha_exp',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    #'Tcmb':'T_cmb',
                    #'P_k_max_hinvMpc':'P_k_max_h/Mpc'
                    #'w':'w0_fld',
                    #'nrun':'alpha_s',
                    #'omk':'Omega_k',
                    #'l_max_scalar':'l_max_scalars',
                    #'l_max_tensor':'l_max_tensors'
                    }
    def __init__(self):
        """Import the CLASS wrapper lazily and create the solver instance."""
        super(classy,self).__init__()
        # Imported here so cosmoslik can load this module even when CLASS
        # is not installed; failure is surfaced at plugin construction time.
        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")
        self.model = Class()
    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 k_c,
                 alpha_exp,
                 tau,
                 #omnuh2=0, #0.006 #None means that Class will take the default for this, maybe?
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 #massive_neutrinos=0,
                 massless_neutrinos=3.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.05,
                 outputs=[],
                 **kwargs):
        """Run CLASS for the given cosmology and return lensed CMB spectra.

        Returns a dict mapping 'cl_TT', 'cl_TE', ... to D_ell arrays in
        microK^2 (i.e. C_ell * T_cmb^2 * 1e12 * ell(ell+1)/2pi) up to
        l_max_scalar.  NOTE(review): `outputs` and **kwargs are accepted
        but unused, and the mutable default `outputs=[]` is shared across
        calls -- confirm before relying on either.
        """
        # The dict comprehension translates every local whose name appears
        # in name_mapping (and is not None) into a CLASS input parameter;
        # this is why the parameter names above must match the table keys.
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       **{self.name_mapping[k]:v for k,v in locals().items()
                          if k in self.name_mapping and v is not None})
        self.model.compute()
        ell = arange(l_max_scalar+1)
        # Convert the dimensionless lensed C_ell's to D_ell in microK^2.
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}
        # Free CLASS's internal allocations so repeated calls do not leak.
        self.model.struct_cleanup()
        self.model.empty()
        return self.cmb_result
    def get_bao_observables(self, z):
        """Return Hubble rate, angular-diameter distance, c and the sound
        horizon at recombination for redshift z, from the last computation."""
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
| [
"ajscacco@ucdavis.edu"
] | ajscacco@ucdavis.edu |
167bfeaee564f8d3e016763b63baeb7fb6fca90f | 96537900c2170dab239d9c3f13e3c11fd262646f | /introducao_a_ciencia_da_computacao_com_python/primalidade.py | 74ee662d7f67078db88997d458d25981556376f6 | [] | no_license | lucasvini92/cursos | 31be37c17417acc0b19e0dc263f0245bc9ac7ac5 | cbba4b3efa328b0a05fad3f36bf6850c39962d08 | refs/heads/master | 2023-07-11T05:33:03.064262 | 2021-08-28T16:32:46 | 2021-08-28T16:32:46 | 376,107,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | num = int(input("Digite um número inteiro: "))
div = 1
cont = 0
while num>=div:
if num % div == 0:
cont = cont + 1
div = div + 1
if cont <=2:
print("primo")
else:
print("não primo") | [
"lucasvsteofilo@gmail.com"
] | lucasvsteofilo@gmail.com |
79e21cfbaa2d7f08d3295fa45bba76eb09349b63 | 8c82d71c8d8edb259405787e010b943f921214c5 | /job/urls.py | 2ace98de5728d5d4b084b6ea651457d9d6fe5817 | [] | no_license | Yossef-Dawoad/JOB_FINDER-django_demo- | 14f4358c396479d1a8a862688fa6210d61f025df | 8aacbcf3a1e269b0f2b2605093559476ca1674d7 | refs/heads/main | 2023-08-31T15:46:25.272822 | 2021-03-25T07:08:45 | 2021-03-25T07:08:45 | 351,337,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# Route table for the job app: the list view at the app root and a detail
# view keyed by the job's integer id.  static() only serves files when
# DEBUG is True; production should serve static/media via the web server.
urlpatterns=[
    path('',views.job_list),
    path('<int:id>',views.job_detail),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"yossefdawoad15@gmail.com"
] | yossefdawoad15@gmail.com |
dd5fcde7a9d2b752ffd11fa2e08293aa5ca7e1d2 | ec1460fe71ed7669dc86cf3fabad60a54704c275 | /main.py | 5c73e430eed95479923d46aee1d6c50ef9c0bf35 | [] | no_license | michoy/techathon_tellolib | 948d5c42ee895045788bf4191aad924068197507 | d2d606fd7fa4d956be8ea5ea2ac45977d6eca5f5 | refs/heads/master | 2020-08-11T08:15:11.820703 | 2019-10-12T09:25:24 | 2019-10-12T09:25:24 | 214,525,118 | 0 | 0 | null | 2019-10-11T20:40:20 | 2019-10-11T20:40:19 | null | UTF-8 | Python | false | false | 467 | py | from dronelib.dronelib_tello import TelloDrone
from dronelib.util import *
from time import sleep
def main():
    """Fly a short scripted Tello demo: take off, rotate, photograph, move,
    photograph again, land."""
    drone = TelloDrone()
    drone.activate()
    drone.takeoff(1.0)
    # Hold position and rotate to yaw 1.57 rad (~90 degrees), then report pose.
    drone.set_target(0, 0, yaw=1.57)
    print(drone.position)
    print(drone.yaw)
    # save_image comes from dronelib.util via the star import above.
    image = drone.camera_image
    save_image(image)
    # NOTE(review): assumes the two positional set_target args are planar
    # offsets (likely metres) -- confirm the axis convention in dronelib.
    drone.set_target(0, 0.5)
    image = drone.camera_image
    save_image(image)
    drone.land()

if __name__ == "__main__":
main() | [
"michael.hoyer3@gmail.com"
] | michael.hoyer3@gmail.com |
69b06c5b9f0fa6a1d49e873a6054d2f98d73199e | 580daee0cc0a961664cae1dbce027f09aa1b1ebc | /Lab4/mysite/mysite/settings.py | 2ad06674b7af1f9a705f3038978ad089066442ce | [] | no_license | nmrchvz/cmput404-labs | a50cf115067adf842ddd73df308444e33c682d67 | 06d2ed47c59eb042ca76dc017606b2128fd288cc | refs/heads/main | 2023-08-06T10:34:51.408572 | 2021-10-05T00:13:40 | 2021-10-05T00:13:40 | 404,498,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment (e.g. os.environ) before any real deployment.
SECRET_KEY = '^@$tzfi&ypl06bs24ngk+wsqd4yo@e8_k!s44j=9tiyivj@wzk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; list served hostnames here
# before deploying.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"npchavez@ualberta.ca"
] | npchavez@ualberta.ca |
0c3e715c31ed237c300f584fd1ce6e4102a51274 | f4c9e202ff98983702e7f6d132b70398fc81ab93 | /stb.py | 2b3ddcd72e32018b1f5402e9446589a33dbfe87e | [] | no_license | MrCheeze/skywardsword-tools | 5989f19bbc1a3c1f8fb37da91ad75ce6011adf01 | b69c7a3ad3805906a2dc6965666a9a356a8bf979 | refs/heads/master | 2020-05-30T13:30:52.045344 | 2019-06-22T18:46:08 | 2019-06-22T18:46:08 | 189,759,599 | 1 | 2 | null | 2020-01-04T21:01:39 | 2019-06-01T17:22:39 | Python | UTF-8 | Python | false | false | 1,404 | py | import glob
import collections
import struct
from util import *
# Walk every stage .stb cutscene file and dump its segment list as JSON.
for fname in glob.glob('Stage/**/*.stb', recursive=True):
    print(fname)
    parsed = []
    # Fix: the original opened the file without ever closing it; 'with'
    # guarantees the handle is released even if parsing raises.
    with open(fname, 'rb') as f:
        f.seek(0x20)  # skip the fixed 0x20-byte file header
        while True:
            seg_header = f.read(0x8)
            if len(seg_header) <= 0:
                break  # end of file
            # Each segment starts with a big-endian int32 length (which
            # includes this 8-byte header) and a 4-byte ASCII tag.
            seg_len, seg_id = struct.unpack('>i4s', seg_header)
            seg_data = f.read(seg_len - 0x8)
            parsed_item = collections.OrderedDict()
            parsed_item['seg_len'] = seg_len
            parsed_item['seg_id'] = str(seg_id)
            parsed_item['seg_data'] = seg_data
            parsed_item['seg_data2'] = str(seg_data)
            # Known segment tags; unknown tags abort so new formats are noticed.
            if seg_id == b'JFVB':
                #animated actions (camera_
                pass
            elif seg_id == b'\xff\xff\xff\xff':
                pass
            elif seg_id == b'JACT':
                #actor-related
                pass
            elif seg_id == b'JMSG':
                #text pointers
                pass
            elif seg_id == b'JCMR':
                #camera
                pass
            elif seg_id == b'JPTC':
                pass
            elif seg_id == b'JCTB':
                assert seg_len == 56
            elif seg_id == b'JLIT':
                pass
            else:
                raise Exception('unimplemented ' + str(seg_id))
            parsed.append(parsed_item)
    # Flatten the path into a single output filename and write the JSON dump.
    out_name = 'output/demo/%s.json' % fname.replace('/', '-').replace('\\', '-')
    with open(out_name, 'w') as f2:
        f2.write(objToJson(parsed))  # objToJson comes from util (star import)
| [
"fishycheeze@yahoo.ca"
] | fishycheeze@yahoo.ca |
569c0fe40b397c4990eb34ce4716eead233cf51f | e0ede722874d222a789411070f76b50026bbe3d8 | /practice/solution/0040_combination_sum_ii.py | 522d0209dcadd27bc9829d15d2270d94bb200cd4 | [] | no_license | kesarb/leetcode-summary-python | cd67456cb57bdff7ee227dab3930aaf9c2a6ad00 | dc45210cb2cc50bfefd8c21c865e6ee2163a022a | refs/heads/master | 2023-05-26T06:07:25.943854 | 2021-06-06T20:02:13 | 2021-06-06T20:02:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | class Solution(object):
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
self.res = []
self.dfs(0, sorted(candidates), target, [])
return self.res
    def dfs(self, start, candidates, target, value_list):
        """Backtracking helper: try extending value_list with each of
        candidates[start:], collecting exact matches into self.res.

        `candidates` must be sorted so equal values are adjacent.
        """
        if target < 0:
            return  # overshot the target -- prune this branch
        if not target:
            self.res.append(value_list)  # exact sum reached
        for i in range(start, len(candidates)):
            # Skip equal values at the same recursion depth so each unique
            # combination is emitted only once.
            if i > start and candidates[i] == candidates[i - 1]:
                continue
self.dfs(i + 1, candidates, target - candidates[i], value_list + [candidates[i]]) | [
"weikunhan@g.ucla.edu"
] | weikunhan@g.ucla.edu |
33e3d04f28f29015f75634a2bf0fb461acc8a6cc | fe85e3675c1d665dcc09fb9814003fc1030aa5c2 | /utils/metrics.py | 3903650d448783ce5b7f7eb0108ff508904bf13e | [] | no_license | nik1806/coco-segmentation | ef25bf721f5f65f64f1fc32ea7abe1243deaaa64 | b6f94c8cef13660183483617c74373071022a78a | refs/heads/main | 2023-06-14T15:15:47.188118 | 2021-07-09T09:17:16 | 2021-07-09T09:17:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | py | import numpy as np
class Evaluator(object):
    """Accumulates a confusion matrix over batches of label maps and derives
    pixel-accuracy / IoU style segmentation metrics from it.

    Rows of the matrix index ground-truth classes, columns index predictions.
    """

    def __init__(self, num_class):
        self.num_class = num_class
        self.confusion_matrix = np.zeros((self.num_class,) * 2)

    def Pixel_Accuracy(self):
        """Overall fraction of correctly classified pixels."""
        cm = self.confusion_matrix
        return np.trace(cm) / cm.sum()

    def pixel_accuracy_each_class(self):
        """Per-class recall: correct pixels / ground-truth pixels per class."""
        cm = self.confusion_matrix
        return np.diag(cm) / cm.sum(axis=1)

    def Pixel_Accuracy_Class(self):
        """Mean per-class accuracy, ignoring NaN entries (absent classes)."""
        return np.nanmean(self.pixel_accuracy_each_class())

    def intersection_over_union_each_class(self):
        """IoU per class: diagonal / (row sum + column sum - diagonal)."""
        cm = self.confusion_matrix
        hits = np.diag(cm)
        union = cm.sum(axis=1) + cm.sum(axis=0) - hits
        return hits / union

    def Mean_Intersection_over_Union(self):
        """Mean IoU over classes, ignoring NaN entries."""
        return np.nanmean(self.intersection_over_union_each_class())

    def Frequency_Weighted_Intersection_over_Union(self):
        """IoU weighted by each class's ground-truth pixel frequency."""
        cm = self.confusion_matrix
        freq = cm.sum(axis=1) / cm.sum()
        iu = self.intersection_over_union_each_class()
        return (freq[freq > 0] * iu[freq > 0]).sum()

    def _generate_matrix(self, gt_image, pre_image):
        """Build one batch's confusion matrix via a flattened bincount."""
        valid = (gt_image >= 0) & (gt_image < self.num_class)
        # Encode each (gt, pred) pair as a single index gt*num_class + pred.
        combined = self.num_class * gt_image[valid].astype("int") + pre_image[valid]
        counts = np.bincount(combined, minlength=self.num_class ** 2)
        return counts.reshape(self.num_class, self.num_class)

    def add_batch(self, gt_image, pre_image):
        """Accumulate a batch of ground-truth/prediction maps (same shape)."""
        assert gt_image.shape == pre_image.shape
        self.confusion_matrix += self._generate_matrix(gt_image, pre_image)

    def reset(self):
        """Clear all accumulated counts."""
        self.confusion_matrix = np.zeros((self.num_class,) * 2)
| [
"290806078@qq.com"
] | 290806078@qq.com |
3a9a38abf83a67d5c4027f92a3dfc99fa6246173 | a60732108767b0da186463d2ad834821da01750e | /PDF-reader/app.py | ae75f891ab42b54cc51e1e7020c995cc07424e98 | [] | no_license | Luke943/QuickApps | 23c58db13094860cfdc595475e79fa8f433e7605 | 8aa6e2768f9c97e9235338499b937d29399341d1 | refs/heads/master | 2023-08-03T14:09:53.265624 | 2023-07-27T15:49:18 | 2023-07-27T15:49:18 | 329,692,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | import tkinter as tk
import PyPDF2
from PIL import Image, ImageTk
from tkinter.filedialog import askopenfile
# initialise
root = tk.Tk()
root.title("PDF Text Extractor")
canvas = tk.Canvas(root, width=600, height=300)
canvas.grid(columnspan=3, rowspan=3)
# logo
logo = Image.open("logo.png")
logo = ImageTk.PhotoImage(logo)
logo_label = tk.Label(image=logo)
logo_label.image = logo
logo_label.grid(column=1, row=0)
# instructions
instructions = tk.Label(
root, text="Select a PDF file to extract its text", font="Raleway")
instructions.grid(columnspan=3, column=0, row=1)
def open_file():
    """Prompt for a PDF, extract the first page's text, and display it.

    Reads the module-level `root` window and `browse_text` StringVar; the
    browse button's label doubles as a crude progress indicator while the
    file is parsed.
    """
    browse_text.set("Loading...")
    file = askopenfile(parent=root, mode='rb', title="Choose a file",
                       filetype=[("Pdf file", "*.pdf")])
    if file:  # askopenfile returns None when the user cancels the dialog
        read_pdf = PyPDF2.PdfFileReader(file)
        page = read_pdf.getPage(0)  # only page 1 is extracted
        page_content = page.extractText()
        # text box
        text_box = tk.Text(root, height=10, width=50, padx=15, pady=15)
        text_box.insert(1.0, page_content)
        text_box.tag_configure("left", justify="left")
        text_box.tag_add("left", 1.0, "end")
        text_box.grid(column=1, row=3)
    browse_text.set("Browse")
# browse button
browse_text = tk.StringVar()
browse_text.set("Browse")
browse_btn = tk.Button(root, textvariable=browse_text, command=open_file,
font="Raleway", bg="#20bebe", fg="white", height=2, width=15)
browse_btn.grid(column=1, row=2)
canvas = tk.Canvas(root, width=600, height=250)
canvas.grid(columnspan=3)
# on close window
root.protocol("WM_DELETE_WINDOW", root.destroy)
# close main program
root.mainloop()
| [
"luke.hartley05@gmail.com"
] | luke.hartley05@gmail.com |
b92ba34be5084acd5dfdc2bbc230067045fbcdc5 | cc5dc60b5c343962bf7425865af8371b7e12e529 | /MagnitudeCalculator/Tester.py | a66bbb50a665010fc279ac775b2bd3b715d6641c | [] | no_license | debjyoti385/quakeanalysis | 044aa5f5a9429090ab7bd42370ea4b43d36c18d0 | 4e3225f1a20ae1f05e6be532836e4ff9ee405071 | refs/heads/master | 2021-05-04T11:31:50.052098 | 2017-02-28T00:35:36 | 2017-02-28T00:35:36 | 48,445,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | '''from obspy.fdsn.client import Client
from obspy import iris
from obspy import arclink
from obspy import UTCDateTime
client = Client("IRIS")
iClient = iris.Client()
arcClient = arclink.Client(user="sed-workshop@obspy.org")
timeSeriesClient = iris.Client()
t1 = UTCDateTime("2012-04-12T07:15:49.1700")
#t1 = UTCDateTime("1994-09-12T12:23:43.630000")
t2 = t1 + 5
st =iClient.getWaveform("CI", "BAR", "??", "BHZ", t1, t2)
print "waveform"
tr = st.select(station="BAR")[0]
print tr
#s1=st[0].stats.coordinates.latitude
#s2=st[0].stats.coordinates.longitude
#print s1, s2'''
'''from math import log10
import numpy as np
from obspy.arclink import Client
from obspy.core import UTCDateTime
from obspy.core.util.geodetics import gps2DistAzimuth
paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]}
client = Client(user="sed-workshop@obspy.org")
t = UTCDateTime("2012-04-03T02:45:03")
stations = client.getStations(t, t + 300, "BC")'''
from obspy.fdsn.client import Client
from obspy import UTCDateTime
from obspy.core import read
client = Client("IRIS")
t1 = UTCDateTime("2012-04-12T07:15:49.1700")
#IU.TUC.20.LNZ
st = client.get_waveforms("IU", "ANMO", "00", "BH?", t1, t1 + 4 * 3600)
st.detrend(type='demean')
for each in st:
ampl = each.data
print max(abs(ampl))
'''tr_n = st.select(component="N")[0]
ampl_n = max(abs(tr_n.data))
tr_e = st.select(component="E")[0]
ampl_e = max(abs(tr_e.data))
ampl = max(ampl_n, ampl_e)'''
# Load a locally saved, pre-cut MiniSEED trace and compare peak amplitudes.
# NOTE: this script uses Python 2 print statements throughout.
st = read("/Users/zinniamukherjee/Education/BigData/Project/MagnitudeCalculator/LKBD_WA_CUT.MSEED")
st.detrend(type='demean')  # remove the mean offset before measuring amplitude
ampl = st[0].data
print max(abs(ampl))
# Peak absolute amplitude on each horizontal component.
tr_n = st.select(component="N")[0]
ampl_n = max(abs(tr_n.data))
print ampl_n
tr_e = st.select(component="E")[0]
ampl_e = max(abs(tr_e.data))
ampl = max(ampl_n, ampl_e)
print ampl_e  # NOTE(review): prints the E amplitude, not the max `ampl` just computed -- possibly unintended
| [
"zinniamukherjee@Zinnia.local"
] | zinniamukherjee@Zinnia.local |
b2be71e62e17a50fa0a9622b560930e5da02e4ef | 3f928eaab094902a37f1c282f8fea9064769a3f0 | /proxy_check.py | d6a32569a935ea14a9059a26b2efde6a660f8916 | [
"MIT"
] | permissive | HimanshuNayka/YouTube-Viewer | f5e77ddf4bac5148e2944b2b0c9ca7cf8f988c2d | adf9f196a9ec8e681e4f94558e0ff55804ac9dd2 | refs/heads/master | 2023-04-09T06:19:34.677733 | 2021-04-17T08:44:13 | 2021-04-17T08:44:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,998 | py | """
MIT License
Copyright (c) 2021 MShawon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import concurrent.futures.thread
import os
import shutil
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
from fake_useragent import UserAgent, UserAgentError
os.system("")
class bcolors:
    # ANSI escape sequences used to colorize terminal output.
    # os.system("") above enables their interpretation on Windows consoles.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes back to the default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
print(bcolors.OKGREEN + """
____
| _ \ _ __ _____ ___ _
| |_) | '__/ _ \ \/ / | | |
| __/| | | (_) > <| |_| |
|_| |_|_ \___/_/\_\\__, |_
/ ___| |__ ___|___/| | _____ _ __
| | | '_ \ / _ \/ __| |/ / _ \ '__|
| |___| | | | __/ (__| < __/ |
\____|_| |_|\___|\___|_|\_\___|_|
""" + bcolors.ENDC)
'''
Backup previous checked goodproxies
'''
# Best-effort rotation: replace any previous backup with the current
# GoodProxy list, then start a fresh GoodProxy.txt for this run.
try:
    os.remove('ProxyBackup.txt')
except:
    pass
try:
    shutil.copy('GoodProxy.txt', 'ProxyBackup.txt')
    print(bcolors.WARNING + 'GoodProxy backed up in ProxyBackup' + bcolors.ENDC)
    os.remove('GoodProxy.txt')
except:
    pass
# Fall back to the bundled user-agent database when the online cache
# server is unreachable.
try:
    ua = UserAgent(use_cache_server=False, verify_ssl=False)
except UserAgentError:
    ua = UserAgent(path='fake_useragent_0.1.11.json')
# position -> last proxy scheme that failed (None while a check is running);
# proxyCheck() reads this to decide whether to escalate to the next scheme.
checked = {}
def load_proxy():
    """Ask the user for a proxy list file and return its lines, stripped."""
    filename = input(bcolors.OKBLUE +
                     'Enter your proxy file name: ' + bcolors.ENDC)
    # 'with' closes the file even on error; one line per proxy entry.
    with open(filename) as handle:
        return [entry.rstrip().strip() for entry in handle]
def mainChecker(type1, type2, proxy, position):
    """Probe one proxy against youtube.com, routing http traffic through
    scheme `type1` and https traffic through scheme `type2`.

    Side effects: appends working proxies to GoodProxy.txt, prints a
    colored status line, and on failure records the failed scheme in the
    module-level `checked` dict so proxyCheck() can try the next scheme.
    """
    checked[position] = None
    proxyDict = {
        "http": f"{type1}://"+proxy,
        "https": f"{type2}://"+proxy,
    }
    # print(proxyDict)
    try:
        agent = ua.random  # fresh random User-Agent per request
        headers = {
            'User-Agent': f'{agent}',
        }
        response = requests.get(
            'https://www.youtube.com/', headers=headers, proxies=proxyDict, timeout=30)
        status = response.status_code
        print(bcolors.OKBLUE + f"Tried {position+1} |" + bcolors.OKGREEN +
              f' {proxy} | GOOD | Type : {type2} | Response : {status}' + bcolors.ENDC)
        # NOTE(review): this file handle is never explicitly closed.
        print(proxy, file=open('GoodProxy.txt', 'a'))
    except:
        # Deliberate catch-all: any network/proxy error simply marks this
        # scheme as bad.  (A bare except also swallows KeyboardInterrupt.)
        print(bcolors.OKBLUE + f"Tried {position+1} |" + bcolors.FAIL +
              f' {proxy} | {type2} |BAD ' + bcolors.ENDC)
        checked[position] = type2
        pass
def proxyCheck(position):
    """Check proxy_list[position], escalating through proxy schemes.

    Tries plain http/https first; if that fails (mainChecker recorded
    'https' in `checked`), retries as socks4, then as socks5.
    """
    PROXY = proxy_list[position]
    mainChecker('http', 'https', PROXY, position)
    if checked[position] == 'https':
        mainChecker('socks4', 'socks4', PROXY, position)
    if checked[position] == 'socks4':
        mainChecker('socks5', 'socks5', PROXY, position)
def main():
    """Check every loaded proxy concurrently using a thread pool.

    Reads the module-level `threads` and `total_proxies` set in __main__.
    """
    pool_number = [i for i in range(total_proxies)]
    with ThreadPoolExecutor(max_workers=threads) as executor:
        futures = [executor.submit(proxyCheck, position)
                   for position in pool_number]
        try:
            for future in as_completed(futures):
                future.result()  # re-raises any exception from the worker
        except KeyboardInterrupt:
            # Abort outstanding work on Ctrl+C by clearing executor
            # internals (relies on private CPython attributes).
            executor._threads.clear()
            concurrent.futures.thread._threads_queues.clear()
        except IndexError:
            print(bcolors.WARNING + 'Number of proxies are less than threads. Provide more proxies or less threads.' + bcolors.ENDC)
            pass
pass
if __name__ == '__main__':
    # Gather run parameters interactively, sanitize the proxy list, then start.
    threads = int(
        input(bcolors.OKBLUE+'Threads (recommended = 100): ' + bcolors.ENDC))
    proxy_list = load_proxy()
    proxy_list = list(set(proxy_list))  # removing duplicate proxies
    proxy_list = list(filter(None, proxy_list))  # removing empty proxies
    total_proxies = len(proxy_list)
    print(bcolors.OKCYAN + f'Total proxies : {total_proxies}' + bcolors.ENDC)
    main()
| [
"shawonk440@gmail.com"
] | shawonk440@gmail.com |
b2bc2b0ea9c43e023c7ac3dd9a00da3a702d32f6 | 69d0dfacdbff0d7aeee16ad590ef18b7bacdbd0c | /app.py | b14ff69cc1da8ea9ed6af4b2019e8d5dd88f79d5 | [
"MIT"
] | permissive | ybangaru/wallstreetbets-sentiment-analysis | 0a628c84f2229e5df2700ae76883e94c4df4735c | dc5a632f93f2c3a4454b45f96f9e6b580c6c4be5 | refs/heads/master | 2023-05-12T15:03:47.594562 | 2021-05-24T21:18:07 | 2021-05-24T21:18:07 | 333,496,347 | 11 | 0 | null | 2021-05-04T20:35:43 | 2021-01-27T16:50:25 | Python | UTF-8 | Python | false | false | 7,250 | py | import streamlit as st
import numpy as np
import pandas as pd
import datetime
import os
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import plotly.graph_objects as go
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import plotly.express as px
from plotly.subplots import make_subplots
import seaborn as sns
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk import ngrams
from collections import Counter
def run_app():
# @st.cache
def api_data():
ENDPOINT = st.secrets["db_ENDPOINT"]
PORT = 5432
USR = st.secrets["db_usr"]
token = st.secrets["db_token"]
REGION= st.secrets["db_region"]
DBNAME = st.secrets["db_name"]
DATABASE_URI = f'postgresql+psycopg2://{USR}:{token}@{ENDPOINT}:{PORT}/{DBNAME}'
engine = create_engine(DATABASE_URI, echo=False)
connection = engine.connect()
stmt = 'SELECT * FROM "daily-submissions-wsb"'
result_proxy = connection.execute(stmt)
results = result_proxy.fetchall()
return results
    def call_api(no_days):
        """Return the latest `no_days` daily-submission rows, oldest first.

        To work offline, uncomment the two csv lines below and comment out
        the DataFrame line that reads from the database.
        """
        # root = f"{os.getcwd()}"
        # df = pd.read_csv(f'{root}/test_data.csv')
        df = pd.DataFrame(api_data(), columns=['id', 'title', 'url', 'datetime', 'comments', 'flair'])
        df['datetime'] = pd.to_datetime(df['datetime'])
        df = df.sort_values(by='datetime', ignore_index=True)
        df = df.reset_index(drop=True)
        df = df[-no_days:]  # keep only the tail: the most recent no_days rows
        # print(df)
        return df
stop_words = set(STOPWORDS)
def show_wordcloud(data, title = None):
"""Word cloud image"""
wordcloud = WordCloud(
background_color='black',
stopwords=stop_words,
max_words=200,
max_font_size=40,
scale=3,
random_state=1 # chosen at random by flipping a coin; it was heads
).generate(str(data))
fig = plt.figure(1, figsize=(15, 15))
plt.axis('off')
if title:
fig.suptitle(title, fontsize=20)
fig.subplots_adjust(top=2.3)
plt.imshow(wordcloud)
return fig
    def get_vader_sentiment(comments_by_day):
        """Return one VADER compound sentiment score per day's comment blob.

        Non-string entries (e.g. NaN coming out of pandas) raise TypeError
        inside the analyser and are scored as 0.
        """
        analyser = SentimentIntensityAnalyzer()
        scores=[]
        for comments in comments_by_day:
            sentiment_score=0
            try:
                sentiment_score=analyser.polarity_scores(comments)['compound']
            except TypeError:
                sentiment_score=0
            scores.append(sentiment_score)
        return scores
def clean_words(new_tokens):
new_tokens = [t.lower() for t in new_tokens]
new_tokens =[t for t in new_tokens if t not in stopwords.words('english')]
new_tokens = [t for t in new_tokens if t.isalpha()]
lemmatizer = WordNetLemmatizer()
new_tokens =[lemmatizer.lemmatize(t) for t in new_tokens]
return new_tokens
def get_all_clean_words_plot(all_data):
fig, ax = plt.subplots()
ax = sns.barplot(x='frequency',y='words',data=all_data)
return fig
def get_sentiment_plot(cleaned_data):
fig, ax = plt.subplots()
ax = sns.lineplot(data=cleaned_data, x="datetime", y="sentiment_scores")
plt.xticks(rotation=45)
return fig
st.title("Some basic charts!!")
no_days = st.number_input("How many days into the past would you like to look into!!", min_value=7, step=7, max_value=28)
cleaned_data = call_api(no_days)
final_string = ""
for day in range(len(cleaned_data['comments'])):
final_string+=cleaned_data['comments'].iloc[-day]
st.title(f"Word Cloud for the last {no_days} days")
st.pyplot(show_wordcloud(final_string))
# word_counts_df = (cleaned_data['comments'].str.split(expand=True).stack().value_counts().rename_axis('vals').reset_index(name='count'))
# word_counts_df = word_counts_df[:30]
# print(word_counts_df)
cleaned_data['words'] = cleaned_data['comments'].apply(lambda x : word_tokenize(x))
cleaned_data['clean_words'] = cleaned_data['words'].apply(lambda x:clean_words(x))
all_clean_words = pd.DataFrame(Counter(cleaned_data['clean_words'].sum()).items(), columns=['words','frequency']).sort_values(by='frequency',ascending=False).head(30)
# print(all_clean_words)
st.title(f"Most Frequently used words!")
st.pyplot(get_all_clean_words_plot(all_clean_words))
st.title(f"Top Bigrams with their frequency")
two_words_df = pd.DataFrame(Counter(ngrams(cleaned_data['clean_words'].sum(),2)).items(), columns=['words','frequency']).sort_values(by='frequency',ascending=False).head(30)
# print(two_words_df)
st.pyplot(get_all_clean_words_plot(two_words_df))
st.title(f"Top Trigrams with their frequency")
three_words_df = pd.DataFrame(Counter(ngrams(cleaned_data['clean_words'].sum(),3)).items(), columns=['words','frequency']).sort_values(by='frequency',ascending=False).head(30)
# print(three_words_df)
st.pyplot(get_all_clean_words_plot(three_words_df))
cleaned_data['sentiment_scores'] = get_vader_sentiment(cleaned_data['comments'])
st.title(f"Vader Sentiment scores for the last {no_days} days")
st.pyplot(get_sentiment_plot(cleaned_data))
def main():
st.sidebar.markdown("""
<h2>Choose the mode:</h2>
""", unsafe_allow_html=True)
mode = st.sidebar.selectbox('', [
'Dashboard!!',
'About the Project!',
])
if mode == 'About the Project!':
st.title('Details of the project!')
st.markdown("""
check out my github @ https://github.com/ybangaru/wallstreetbets-sentiment-analysis
""")
elif mode == 'Dashboard!!':
st.title('Wall Street Bets Daily Discussion Thread Analysis')
st.markdown("""
Wall Street Bets is a subreddit with 1.8 million members (they call themselves "degenerates"), lol!! that's funny tbh.
Anyways the project idea is to follow the recent sentiment on the daily discussion thread which is the most discussed thread
everyday on this subreddit. It's safe enough to say it's the most discussed financial thread on a daily basis in the world.
I mean, if we can't pickup sentiment of the market here, I'd expect, it'd be quite difficult to do that anywhere else ¯\\\_(ツ)_/¯ Change my mind! ;)
Work still in progress!
""")
st.write("""The recent GME spike was so fun to follow really! Here, have a look as these posts:
https://www.theverge.com/2021/2/24/22299795/gamestop-stock-up-reddit-wallstreetbets-gme-pump &
https://www.bloomberg.com/news/articles/2021-01-25/how-wallstreetbets-pushed-gamestop-shares-to-the-moon""")
run_app()
if __name__ == '__main__':
main() | [
"yaswanthbangaru13@gmail.com"
] | yaswanthbangaru13@gmail.com |
5e9075f5cad527f8659449177fea32357ade4ad8 | 96a91d882e73cff97da43eb92e79826570678ad0 | /done/county_process.py | afc3d40850b409a05a415f65e05ddbed2603f389 | [] | no_license | carrickdb/FakeNews | a7dea0b537709bfd3134c83c1a57391d95472596 | cc96e0b07f70e62ecc7525c65cbe71cdd465fb0d | refs/heads/master | 2021-10-21T18:38:05.596102 | 2019-03-05T17:48:29 | 2019-03-05T17:48:29 | 167,854,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | import csv
"""
Converts file with votes by county and party to a pivot table with each county only listed once
"""
counties = []  # NOTE(review): unused; kept to preserve the module namespace
with open("county_votes.csv", 'r') as f:
    with open("processed_votes.csv", 'a') as g:
        reader = csv.reader(f)
        writer = csv.writer(g)
        header = next(reader)  # skip the header row
        curr_county = None
        GOP = 0
        DEM = 0
        third = 0
        for row in reader:
            # Key on state+county so same-named counties in different
            # states stay distinct.
            state_county = row[1] + row[2]
            party = row[3]
            num_votes = int(float(row[4]))
            if state_county != curr_county:
                # Flush the previous county's vote shares before starting
                # a new one.
                if curr_county:
                    total = GOP + DEM + third
                    # Bug fix: the original wrote these totals under the
                    # NEW county's name (state_county) instead of the
                    # county they belong to (curr_county).
                    writer.writerow([curr_county, GOP/total, DEM/total, third/total])
                curr_county = state_county
                GOP = 0
                DEM = 0
                third = 0
            if party == "GOP":
                GOP = num_votes
            elif party == "DEM":
                DEM = num_votes
            else:
                third = num_votes
        # Bug fix: flush the final county, which the original loop dropped.
        if curr_county:
            total = GOP + DEM + third
            writer.writerow([curr_county, GOP/total, DEM/total, third/total])
| [
"carrickdb@gmail.com"
] | carrickdb@gmail.com |
41b0b6ae8bf47a9965ce80c9f09a4a499ded45fb | 51320b2d4101f347229227495762a646f4d41ad8 | /DDPG Agent - Continous Reacher Environment/a_memory.py | 0d786c87f67d03b62314438012e20ea985ee199c | [
"MIT"
] | permissive | szemyd/deep-reinforcement-learning | 805533eb9653c8b35009fcea00fc552d0bf99920 | 2dc2ab8511bcf3975c51dc057b66d2d500e2cf80 | refs/heads/main | 2023-04-11T12:51:56.732555 | 2021-04-26T11:20:23 | 2021-04-26T11:20:23 | 337,783,645 | 0 | 0 | MIT | 2021-02-10T16:30:51 | 2021-02-10T16:30:50 | null | UTF-8 | Python | false | false | 1,946 | py |
import numpy as np
import random
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from constants import *
class ReplayBuffer:
    """Fixed-size cyclic store of experience tuples for off-policy learning."""

    def __init__(self, action_size, seed):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            seed (int): seed for the sampling RNG (seeds the global
                `random` module; `random.seed` returns None)
        """
        self.action_size = action_size
        # deque evicts the oldest transition once BUFFER_SIZE is reached.
        self.memory = deque(maxlen=BUFFER_SIZE)
        self.batch_size = BATCH_SIZE
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Append a single transition to the buffer."""
        self.memory.append(
            self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Draw a uniform random batch and return it as float tensors on DEVICE."""
        batch = random.sample(self.memory, k=self.batch_size)

        def as_float_tensor(rows):
            # Stack one field's rows into a 2-D array, then move to DEVICE.
            return torch.from_numpy(np.vstack(rows)).float().to(DEVICE)

        states = as_float_tensor([exp.state for exp in batch if exp is not None])
        actions = as_float_tensor([exp.action for exp in batch if exp is not None])
        rewards = as_float_tensor([exp.reward for exp in batch if exp is not None])
        next_states = as_float_tensor([exp.next_state for exp in batch if exp is not None])
        # done flags are booleans; cast through uint8 before the float tensor.
        dones = torch.from_numpy(
            np.vstack([exp.done for exp in batch if exp is not None]).astype(np.uint8)
        ).float().to(DEVICE)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Number of transitions currently stored."""
return len(self.memory) | [
"szemy2@gmail.com"
] | szemy2@gmail.com |
9b76cc3e5a3c0554ecc498d1baca0cc45673e406 | b99e57154208f6ac74d40fdebf4416edc7b51a36 | /coding_challenge_6.py | bfb0a253361b075d1475f6f90be499448d0c0e3e | [] | no_license | Shaletanu/avodha_code_challenge | 1103186f84290e0b6451bb4759376f9df645286f | bac6a8f93c4a5bf51da95f884718578e0fc69458 | refs/heads/master | 2023-02-12T22:41:27.106797 | 2021-01-14T14:56:00 | 2021-01-14T14:56:00 | 326,682,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | file1 = open("demo.txt", "w")
file1.write("Hello, I am Anu Shalet Philip.")
file1 = open("demo.txt", "r")
content = file1.read()
print(content)
file1 = open("demo.txt", "a")
file1.write("I am from Kottayam.")
file1 = open("demo.txt", "r")
content = file1.read()
print(content)
file1.close() | [
"shaletanu0@gmail.com"
] | shaletanu0@gmail.com |
dfaffdcd61d7a94c5ada0c487c4730ac16adde76 | ac1a206541472b3fd096c928e511e7b13cf97733 | /python/src/cph.py | 3dc2f4a969eb45d7547b7a5e80731790b4417f53 | [] | no_license | ScorpioCPH/code | 2979d1e00476ae181cd979d81ba2a8f82a875d72 | faeef3383633d11f9f3e533c817b4bfac3c6d701 | refs/heads/master | 2021-01-19T09:52:09.721808 | 2017-12-08T11:42:59 | 2017-12-08T11:42:59 | 82,145,392 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | # coding=utf-8
def print_str(str):
    """Print the given value to standard output."""
    # NOTE(review): the parameter name shadows the builtin ``str``; renaming
    # it (e.g. to ``text``) would be cleaner but would break any caller using
    # print_str(str=...), so it is only flagged here.
    print(str)
| [
"scorpiocph@gmail.com"
] | scorpiocph@gmail.com |
e80586d7b73647ff3cab136e14a4b886a1f83e38 | e6789e78f86279eb1bda423aee5d6fffed23ae2d | /Source/Travel/app.py | f4dd6ac98d3ee596e13821aa9ef04498b60dc3b5 | [] | no_license | frankli0324/LCTF2018 | 4b4872db2c2c6ee75d8672c02000cc1b8eec9257 | b8d95312b534258c85e9bb2e34a0ce85f3858192 | refs/heads/master | 2022-11-15T02:50:08.902822 | 2020-07-12T06:47:25 | 2020-07-12T06:47:25 | 278,698,725 | 2 | 0 | null | 2020-07-10T17:50:07 | 2020-07-10T17:50:06 | null | UTF-8 | Python | false | false | 1,106 | py | # -*- coding: utf-8 -*-
from flask import request, render_template
from config import create_app
import os
import urllib
import requests
import uuid
app = create_app()
@app.route('/upload/<filename>', methods = ['PUT'])
def upload_file(filename):
    # PUT endpoint guarded by a cookie pair.  The "password" is the host's
    # MAC address (uuid.getnode()) — guessable by design in this CTF source.
    name = request.cookies.get('name')
    pwd = request.cookies.get('pwd')
    if name != 'lctf' or pwd != str(uuid.getnode()):
        return "0"
    # NOTE(review): the filename is URL-decoded and joined without any ".."
    # sanitisation, so path traversal is possible — presumably intentional
    # for the challenge. (urllib.unquote also marks this as Python 2 code.)
    filename = urllib.unquote(filename)
    with open(os.path.join(app.config['UPLOAD_FOLDER'], filename), 'w') as f:
        f.write(request.get_data(as_text = True))
    return "1"
    return "0"  # unreachable: kept as in the original source
@app.route('/', methods = ['GET'])
def index():
    # Fetch an arbitrary URL on behalf of the client and relay the body —
    # an intentional SSRF primitive for this CTF challenge.
    url = request.args.get('url', '')
    if url == '':
        return render_template('index.html')
    # NOTE(review): only the first four characters are checked, so "http",
    # "https" (and e.g. "httpanything://") all pass; no host allow-listing.
    if "http" != url[: 4]:
        return "hacker"
    try:
        response = requests.get(url, timeout = 10)
        response.encoding = 'utf-8'
        return response.text
    except:
        # Deliberately broad: any fetch failure yields a generic message.
        return "Something Error"
@app.route('/source', methods = ['GET'])
def get_source():
    """Serve this module's own source code (challenge hint endpoint)."""
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(__file__) as source_file:
        return source_file.read()
if __name__ == '__main__':
    # Run Flask's built-in development server when executed directly.
    app.run()
| [
"tundrawork@gmail.com"
] | tundrawork@gmail.com |
b7f7294d6eed3c6580709c80a3bbdedfde794b91 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03767/s342121711.py | e279603e0ad4d33f1c70bcc3c868122d20a4b586 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | n = int(input())
a = list(map(int, input().split()))
a.sort(reverse = True)
list = []
for i, j in enumerate(a):
if i % 2 == 1:
list.append(j)
answer = sum(list[0 : n])
print(answer) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b756c6d0e4cc938e243af0395a0c6bc9ec64ea18 | 31e71ffe0a7d283db1bc35509cc1e87bbc1ffb11 | /exercise/exercise29.py | 4d1f35041cead990dd8d4ce618b8b0a7210c4a8e | [] | no_license | dayadaoshanghai/Learn-Python-the-hard-way | 879d3435e3651481728e6e3a919ad25413cf4466 | cc635d5a752074c04e74b704b55b4b88ac9005b3 | refs/heads/master | 2021-01-01T17:24:47.118273 | 2017-07-31T12:42:34 | 2017-07-31T12:42:34 | 98,064,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | people = 20
cats = 30
dogs = 15
if people < cats:
print("Too many cats! The world is doomed!")
if people > cats:
print("Not many cats! The wolrd is saved!")
if people < dogs:
print("The world is drooled on!")
if people > dogs:
print("The world is dry!")
dogs += 5
if people >= dogs:
print("People are greater than or equal to dogs.")
if people <= dogs:
print("People are less than or equal to dogs.")
if people == dogs:
print("People are equal to dogs.")
| [
"noreply@github.com"
] | noreply@github.com |
fcc07437f6183ea5ca1b7698be6bd8ec4aa4f8a1 | 6b62ec48d3772e3422053bc3d7eca2959b7fb88f | /platforms.py | 9b1ab27164c7c95f1cf3cf12acbdac633c44f1ff | [] | no_license | SueFolkerts/Python2InfinitePlatformerPhase5 | 89a6f4eee402e7a165b5f6dc0ed20f94b60f65f0 | 903be21ef091ea23c5ae3e0d78aaf3e02c48531c | refs/heads/master | 2020-03-23T13:11:40.634618 | 2018-07-20T19:16:49 | 2018-07-20T19:16:49 | 141,604,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | import pygame, random
class Platforms(pygame.sprite.Sprite):
def __init__(self, pos, img_path, width=70, height=70):
super().__init__()
self.image = pygame.Surface([width, height]).convert()
self.image.blit(pygame.image.load(img_path).convert(), (0, 0), (0, 0, width, height))
self.image.set_colorkey((0, 0, 0))
self.rect = self.image.get_rect()
self.rect.center = pos
def scroll(self, change):
# NEW FOLLOWING *************************************************************************
screen_info = pygame.display.Info()
self.rect.top += change
if self.rect.top > screen_info.current_h:
self.rect.top = -50
self.rect.left = random.randint(5,(screen_info.current_w-50)//10)*10
| [
"sfolkerts@coinstar.com"
] | sfolkerts@coinstar.com |
87ac431ed770aef0ee07763a3f61bd864ed4841c | 092227c7e85555a5f2b547629a7e9639c34b7417 | /personal_portfolio/urls.py | 47fbf7302c14ab33b45f3cde91812f4ff657f2f7 | [] | no_license | Shibly10/django-personal-portfolio | d4093ebad23b04f6380009f56a55b5aeeb662a59 | b6618c09a21d121da64b1426cdd375b16f7ad99a | refs/heads/main | 2023-01-05T22:03:09.295517 | 2020-10-29T17:08:51 | 2020-10-29T17:08:51 | 308,362,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | """personal_portfolio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from portfolio import views
from django.conf.urls.static import static
from django.conf import settings
from django.urls import include
# Project routes: Django admin, portfolio landing page, and the blog app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home , name = 'home'),
    path('blog/', include('blog.urls')),
]
# Serve uploaded media files via Django itself (development-style setup).
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [
"shibly.ar@gmail.com"
] | shibly.ar@gmail.com |
533aa898ba74cdb427d7bac5c8d8a70e01eee588 | dba1be9c35a2044fb9d07a55f52b3740147b467e | /tests/bam2msa.py | 47cf401d9e5c528613aa6ab5a9fddb116f012c18 | [] | no_license | 849189526/BioExt | 2279cb2d76f158266a08b29d2e31b8300b411b89 | e36c3b4a118fb73281ae4f068fceb046f35ab1f6 | refs/heads/master | 2020-06-25T08:18:37.373893 | 2019-01-11T18:00:58 | 2019-01-11T18:00:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | #!/usr/bin/env python3
import nose
import functools
import os
from Bio import SeqIO
from BioExt.io import BamIO
from BioExt.args import (
add_alphabet,
add_reference,
add_scorematrix
)
from BioExt.uds import _align_par
from BioExt.misc import compute_cigar, gapless
from BioExt.scorematrices import (
DNAScoreMatrix,
FrequenciesError,
ProteinScoreMatrix,
BLOSUM62
)
# nose setup hook; currently a stub — fixtures are built inside the tests.
def setup():
    ''' Define sequence reference and records '''
@nose.with_setup(setup=setup)
def test_align():
    ''' Ensure that sequence that ends with a '-' will not cause an error '''
    dir_path = os.path.dirname(os.path.realpath(__file__))
    ## Load reference sequence
    seqpath = os.path.join(dir_path, "./rsrc/SHORT.FASTA")
    output_file = os.path.join(dir_path, "./rsrc/SHORT.FASTA.test.bam")
    records = SeqIO.parse(seqpath, 'fasta')
    # The first FASTA record, with gaps removed, is the alignment reference.
    reference = gapless(next(records))

    def allseqs(records):
        # Emit the reference first (with a self-alignment CIGAR), then the
        # remaining records unchanged.
        yield compute_cigar(reference, reference)
        for record in records:
            print(record)
            yield record

    def output(records):
        # Callback handed to _align_par: writes the aligned records as BAM.
        BamIO.write(
            allseqs(records),
            output_file,
            reference
        )

    # NOTE(review): the positional flags below follow _align_par's signature
    # in BioExt.uds — confirm their meaning there before changing them.
    _align_par(
        reference,
        records,
        BLOSUM62.load(),
        True,
        False,
        None,
        None,
        output,
        False
    )

    # Read output file
    BamIO.sort(output_file)
@nose.with_setup(setup=setup)
def test_cigar():
    # Placeholder test; not yet implemented.
    pass
| [
"sweaver@temple.edu"
] | sweaver@temple.edu |
aa95ecd48c26cdc6d818087c90547e93aabb6d85 | a1662bb69ffb305c5182b41f102a17ec259b143a | /src/triangles/hashed_edge_iterator.py | 309bf35421ebdcaf3a4ecbaea597a536ec9e1b4b | [] | no_license | AndersHqst/SAAS-2014 | d5df874d835e6253e05f9b1d79f132297c24dbfb | 28b48e448baf678a8c954b78f70b8e6eed1de6fb | refs/heads/master | 2016-09-05T13:46:30.356794 | 2015-05-01T14:17:05 | 2015-05-01T14:17:05 | 34,905,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,700 | py | import utils
from time import time
# TODO: clean up
def print_stats(G):
edge_list_lengths = []
_max = 0
_min = 9999999
for key in G:
l = len(G[key][0])
if l < _min:
_min = l
if l > _max:
_max = l
edge_list_lengths.append(l)
avg = sum(edge_list_lengths) / float(len(edge_list_lengths))
s = "Edge list max:{} min:{} avg:{} number:{}".format(_max, _min, avg, len(G))
print s
def triangles(a, b, G):
centers = set()
# triangles, if b is in a's candidate dict, a
# triangle node exist for each center node
if b in G[a][1]:
centers = G[a][1][b]
res = []
# Constructing the triangles, should only be a small to constant number of iterations.
# and could entirely be omitted by returning a less pretty result og (a,b, [centers])
for center in centers:
# res.append(utils.triple_sort((a, center, b))) #sorted
res.append((a, b, center)) #unsorted
return centers, res
def insert(a, b, G, centers=set()):
"""
Insert a, and compare b to all nodes in a's edge list 'c_i'
insert max(b, c_i) into min(b, c_i)'s candidate dict.
"""
# TODO, can we avoid checking the center nodes this way?
# c is the center node of a triangle created from
# the edge insertion, no need for update
for c in G[a][0]:
if c in centers:
continue
# Insert/create candidate dict on smallest Node
if c < b:
if b in G[c][1]:
G[c][1][b].add(a)
else:
G[c][1][b] = set([a])
else:
if c in G[b][1]:
G[b][1][c].add(a)
else:
G[b][1][c] = set([a])
# add b to a's edge list
G[a][0].append(b)
def fun(V, E):
"""
Finds all triangles in the edge list.
Nodes are unique integers. Each node is stored by its integer key in the constructed grapg G
Edges are tuples of Nodes (intergers) (a, b).
with a tuple ([], {}) of edge list, candidate dictionary.
Iterative algorithm that constructs a graph, and finds
triangles at the same time. If an end node 'a' of an inserted edge (a,b)
is already in the graph, all neighbours c of 'a' will be compared with b,
and the smaller will save a candidate dict with key being the min node, and the value being the set
of middle nodes, e.g. {min(b,c_i): set(a_i)}.
If both ends (a,b) of an edge already exists in the graph, getting all
triangles that the edge produces is a lookup in this candidate dict, with
a small iteration building the triangles.
"""
G = {}
res = []
for a, b in E:
# Create non-atatched
if not a in G and not b in G:
G[a] = ([b], {})
G[b] = ([a], {})
# insert edge end 'a'
elif a in G and not b in G:
G[b] = ([a], {})
insert(a, b, G)
# insert edge end 'b'
elif b in G and not a in G:
G[a] = ([b], {})
insert(b, a, G)
# possible triangle
else:
centers = []
ts = []
if a < b:
centers, ts = triangles(a, b, G)
insert(a, b, G, centers)
insert(b, a, G, centers)
else:
centers, ts = triangles(b, a, G)
insert(a, b, G, centers)
insert(b, a, G, centers)
# append the result
if len(ts) > 0:
res = res + ts
# print_stats(G)
return res
# test_edge_iterator()
# def test():
# edges = [(1,2), (2,3), (1,4), (4,3), (1,3)]
# print fun(None, edges)
# def test2():
# # mine frequent items
# transactions = parser.parse_csv_to_mat('/Users/ahkj/Dropbox/SAAS/data/csv/sample-big/customers.txt')
# all_frequent_items = fpgrowth(transactions, supp=-10, min=1, max=3)
# # edges
# edges = [items for (items, freq) in all_frequent_items if len(items) == 2]
# print 'mined edges'
# # Time the 'fun' algorithm
# times = []
# for i in range(1000):
# start = time()
# triangles = fun(None, edges)
# times.append(time() - start)
# print 'fun avg: {}'.format(sum(times) / float(len(times)))
# print "Length: {}".format(len(triangles))
# # Time the filter items algorithm
# times = []
# for i in range(1000):
# start = time()
# triangles, trips = filter_items(all_frequent_items)
# times.append(time() - start)
# print 'filter avg: {}'.format(sum(times) / float(len(times)))
# print "Length: {}".format(len(triangles))
# test2()
| [
"ahk@e-conomic.com"
] | ahk@e-conomic.com |
798efca679f2d54fa6c1a967b92fe1d157e03f55 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnfaulti.py | 0db9ec8c46bcffabd20b375fd10825d23728bee3 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 624 | py | ii = [('CookGHP3.py', 1), ('SadlMLP.py', 2), ('MartHSI2.py', 1), ('LeakWTI2.py', 1), ('KembFJ1.py', 1), ('WilkJMC3.py', 1), ('WilbRLW5.py', 1), ('GellWPT2.py', 1), ('SeniNSP.py', 1), ('LyttELD.py', 1), ('AdamHMM.py', 1), ('ClarGE.py', 3), ('DaltJMA.py', 1), ('NewmJLP.py', 3), ('CoopJBT.py', 1), ('LeakWTI4.py', 1), ('MereHHB3.py', 1), ('HowiWRL2.py', 1), ('BailJD3.py', 1), ('MartHRW.py', 1), ('KembFJ2.py', 1), ('BellCHM.py', 1), ('HaliTBC.py', 2), ('WilbRLW3.py', 1), ('AinsWRR2.py', 1), ('ClarGE3.py', 2), ('MartHRW2.py', 2), ('DibdTRL.py', 2), ('MartHSI.py', 1), ('LyelCPG3.py', 1), ('TaylIF.py', 3), ('WaylFEP.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
6df06f09484f86ee27ca27323878d76a2cc016fc | c4c068b02899e333fb6d7d735bece664d257c563 | /pset8/mashup/helpers.py | e4858d86ab07cb1cccca30737499e3a730ef69be | [
"CC-BY-3.0"
] | permissive | eqmvii/CS50 | 442290ea91d6352617beaf8319e3c338bbf1d103 | 0024ea50131abefa4e87fc1b90cee8430a648575 | refs/heads/master | 2021-01-20T04:50:43.719740 | 2017-04-28T20:37:10 | 2017-04-28T20:37:10 | 89,742,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import feedparser
import urllib.parse
def lookup(geo):
    """Looks up articles for geo.

    Returns a list of {"link", "title"} dicts, memoised per *geo* on
    ``lookup.cache``.
    """
    # check cache for geo
    if geo in lookup.cache:
        return lookup.cache[geo]
    # get feed from Google. !!! CURRENTLY TO AVOID SPAM BOT J'ACUSE FROM GOOGLE !!!
    # http://news.google.com/news?geo=22044&output=rss
    feed = feedparser.parse("http://news.google.com/news?geo={}&output=rss".format(urllib.parse.quote(geo, safe="")))
    # feed = feedparser.parse("http://ThisIsVeryBrokenOnPurpose.broken.com/news?geo={}&output=rss".format(urllib.parse.quote(geo, safe="")))
    # Debug flag: always 1, so the warning below always prints on fallback.
    flag = 1
    # print(feed)
    # if no items in feed, get feed from Onion
    if not feed["items"]:
        feed = feedparser.parse("http://www.theonion.com/feeds/rss")
        if flag == 1:
            print("!!!!!!!!!!!!!!!!!!!!!!Google still hates you!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    # cache results (unbounded: grows for the life of the process)
    lookup.cache[geo] = [{"link": item["link"], "title": item["title"]} for item in feed["items"]]
    # return results
    return lookup.cache[geo]

# initialize cache
lookup.cache = {}
| [
"coelomate@gmail.com"
] | coelomate@gmail.com |
ea9f4f343b6d8e2a0a698b51ce3bb7b4ef6f0749 | d749ebc7b7f0a686cf04f31bf59abb82a5ea28e4 | /sourceful/MusicMatcher.py | 4e26db72f55c8ebf7e209d190b477b76debaa87c | [] | no_license | ssa11200/python_warmup | c780a97fe823ce61512bf34d8434b339a14943b1 | 507285405fe4f7d43e1eecca9eb5ec5cdd217e74 | refs/heads/master | 2023-03-24T13:15:27.060823 | 2021-03-23T20:27:04 | 2021-03-23T20:27:04 | 343,135,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | from .api_modules import (
analyse_feeling_polarity,
find_music_for_feeling,
find_lyrics_for_music,
)
class MusicMatcher:
    """Three-step pipeline: feeling -> polarity -> music -> lyrics."""

    def __init__(self, feeling):
        self.feeling = feeling
        self._music = None
        self._lyrics = None
        self._polarity = None

    def _analyse_feeling(self):
        # Step 1: score the feeling's sentiment polarity.
        self._polarity = analyse_feeling_polarity(self.feeling)

    def _find_music(self):
        # Step 2: pick music matching that polarity.
        self._music = find_music_for_feeling(self._polarity)

    def _find_lyrics(self):
        # Step 3: fetch the lyrics for the chosen music.
        self._lyrics = find_lyrics_for_music(self._music)

    def match_music(self):
        """Run the full matching pipeline in order."""
        for step in (self._analyse_feeling, self._find_music, self._find_lyrics):
            step()

    @property
    def music(self):
        """The matched music and its lyrics (None until match_music runs)."""
        result = {"music_details": self._music, "music_lyrics": self._lyrics}
        return result
"ssa11200@gmail.com"
] | ssa11200@gmail.com |
6bfa1fa52e42430692f0306be410aa035a39fb92 | 811acdfdd52a56a8440ca72c889bec850280b5ba | /libro/funcion_print.py | 1c05f5886b660b6b11ea36481ef57465e3a3e9eb | [] | no_license | josselinereyes841/CYPJosselineRM | ca36e4acbb242a290682c07234f3e7cf864e9114 | db42ec10b4529424c975f9dc4dcf53eef896efee | refs/heads/master | 2021-06-30T13:19:13.001339 | 2021-01-15T23:16:44 | 2021-01-15T23:16:44 | 207,672,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | #print tiene 4 formas de uso
"""
1.- Con comas
2.- con signo '+'
3.- con la funcion format()
4.- es con una variante de format()
"""
#Con comas , concatenar agregando
# un espacio y haciendo casting de tipo
edad = 10
nombre = "Juan"
estatura =1.67
print(edad , estatura , nombre)
#Con '+' hace lo mismo pero no hace el casting automático
#No agrega espacio
print(str(edad) + str(estatura) + nombre)
#funcion format()
print("Nombre:{} Edad:{} Estatura:.{}".format(nombre,edad,estatura))
#4.-con una variante de format() simplificada
print(f"Nombe: \"{nombre}\" \nEdad:\t{edad} ")
#print y el argumento end
print("Solo hay dos tipos de personas, las que saben binario y las que no",end=" ")
print("otra linea")
| [
"josselinereyes35@outlook.es"
] | josselinereyes35@outlook.es |
b7c74db336e873841c529bfce999bb9f261be520 | 01e7b0d71126597de316e95858c54267ee30d3f5 | /data_utils/video_utils.py | d4e3ded1253752a0f6949ab56e3df9664f1adf80 | [
"MIT"
] | permissive | Sapphirine/201912-13-video-object-segmentation | 0604aaf8457e68139aef4cc6f56c324b6557b669 | cfd36f502385f99b39e309ae39309d5e1785cfc8 | refs/heads/master | 2020-11-26T17:09:53.467432 | 2019-12-19T14:59:01 | 2019-12-19T14:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | import os
# Working directory for per-video intermediate files.
TMP_FOLDER = "tmp"
# Directory holding per-video model subdirectories.
MODEL_FOLDER = "models"

# Artifacts already present on disk, collected once at import time.
# NOTE(review): the scan stores full .mp4 *filenames* in cached_video,
# while init_video below checks bare video names — confirm which key
# format callers rely on.
cached_video = set()
cached_model = set()
for f in os.listdir(TMP_FOLDER):
    if os.path.isdir(os.path.join(TMP_FOLDER, f)):
        for i in os.listdir(os.path.join(TMP_FOLDER, f)):
            if os.path.splitext(i)[1] == ".mp4":
                cached_video.add(i)
for f in os.listdir(MODEL_FOLDER):
    if os.path.isdir(os.path.join(MODEL_FOLDER, f)):
        cached_model.add(f)
def video2img(video_path, img_folder):
    """Extract frames from *video_path* into *img_folder* as 00000.jpg, ...

    Uses ffmpeg at 24 fps, capped at the first 140 seconds of video.
    """
    # shlex.quote guards against spaces and shell metacharacters in the
    # paths; the original interpolated them unquoted into the shell command.
    cmd = "ffmpeg -i %s -r 24 -t 140 -start_number 0 %s/%%05d.jpg" % (
        shlex.quote(video_path), shlex.quote(img_folder))
    print(cmd)
    os.system(cmd)
def img2video(img_folder, video_path):
    """Encode the numbered jpg frames in *img_folder* into an H.264 mp4."""
    # shlex.quote guards against spaces and shell metacharacters in the
    # paths; the original interpolated them unquoted into the shell command.
    cmd = "ffmpeg -f image2 -i %s/%%05d.jpg -c:v libx264 -preset" \
          " medium -crf 18 -r 24 %s" % (
              shlex.quote(img_folder), shlex.quote(video_path))
    print(cmd)
    os.system(cmd)
def init_video(video_name):
    """Ensure the on-disk layout for *video_name* exists and return its paths.

    Creates tmp/<video_name>/{pred,concat,img} and returns
    (video_path, img_path, first_mask_path).
    """
    # set.add is idempotent, so no membership check is needed.
    cached_video.add(video_name)
    video_folder = os.path.join(TMP_FOLDER, video_name)
    img_path = os.path.join(video_folder, 'img')
    # exist_ok avoids the check-then-create race of exists() + makedirs(),
    # and also repairs the layout if directories were removed after caching.
    for sub in ('', 'pred', 'concat', 'img'):
        os.makedirs(os.path.join(video_folder, sub), exist_ok=True)
    first_mask_path = os.path.join(video_folder, "first_mask.png")
    video_path = os.path.join(video_folder, video_name + ".mp4")
    return video_path, img_path, first_mask_path
def render_video(video_name, method="CONCAT"):
    """Encode rendered frames for *video_name* to an mp4 and return its path.

    ``method`` selects the frame source: "CONCAT" uses the side-by-side
    frames, "ANNO" the annotated predictions.  Raises ValueError otherwise.
    """
    mode = method.upper()
    if mode == "CONCAT":
        frame_dir = os.path.join(TMP_FOLDER, video_name, 'concat')
        out_path = os.path.join(TMP_FOLDER, video_name,
                                video_name + "-concat.mp4")
    elif mode == "ANNO":
        frame_dir = os.path.join(TMP_FOLDER, video_name, 'pred')
        out_path = os.path.join(TMP_FOLDER, video_name,
                                video_name + "-anno.mp4")
    else:
        raise ValueError("Render Method %s is not supported" % mode)
    img2video(frame_dir, out_path)
    return out_path
| [
"1178869226@qq.com"
] | 1178869226@qq.com |
9233296ae37437b00a31b248c98072ae1ac1daeb | 3ed9e1c43f8560ad08ce7ffff83bdae8aaa0992a | /superset/datasets/commands/importers/v1/utils.py | c21c66ff18077bb2fed9927fb56fc49fc3a1d27e | [
"Apache-2.0",
"OFL-1.1"
] | permissive | sekikn/superset | 7e07b3a6f494ead275b43909a2f8990cf67e170c | 45020860d5c65f08c73f550054166ba1089fecef | refs/heads/master | 2022-12-22T13:25:37.095102 | 2021-05-25T21:59:05 | 2021-05-25T21:59:05 | 92,110,168 | 0 | 0 | Apache-2.0 | 2022-12-19T00:52:32 | 2017-05-23T00:03:38 | Python | UTF-8 | Python | false | false | 5,394 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-branches
import gzip
import json
import logging
import re
from typing import Any, Dict
from urllib import request
import pandas as pd
from flask import current_app
from sqlalchemy import BigInteger, Boolean, Date, DateTime, Float, String, Text
from sqlalchemy.orm import Session
from sqlalchemy.sql.visitors import VisitableType
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.utils.core import get_example_database
# Module-level logger.
logger = logging.getLogger(__name__)

# Rows per INSERT batch when loading example data.
CHUNKSIZE = 512
# Matches e.g. "VARCHAR(255)" and captures the declared length.
VARCHAR = re.compile(r"VARCHAR\((\d+)\)", re.IGNORECASE)

# Dataset config fields that are persisted as JSON-encoded strings.
JSON_KEYS = {"params", "template_params"}

# Native database type name -> SQLAlchemy type instance.
type_map = {
    "BOOLEAN": Boolean(),
    "VARCHAR": String(255),
    "STRING": String(255),
    "TEXT": Text(),
    "BIGINT": BigInteger(),
    "FLOAT": Float(),
    "FLOAT64": Float(),
    "DOUBLE PRECISION": Float(),
    "DATE": Date(),
    "DATETIME": DateTime(),
    "TIMESTAMP WITHOUT TIME ZONE": DateTime(timezone=False),
    "TIMESTAMP WITH TIME ZONE": DateTime(timezone=True),
}
def get_sqla_type(native_type: str) -> VisitableType:
    """Map a native database type string to a SQLAlchemy type instance."""
    normalized = native_type.upper()
    if normalized in type_map:
        return type_map[normalized]

    # Sized VARCHAR(N) types are built on demand from the captured length.
    match = VARCHAR.match(native_type)
    if match is not None:
        return String(int(match.group(1)))

    raise Exception(f"Unknown type: {native_type}")
def get_dtype(df: pd.DataFrame, dataset: SqlaTable) -> Dict[str, VisitableType]:
    """Map each dataset column present in *df* to its SQLAlchemy type."""
    present = set(df.keys())
    dtype: Dict[str, VisitableType] = {}
    for column in dataset.columns:
        if column.column_name in present:
            dtype[column.column_name] = get_sqla_type(column.type)
    return dtype
def import_dataset(
    session: Session,
    config: Dict[str, Any],
    overwrite: bool = False,
    force_data: bool = False,
) -> SqlaTable:
    """Import a dataset described by an export ``config`` dict.

    Returns the existing dataset untouched when it is already present and
    ``overwrite`` is false; otherwise creates/updates it (recursively, with
    its columns and metrics) and, when the config carries a ``data`` URI,
    loads the example data as well.
    """
    existing = session.query(SqlaTable).filter_by(uuid=config["uuid"]).first()
    if existing:
        if not overwrite:
            return existing
        config["id"] = existing.id

    # TODO (betodealmeida): move this logic to import_from_dict
    config = config.copy()

    # JSON-encode fields that are persisted as strings.
    for key in JSON_KEYS:
        if config.get(key) is not None:
            try:
                config[key] = json.dumps(config[key])
            except TypeError:
                logger.info("Unable to encode `%s` field: %s", key, config[key])
    for metric in config.get("metrics", []):
        if metric.get("extra"):
            try:
                metric["extra"] = json.dumps(metric["extra"])
            except TypeError:
                logger.info("Unable to encode `extra` field: %s", metric["extra"])

    # should we delete columns and metrics not present in the current import?
    sync = ["columns", "metrics"] if overwrite else []

    # should we also load data into the dataset?
    data_uri = config.get("data")

    # import recursively to include columns and metrics
    dataset = SqlaTable.import_from_dict(session, config, recursive=True, sync=sync)
    if dataset.id is None:
        session.flush()

    example_database = get_example_database()
    try:
        table_exists = example_database.has_table_by_name(dataset.table_name)
    except Exception:
        # MySQL doesn't play nice with GSheets table names
        # BUG FIX: the original call had a %s placeholder but passed no
        # argument, so the table name was never interpolated.
        logger.warning(
            "Couldn't check if table %s exists, stopping import",
            dataset.table_name,
        )
        # Bare raise re-raises with the original traceback intact.
        raise

    if data_uri and (not table_exists or force_data):
        load_data(data_uri, dataset, example_database, session)

    return dataset
def load_data(
    data_uri: str, dataset: SqlaTable, example_database: Database, session: Session
) -> None:
    """Download the CSV (optionally gzipped) at *data_uri* and load it into
    the dataset's table, replacing any existing contents."""
    data = request.urlopen(data_uri)
    if data_uri.endswith(".gz"):
        data = gzip.open(data)
    df = pd.read_csv(data, encoding="utf-8")
    dtype = get_dtype(df, dataset)

    # convert temporal columns
    for column_name, sqla_type in dtype.items():
        if isinstance(sqla_type, (Date, DateTime)):
            df[column_name] = pd.to_datetime(df[column_name])

    # reuse session when loading data if possible, to make import atomic
    if example_database.sqlalchemy_uri == current_app.config.get(
        "SQLALCHEMY_DATABASE_URI"
    ) or not current_app.config.get("SQLALCHEMY_EXAMPLES_URI"):
        logger.info("Loading data inside the import transaction")
        connection = session.connection()
    else:
        logger.warning("Loading data outside the import transaction")
        connection = example_database.get_sqla_engine()

    # if_exists="replace" drops and recreates the table on re-import.
    df.to_sql(
        dataset.table_name,
        con=connection,
        schema=dataset.schema,
        if_exists="replace",
        chunksize=CHUNKSIZE,
        dtype=dtype,
        index=False,
        method="multi",
    )
| [
"noreply@github.com"
] | noreply@github.com |
cc693a128105938fd7647af5527a511855e80f4c | abcfd07772ce75f34e51592189c29cf84d1a3611 | /flask/lib/python3.6/site-packages/whoosh/util/times.py | aded78f78b30e1d657ebea53c144a87c93bb6d1c | [] | no_license | yuhaihui3435/p_mc | 66d89bcccf214e53729b26a0f80ddee8797e9e3e | 3039a5c691b649fc88e941a2553b1a7e0aac2a0a | refs/heads/master | 2021-06-28T18:52:00.111385 | 2017-09-15T00:26:02 | 2017-09-15T00:26:58 | 103,524,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,948 | py | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import calendar
import copy
from datetime import date, datetime, timedelta
from whoosh.compat import iteritems
class TimeError(Exception):
    """Raised for invalid or out-of-range date/time components."""
    pass
def relative_days(current_wday, wday, dir):
    """Return the signed number of days to the "next" or "last" weekday.

    Weekdays are numbers: 0 = monday, 1 = tuesday, 2 = wednesday, etc.

    >>> # Get the number of days to the next tuesday, if today is Sunday
    >>> relative_days(6, 1, 1)
    2

    :param current_wday: the number of the current weekday.
    :param wday: the target weekday.
    :param dir: -1 for the "last" (past) weekday, 1 for the "next" (future)
        weekday.
    """
    if current_wday == wday:
        # Same weekday: a full week away in the requested direction.
        return 7 * dir
    if dir == 1:
        return (wday - current_wday) % 7
    return -((current_wday - wday) % 7)
def timedelta_to_usecs(td):
    """Return *td* expressed as a whole number of microseconds."""
    usecs = td.microseconds
    usecs += td.seconds * 1000000       # seconds -> microseconds
    usecs += td.days * 86400000000      # days -> microseconds
    return usecs
def datetime_to_long(dt):
    """Converts a datetime object to a long integer representing the number
    of microseconds since ``datetime.min``.
    """
    delta = dt.replace(tzinfo=None) - dt.min
    # Inlined microsecond conversion (equivalent to timedelta_to_usecs).
    return (delta.days * 86400000000 + delta.seconds * 1000000
            + delta.microseconds)
def long_to_datetime(x):
    """Converts a long integer representing the number of microseconds since
    ``datetime.min`` to a datetime object.
    """
    days, remainder = divmod(x, 86400000000)        # microseconds per day
    seconds, microseconds = divmod(remainder, 1000000)
    return datetime.min + timedelta(days=days, seconds=seconds,
                                    microseconds=microseconds)
# Ambiguous datetime object
class adatetime(object):
    """An "ambiguous" datetime object. This object acts like a
    ``datetime.datetime`` object but can have any of its attributes set to
    None, meaning unspecified.
    """
    # Names of the components that may individually be left as None.
    units = frozenset(("year", "month", "day", "hour", "minute", "second",
                       "microsecond"))
    def __init__(self, year=None, month=None, day=None, hour=None, minute=None,
                 second=None, microsecond=None):
        # Copy-constructor path: adatetime(datetime_instance) copies all fields.
        if isinstance(year, datetime):
            dt = year
            self.year, self.month, self.day = dt.year, dt.month, dt.day
            self.hour, self.minute, self.second = dt.hour, dt.minute, dt.second
            self.microsecond = dt.microsecond
        else:
            # Validate each supplied (non-None) component; None = unspecified.
            if month is not None and (month < 1 or month > 12):
                raise TimeError("month must be in 1..12")
            if day is not None and day < 1:
                raise TimeError("day must be greater than 1")
            if (year is not None and month is not None and day is not None
                and day > calendar.monthrange(year, month)[1]):
                raise TimeError("day is out of range for month")
            if hour is not None and (hour < 0 or hour > 23):
                raise TimeError("hour must be in 0..23")
            if minute is not None and (minute < 0 or minute > 59):
                raise TimeError("minute must be in 0..59")
            if second is not None and (second < 0 or second > 59):
                raise TimeError("second must be in 0..59")
            if microsecond is not None and (microsecond < 0
                                            or microsecond > 999999):
                raise TimeError("microsecond must be in 0..999999")
            self.year, self.month, self.day = year, month, day
            self.hour, self.minute, self.second = hour, minute, second
            self.microsecond = microsecond
    def __eq__(self, other):
        if not other.__class__ is self.__class__:
            # A fully-specified adatetime may compare equal to a datetime.
            if not is_ambiguous(self) and isinstance(other, datetime):
                return fix(self) == other
            else:
                return False
        return all(getattr(self, unit) == getattr(other, unit)
                   for unit in self.units)
    def __repr__(self):
        return "%s%r" % (self.__class__.__name__, self.tuple())
    def tuple(self):
        """Returns the attributes of the ``adatetime`` object as a tuple of
        ``(year, month, day, hour, minute, second, microsecond)``.
        """
        # NOTE: this intentionally shadows the builtin name ``tuple`` as a method.
        return (self.year, self.month, self.day, self.hour, self.minute,
                self.second, self.microsecond)
    def date(self):
        # Raises TypeError if year/month/day are still None.
        return date(self.year, self.month, self.day)
    def copy(self):
        return adatetime(year=self.year, month=self.month, day=self.day,
                         hour=self.hour, minute=self.minute, second=self.second,
                         microsecond=self.microsecond)
    def replace(self, **kwargs):
        """Returns a copy of this object with the attributes given as keyword
        arguments replaced.
        >>> adt = adatetime(year=2009, month=10, day=31)
        >>> adt.replace(year=2010)
        (2010, 10, 31, None, None, None, None)
        """
        newadatetime = self.copy()
        for key, value in iteritems(kwargs):
            if key in self.units:
                setattr(newadatetime, key, value)
            else:
                raise KeyError("Unknown argument %r" % key)
        return newadatetime
    def floor(self):
        """Returns a ``datetime`` version of this object with all unspecified
        (None) attributes replaced by their lowest values.
        This method raises an error if the ``adatetime`` object has no year.
        >>> adt = adatetime(year=2009, month=5)
        >>> adt.floor()
        datetime.datetime(2009, 5, 1, 0, 0, 0, 0)
        """
        y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
                                 self.minute, self.second, self.microsecond)
        if y is None:
            raise ValueError("Date has no year")
        if m is None:
            m = 1
        if d is None:
            d = 1
        if h is None:
            h = 0
        if mn is None:
            mn = 0
        if s is None:
            s = 0
        if ms is None:
            ms = 0
        return datetime(y, m, d, h, mn, s, ms)
    def ceil(self):
        """Returns a ``datetime`` version of this object with all unspecified
        (None) attributes replaced by their highest values.
        This method raises an error if the ``adatetime`` object has no year.
        >>> adt = adatetime(year=2009, month=5)
        >>> adt.ceil()
        datetime.datetime(2009, 5, 31, 23, 59, 59, 999999)
        """
        y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
                                 self.minute, self.second, self.microsecond)
        if y is None:
            raise ValueError("Date has no year")
        if m is None:
            m = 12
        if d is None:
            # Last day of the (possibly defaulted) month.
            d = calendar.monthrange(y, m)[1]
        if h is None:
            h = 23
        if mn is None:
            mn = 59
        if s is None:
            s = 59
        if ms is None:
            ms = 999999
        return datetime(y, m, d, h, mn, s, ms)
    def disambiguated(self, basedate):
        """Returns either a ``datetime`` or unambiguous ``timespan`` version
        of this object.
        Unless this ``adatetime`` object is full specified down to the
        microsecond, this method will return a timespan built from the "floor"
        and "ceil" of this object.
        This method raises an error if the ``adatetime`` object has no year.
        >>> adt = adatetime(year=2009, month=10, day=31)
        >>> adt.disambiguated(datetime.now())
        timespan(datetime(2009, 10, 31, 0, 0, 0, 0), datetime(2009, 10, 31, 23, 59 ,59, 999999)
        """
        dt = self
        if not is_ambiguous(dt):
            return fix(dt)
        # Ambiguous: expand into a span covering all possible instants.
        return timespan(dt, dt).disambiguated(basedate)
# Time span class
class timespan(object):
    """A span of time between two ``datetime`` or ``adatetime`` objects.
    """
    def __init__(self, start, end):
        """
        :param start: a ``datetime`` or ``adatetime`` object representing the
            start of the time span.
        :param end: a ``datetime`` or ``adatetime`` object representing the
            end of the time span.
        """
        if not isinstance(start, (datetime, adatetime)):
            raise TimeError("%r is not a datetime object" % start)
        if not isinstance(end, (datetime, adatetime)):
            raise TimeError("%r is not a datetime object" % end)
        # Defensive copies so disambiguation can't mutate the caller's objects.
        self.start = copy.copy(start)
        self.end = copy.copy(end)
    def __eq__(self, other):
        if not other.__class__ is self.__class__:
            return False
        return self.start == other.start and self.end == other.end
    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__, self.start, self.end)
    def disambiguated(self, basedate, debug=0):
        """Returns an unambiguous version of this object.
        >>> start = adatetime(year=2009, month=2)
        >>> end = adatetime(year=2009, month=10)
        >>> ts = timespan(start, end)
        >>> ts
        timespan(adatetime(2009, 2, None, None, None, None, None), adatetime(2009, 10, None, None, None, None, None))
        >>> ts.disambiguated(datetime.now())
        timespan(datetime(2009, 2, 28, 0, 0, 0, 0), datetime(2009, 10, 31, 23, 59 ,59, 999999)
        """
        # - If year is in start but not end, use basedate.year for end
        # -- If year is in start but not end, but startdate is > basedate,
        #    use "next <monthname>" to get end month/year
        # - If year is in end but not start, copy year from end to start
        # - Support "next february", "last april", etc.
        start, end = copy.copy(self.start), copy.copy(self.end)
        # Remember which years were unspecified so out-of-order results can be
        # repaired below by shifting the guessed year rather than swapping.
        start_year_was_amb = start.year is None
        end_year_was_amb = end.year is None
        if has_no_date(start) and has_no_date(end):
            # The start and end points are just times, so use the basedate
            # for the date information.
            by, bm, bd = basedate.year, basedate.month, basedate.day
            start = start.replace(year=by, month=bm, day=bd)
            end = end.replace(year=by, month=bm, day=bd)
        else:
            # If one side has a year and the other doesn't, the decision
            # of what year to assign to the ambiguous side is kind of
            # arbitrary. I've used a heuristic here based on how the range
            # "reads", but it may only be reasonable in English. And maybe
            # even just to me.
            if start.year is None and end.year is None:
                # No year on either side, use the basedate
                start.year = end.year = basedate.year
            elif start.year is None:
                # No year in the start, use the year from the end
                start.year = end.year
            elif end.year is None:
                end.year = max(start.year, basedate.year)
        if start.year == end.year:
            # Once again, if one side has a month and day but the other side
            # doesn't, the disambiguation is arbitrary. Does "3 am to 5 am
            # tomorrow" mean 3 AM today to 5 AM tomorrow, or 3am tomorrow to
            # 5 am tomorrow? What I picked is similar to the year: if the
            # end has a month+day and the start doesn't, copy the month+day
            # from the end to the start UNLESS that would make the end come
            # before the start on that day, in which case use the basedate
            # instead. If the start has a month+day and the end doesn't, use
            # the basedate.
            start_dm = not (start.month is None and start.day is None)
            end_dm = not (end.month is None and end.day is None)
            if end_dm and not start_dm:
                if start.floor().time() > end.ceil().time():
                    start.month = basedate.month
                    start.day = basedate.day
                else:
                    start.month = end.month
                    start.day = end.day
            elif start_dm and not end_dm:
                end.month = basedate.month
                end.day = basedate.day
        if floor(start).date() > ceil(end).date():
            # If the disambiguated dates are out of order:
            # - If no start year was given, reduce the start year to put the
            #   start before the end
            # - If no end year was given, increase the end year to put the end
            #   after the start
            # - If a year was specified for both, just swap the start and end
            if start_year_was_amb:
                start.year = end.year - 1
            elif end_year_was_amb:
                end.year = start.year + 1
            else:
                start, end = end, start
        # Collapse any remaining ambiguity down to concrete datetimes.
        start = floor(start)
        end = ceil(end)
        if start.date() == end.date() and start.time() > end.time():
            # If the start and end are on the same day, but the start time
            # is after the end time, move the end time to the next day
            end += timedelta(days=1)
        return timespan(start, end)
# Functions for working with datetime/adatetime objects
def floor(at):
    """Lower bound helper: datetimes pass through, adatetimes are floored."""
    return at if isinstance(at, datetime) else at.floor()
def ceil(at):
    """Upper bound helper: datetimes pass through, adatetimes are ceiled."""
    return at if isinstance(at, datetime) else at.ceil()
def fill_in(at, basedate, units=adatetime.units):
    """Returns a copy of ``at`` with any unspecified (None) units filled in
    with values from ``basedate``.
    """
    if isinstance(at, datetime):
        return at
    kwargs = {}
    for unit in units:
        current = getattr(at, unit)
        kwargs[unit] = getattr(basedate, unit) if current is None else current
    return fix(adatetime(**kwargs))
def has_no_date(at):
    """Returns True if the given object is an ``adatetime`` where ``year``,
    ``month``, and ``day`` are all None.
    """
    if isinstance(at, datetime):
        return False
    return (at.year, at.month, at.day) == (None, None, None)
def has_no_time(at):
    """Returns True if the given object is an ``adatetime`` where ``hour``,
    ``minute``, ``second`` and ``microsecond`` are all None.
    """
    if isinstance(at, datetime):
        return False
    return (at.hour, at.minute, at.second, at.microsecond) == (None, None,
                                                               None, None)
def is_ambiguous(at):
    """Returns True if the given object is an ``adatetime`` with any of its
    attributes equal to None.
    """
    if isinstance(at, datetime):
        return False
    for attr in adatetime.units:
        if getattr(at, attr) is None:
            return True
    return False
def is_void(at):
    """Returns True if the given object is an ``adatetime`` with all of its
    attributes equal to None.
    """
    if isinstance(at, datetime):
        return False
    for attr in adatetime.units:
        if getattr(at, attr) is not None:
            return False
    return True
def fix(at):
    """If the given object is an ``adatetime`` that is unambiguous (because
    all its attributes are specified, that is, not equal to None), returns a
    ``datetime`` version of it. Otherwise returns the ``adatetime`` object
    unchanged.
    """
    # Plain datetimes and still-ambiguous objects are returned as-is;
    # is_ambiguous(datetime) is False, so the evaluation order is immaterial.
    if is_ambiguous(at) or isinstance(at, datetime):
        return at
    return datetime(year=at.year, month=at.month, day=at.day,
                    hour=at.hour, minute=at.minute, second=at.second,
                    microsecond=at.microsecond)
| [
"125227112@qq.com"
] | 125227112@qq.com |
3c156345bff5b954c74eae0c7acbc51069a76b7c | 67af82e4e3e654fd59dc8e14479907c084d68e4b | /LeetCode-Python/677. Map Sum Pairs.py | 638b4bab681c819d3d5830cb2219593a060b123b | [] | no_license | KaranJaswani/Codes | 6ba3f90cc3a0908ccfa4cf4c9b4794f5de6c63a6 | 0a833b8f666385500de5a55731b1a5590827b207 | refs/heads/master | 2020-05-30T07:12:37.383960 | 2017-12-30T14:06:45 | 2017-12-30T14:06:45 | 68,880,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | class TrieNode(object):
    def __init__(self, char, value):
        # One trie node: the character it represents, the running sum of the
        # values of all keys whose path passes through this node, a flag
        # marking the end of a complete key, and 26 child slots (keys are
        # assumed to contain lowercase a-z only).
        self.character = char
        self.value = value
        self.isWord = False
        self.children = [None] * 26
class MapSum(object):
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = TrieNode('', 0)
        # key -> last value inserted for that key.  Re-inserting a key must
        # apply only the *difference* along its path (see insert below).
        self._vals = {}
    def insert(self, key, val):
        """
        Insert ``key`` with value ``val``, overwriting any previous value.

        Bug fix: the original set every prefix node's running sum to ``val``
        when the key already existed, destroying the contributions of other
        keys sharing that prefix (e.g. insert("app", 5); insert("apple", 3);
        insert("apple", 2) corrupted sum("app")).  Now only the delta between
        the new and the previous value for ``key`` is propagated.

        :type key: str
        :type val: int
        :rtype: void
        """
        delta = val - self._vals.get(key, 0)
        self._vals[key] = val
        current = self.root
        for char in key:
            pos = ord(char) - ord('a')
            if current.children[pos] is None:
                current.children[pos] = TrieNode(char, 0)
            current = current.children[pos]
            current.value += delta
        current.isWord = True
    def search(self, word):
        """
        Returns if the word is in the trie.
        :type word: str
        :rtype: bool
        """
        node = self.root
        for char in word:
            pos = ord(char) - ord('a')
            if node.children[pos] is None:
                return False
            node = node.children[pos]
        return node.isWord
    def sum(self, prefix):
        """
        Return the sum of the values of all inserted keys that start with
        ``prefix`` (0 if no key has that prefix).
        :type prefix: str
        :rtype: int
        """
        current = self.root
        for char in prefix:
            pos = ord(char) - ord('a')
            if current is None:
                return 0
            current = current.children[pos]
        return current.value if current is not None else 0
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix) | [
"karanjaswani123@gmail.com"
] | karanjaswani123@gmail.com |
a2098e532178626c84dfc0a6903f249677bf9452 | 66c959e86bc9e44134f905411dcfca772111727c | /read_serial.py | e2542d445cb1b428315d864e69adf5d445cdb4a1 | [] | no_license | Rohan1997/Heat-Map | 18a71c258e6da0c2995b77ae3b2bd59bc7380402 | ba1f1b3cd7dfe8bc20c6716182e9bbb338dc706f | refs/heads/master | 2020-03-09T17:51:54.515814 | 2018-04-21T12:19:44 | 2018-04-21T12:19:44 | 128,918,123 | 0 | 0 | null | 2018-04-10T12:12:20 | 2018-04-10T10:53:23 | Python | UTF-8 | Python | false | false | 2,707 | py | # Basic working code for reading data directly into python
import serial
import time
# This port address is for the serial tx/rx pins on the GPIO header
SERIAL_PORT = '/dev/rfcomm0'
# Set this to the rate of communication between PC and HC-05
SERIAL_RATE = 115200
def char_to_float(units, tens, hundreds, thousands):
    """Convert four ASCII digit characters (least- to most-significant)
    into the number they encode.

    Bug fix: the original multiplied the digit *characters* themselves
    (``1000 * '1'`` is string repetition in Python), producing a huge
    concatenated string instead of a number.  Each digit is now converted
    with ``int()`` before the positional weighting.
    """
    return (1000 * int(thousands) + 100 * int(hundreds)
            + 10 * int(tens) + int(units))
def main():
    """Continuously read 9 four-digit sensor values per frame from the HC-05
    serial link and redraw the Gaussian heat map after each frame.

    NOTE(review): this function uses np, plt, CM, inputdata, X, Y, n1, xMean,
    yMean, kx, ky, img, gridsize, height and width, all of which are created
    as globals in the __main__ block below; np/plt/CM themselves are never
    imported anywhere in this file (numpy/matplotlib imports appear missing).
    """
    ser = serial.Serial(SERIAL_PORT, SERIAL_RATE)
    while True:
        reading = ser.readline().decode('utf-8')
        # reading becomes a string of the form "0121\n0345\n0004\n0112\n0812\n"
        # reading is a string...do whatever you want from here
        print(reading)
        # Each sensor value occupies 4 ASCII digits; the slicing below feeds
        # them to char_to_float least-significant digit first.
        for i in range(9):
            read = char_to_float(reading[3+i*4], reading[2+i*4], reading[1+i*4], reading[0+i*4])
            print(read, " ",i)
            inputdata[i] = read
        #count = (count + 1)%9
        # Superpose one 2-D Gaussian per sensor, weighted by its reading.
        P1 = np.zeros((height,width))
        amplitude = inputdata
        for i in range(n1):
            P1 = P1 + amplitude[i]*np.exp( (-(X-xMean[i])**2)/(kx[i]*kx[i]) -((Y-yMean[i])**2)/(ky[i]*ky[i]) )
        P1[img>30] = 0
        x = X.ravel()
        y = Y.ravel()
        z = P1.ravel()
        plt.hexbin(x, y, C=z, gridsize=gridsize, cmap=CM.jet, bins=None)
        plt.axis([x.min(), x.max(), y.min(), y.max()])
        plt.pause(0.01)
        plt.draw()
        plt.pause(1)
        time.sleep(3)
if __name__ == "__main__":
    # NOTE(review): np, cv2, Image, plt and CM are used below but never
    # imported in this file — numpy, OpenCV, PIL and matplotlib imports
    # appear to be missing.
    count = 0
    inputdata = np.zeros(9)  # latest reading from each of the 9 sensors
    img = cv2.imread('left.png',0)  # grayscale mask; bright pixels get zeroed
    im = Image.open('left.png')
    width, height = im.size
    print(width,height)
    x = np.linspace(0, width, width)
    y = np.linspace(0, height, height)
    X, Y = np.meshgrid(x, y) #Generate x,y matrix
    n1 = 9
    # Per-sensor Gaussian centres (xMean/yMean), spreads (kx/ky) and the
    # initial amplitudes used for the first static plot.
    xMean = [72 ,120 ,192 ,188 ,107 ,201 ,135 ,199 ,177 ]
    yMean = [122 ,75 ,54 ,150 ,188 ,251 ,320 ,366 ,412 ]
    kx = [60 ,70 ,70 ,90 ,70 ,70 ,46 ,45 ,90]
    ky = [60 ,70 ,70 ,90 ,70 ,70 ,46 ,45 ,90]
    kMean = [60 ,70 ,70 ,90 ,70 ,70 ,46 ,45 ,90]
    amplitude = [200 ,900 ,800 ,150 ,850 ,742 ,249 ,245 ,900 ]
    P1 = np.zeros((height,width))
    for i in range(n1):
        P1 = P1 + amplitude[i]*np.exp( (-(X-xMean[i])**2)/(kx[i]*kx[i]) -((Y-yMean[i])**2)/(ky[i]*ky[i]) )
    P1[img>30] = 0
    x = X.ravel()
    y = Y.ravel()
    z = P1.ravel()
    gridsize=200
    plt.subplot(111)
    plt.hexbin(x, y, C=z, gridsize=gridsize, cmap=CM.jet, bins=None)
    plt.axis([x.min(), x.max(), y.min(), y.max()])
    cb = plt.colorbar()
    cb.set_label('mean value')
    plt.ion()  # interactive mode so main()'s redraws don't block
    plt.pause(0.01)
    plt.draw()
main() | [
"noreply@github.com"
] | noreply@github.com |
37d16ddeb7663fc42eb684c9fd238cc1286dc69c | 6c202bfadef2d80242567be70e2437e384a96b20 | /IO/IO.py | b7ef9cb9e50b01ac979792f2d15848dbfe3327fd | [] | no_license | hoylemd/Python | 257acecc1bc6c0e21d7c9bffc2d3b8861add9dab | 39e3863c87d83883d15e5db39a5fd5ce605b6ebb | refs/heads/master | 2018-12-28T20:33:16.497465 | 2012-10-15T02:44:36 | 2012-10-15T02:44:36 | 1,898,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | # read a string from the terminal
# NOTE(review): Python 2 script — raw_input() does not exist in Python 3
# (use input()); the assignment also shadows the builtin name `str`.
str = raw_input()
# print it back out!
print str | [
"hoylemd@gmail.com"
] | hoylemd@gmail.com |
3c07bcfba5b64326ebbc13e40028b85a23b263a6 | 0d0fc8fbe57a2ea04e4c8b1193f7a222b4885c0d | /guppypy/x86_64.py | 28359221129c6d2386d71238d824fe5c16d3443b | [
"MIT"
] | permissive | alexpatel/guppypy | 350ccabfe3419e381fb8ca12aaa853958c2fd09a | 99e8cee2bceaa39922dfef047ae631e5e6cac941 | refs/heads/master | 2021-01-01T17:11:16.838570 | 2017-08-01T00:16:07 | 2017-08-01T00:16:07 | 98,016,177 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | from pysmt.shortcuts import Symbol, Int, And, GE, LE, LT
from pysmt.typing import INT
# x86_64 BarrelfishOS user-space system call argument conventions.
# Conjunction of: every register must get a valid user-stack slot, plus the
# load-order dependency chain of the x86 syscall callframe.
machine_description = lambda regs: And(
    # each register in regs has to be loaded from user stack
    And(reg.has_valid_stack_slot() for reg in regs.values()),
    # x86 syscall callframe load dependency graph
    regs['%rcx'].precedes(regs['%r11']),
    regs['%r11'].precedes(regs['%rbx']),
    regs['%rbx'].precedes(regs['%rbp']),
    regs['%rbp'].precedes(regs['%rax']),
    # fix: the %rax -> %r15 constraint was listed twice; And() is idempotent,
    # so dropping the duplicate leaves the formula unchanged.
    regs['%rax'].precedes(regs['%r15']),
    regs['%r15'].precedes(regs['%r14']),
    regs['%r14'].precedes(regs['%r13']),
    regs['%r13'].precedes(regs['%r12']),
    regs['%r12'].precedes(regs['%r9']),
    regs['%r9'].precedes(regs['%r8']),
    regs['%r8'].precedes(regs['%r10']),
)
class X86StackRegister(object):
    """
    An X86StackRegister is a logical atom. It stores a mapping a from x86_64
    register name to an slot index into the user-space stack arguments loaded into
    the kernel stack in a system call handler.
    """
    # Valid kernel-stack slot indices for user-stack arguments.
    stack = range(0, 12)
    register_names = [
        '%r10', '%r11', '%r12', '%r13', '%r14', '%r15', '%r8', '%r9', '%rax', '%rbp',
        '%rbx', '%rcx'
    ]
    def __init__(self, name):
        assert name in X86StackRegister.register_names
        self.name = name
        # SMT integer variable representing this register's stack slot.
        self.sym = Symbol(name, INT)
    # condition: is a register that needs to be loaded from user-space stack
    def has_valid_stack_slot(self):
        return And(GE(self.sym, Int(min(self.stack))),
                   LE(self.sym, Int(max(self.stack))))
    # condition: needs to be loaded from user-space stack before register
    def precedes(self, register):
        return LT(self.sym, register.sym)
    def pushq(self):
        """Assembly line that pushes this register."""
        return ' pushq %s' % (self.name)
    @classmethod
    def get_stack_registers(cls):
        """Fresh atom for every known register name."""
        return {name: cls(name) for name in cls.register_names}
    def get_stack_index(self, model):
        """Concrete slot index assigned to this register in an SMT model."""
        return model.get_py_value(self.sym)
    @classmethod
    def pushqs(cls, registers):
        """Join the pushq lines for *registers*, wrapped in marker comments.

        Fix: the original used a Python 2 ``print`` statement, which is a
        SyntaxError under Python 3; the parenthesized single-argument form
        behaves identically on both interpreters.
        """
        asm = '\n'.join([reg.pushq() for reg in registers])
        block = '/* SYNTHESIZED_%s */\n'
        asm = block % 'START' + asm + '\n' + block % 'END'
        print(' ' + asm.replace('\n', '\n '))
        return asm
| [
"patelalex02@gmail.com"
] | patelalex02@gmail.com |
05a3ec96169afb4787eb493000b49e279c8e524a | b6fe842749ca288b5e7f048c149b04f035f62b93 | /mydb/pymongo_sort.py | 04d1b772002037998bc33da0a68299c6aeb45b5f | [] | no_license | zxcvbnm123xy/leon_python | c8fa74dd0186402b9edf7466f9a28a6fa586b17c | b68f5d5e8240c5a99ec7c155fb85f816ac0d54d1 | refs/heads/master | 2020-03-27T03:03:43.610701 | 2018-11-14T09:17:15 | 2018-11-14T09:17:15 | 145,836,887 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # 加载依赖
# Load dependencies
import pymongo
# Create the client connection (local MongoDB on the default port)
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
# Select database mongo_python and collection sites
# (mongo shell equivalent: use mongo_python)
mp = myclient["mongo_python"]
sites = mp["sites"]
# Sort by alexa descending, then name ascending; shell equivalent:
# db.sites.find().sort({"alexa": -1, "name": 1})
siteList = sites.find({}, {"_id": 0, "name": 1, "alexa": 1})\
    .sort([("alexa", pymongo.DESCENDING), \
    ("name", pymongo.ASCENDING)])
for site in siteList:
    print(site)
| [
"737878501@qq.com"
] | 737878501@qq.com |
2912d2a93ef0093a5fc03526ab6241d7cb22a290 | 5f4a544e4903bc8fcfcd67e18a8a615e76b36974 | /dog.py | 5bb3af7c0a7e6898df925d697416d2ed0d61731b | [] | no_license | tkkohei/hangman | a6d4fbbccd0c5a2c4488206649cf662a04c13dac | 6e4d20366fae171fd5fa8391c93ceceedd29eebb | refs/heads/master | 2020-09-07T00:08:23.779349 | 2019-11-10T03:39:28 | 2019-11-10T03:39:28 | 220,597,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,136 | py | class Dog:
    def __init__(self, name, age, weight):
        # name: display name; age: years; weight: pounds (used by bark()).
        self.name = name
        self.age = age
        self.weight = weight
def bark(self):
if self.weight > 29:
print(self.name, 'says "WOOF WOOF"')
else:
print(self.name, 'says "woof woof"')
    def human_years(self):
        # Rough dog-to-human age conversion: 7 human years per dog year.
        return 7 * self.age
    def walk(self):
        # Announce that the dog is out walking.
        print(self.name, 'is walking')
    def __str__(self):
        # Human-readable description used by print().
        return "I'm a dog named " + self.name
def print_dog(dog):
    """Print a one-line summary of *dog*'s age and weight."""
    fields = (dog.name + "'s", 'age is', dog.age, 'and weight is', dog.weight)
    print(*fields)
class ServieceDog(Dog):
    """A service dog with a handler; while working it neither walks freely
    nor barks.  (NOTE(review): class name typo and the ``hanlder`` attribute
    typo are kept verbatim — callers may depend on them.)"""
    def __init__(self, name, age, weight, handler):
        Dog.__init__(self, name, age, weight)
        self.hanlder = handler
        self.is_working = False
    def walk(self):
        if not self.is_working:
            Dog.walk(self)
        else:
            print(self.name, 'is helping its handler', self.hanlder, 'walk')
    def bark(self):
        if not self.is_working:
            Dog.bark(self)
        else:
            print(self.name, 'says, "I can\'t bark, I\'m working')
class Frisbee:
    """A coloured frisbee that a FrisbeeDog can catch and give back."""
    def __init__(self, color):
        self.color = color
    def __str__(self):
        return "I'm a " + self.color + ' frisbee'
class FrisbeeDog(Dog):
    """A dog that can catch and return a frisbee; while holding one it can
    neither bark nor walk."""
    def __init__(self, name, age, weight):
        Dog.__init__(self, name, age, weight)
        self.frisbee = None  # frisbee currently held, or None
    def bark(self):
        if not self.frisbee:
            Dog.bark(self)
        else:
            print("I can't bark, I have a frisbee in my mouth.")
    def walk(self):
        if not self.frisbee:
            Dog.walk(self)
        else:
            print("I can't walk")
    def catch(self, frisbee):
        """Hold *frisbee* (replacing any previously held one)."""
        self.frisbee = frisbee
        print(self.name, 'caught', frisbee.color, 'frisbee.')
    def give(self):
        """Hand over the held frisbee, or None when not holding one."""
        if not self.frisbee:
            print('Have no frisbee')
            return None
        handed_over, self.frisbee = self.frisbee, None
        print('Give the frisbee')
        return handed_over
    def __str__(self):
        suffix = ' and I have a frisbee' if self.frisbee else ''
        return "I'm a dog named " + self.name + suffix
class Hotel:
    """A boarding kennel that only accepts Dog instances.

    Bug fix: ``self.walker`` was never initialized in ``__init__``, so calling
    ``walking_service()`` before ``hire_walker()`` raised AttributeError.
    """
    def __init__(self, name):
        self.name = name
        self.kernel = {}    # dog name -> Dog currently boarding
        self.walker = None  # hired DogWalker, if any
    def check_in(self, dog):
        """Board *dog* if it is a Dog; reject anything else."""
        if isinstance(dog, Dog):
            self.kernel[dog.name] = dog
            print(dog.name, 'check in')
        else:
            print('Sorry, only dogs are allowed into')
    def check_out(self, name):
        """Release and return the dog registered under *name*, or None."""
        if name in self.kernel:
            dog = self.kernel.pop(name)
            # NOTE(review): "ckecked" typo kept verbatim in the output message.
            print(dog.name + ' ckecked out')
            return dog
        print('Sorry,', name, ' is not boarding at', self.name)
        return None
    def barktime(self):
        """Every boarding dog barks once."""
        for dog in self.kernel.values():
            dog.bark()
    def hire_walker(self, walker):
        """Employ *walker* if it is a DogWalker; reject anything else."""
        if isinstance(walker, DogWalker):
            self.walker = walker
        else:
            print('Sorry,', walker.name, ' is not a Dog Walker')
    def walking_service(self):
        """Have the hired walker (if any) walk all boarding dogs."""
        if self.walker:
            self.walker.walk_the_dogs(self.kernel)
class Cat:
    """A named cat that can meow (and is not welcome at the dog hotel)."""
    def __init__(self, name):
        self.name = name
    def meow(self):
        print(self.name, 'Says, Meow')
class Person:
    """A named person; base class for DogWalker."""
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return "I'm a person and my name is " + self.name
class DogWalker(Person):
    """A Person who can walk a kennel full of dogs."""
    def __init__(self, name):
        Person.__init__(self, name)
    def walk_the_dogs(self, dogs):
        """Walk every dog in the *dogs* mapping (name -> Dog)."""
        for name in dogs:
            dogs[name].walk()
def test_code():
    """Ad-hoc demo exercising Dog/ServieceDog/FrisbeeDog, the Hotel and a
    DogWalker; earlier experiments are kept below as commented-out code."""
    # codie = Dog('Codie', 12, 38)
    # jackson = Dog('Jackson', 9, 12)
    # #print_dog(codie)
    # #print_dog(jackson)
    # print(codie.name + "'s age in human years is ", codie.human_years())
    # print(jackson.name + "'s age in human years is ", jackson.human_years())
    # dude = FrisbeeDog('Dude', 5, 20)
    # blue_frisbee = Frisbee('blue')
    # print(dude)
    # dude.bark()
    # dude.catch(blue_frisbee)
    # dude.bark()
    # print(dude)
    # frisbee = dude.give()
    # print(frisbee)
    # print(dude)
    codie = Dog('Codie', 12, 38)
    jackson = Dog('Jackson', 9, 12)
    sparky = Dog('Sparky', 2, 14)
    rody = ServieceDog('Rody', 8, 38, 'Joseph')
    rody.is_working = True
    frisbee = Frisbee('red')
    dude = FrisbeeDog('Dude', 5, 20)
    # dude.catch(frisbee)
    # kitty = Cat('Kitty')
    # Board everyone (sparky is intentionally never checked in below).
    hotel = Hotel('Doggie Hotel')
    hotel.check_in(codie)
    hotel.check_in(jackson)
    hotel.check_in(rody)
    hotel.check_in(dude)
    # hotel.check_in(kitty)
    # dog = hotel.check_out(codie.name)
    # print('Checked out', dog.name, 'who is', dog.age, 'and', dog.weight, 'lbs')
    # dog = hotel.check_out(jackson.name)
    # print('Checked out', dog.name, 'who is', dog.age, 'and', dog.weight, 'lbs')
    # dog = hotel.check_out(rody.name)
    # print('Checked out', dog.name, 'who is', dog.age, 'and', dog.weight, 'lbs')
    # dog = hotel.check_out(dude.name)
    # print('Checked out', dog.name, 'who is', dog.age, 'and', dog.weight, 'lbs')
    # dog = hotel.check_out(sparky.name)
    # print('Checked out', dog.name, 'who is', dog.age, 'and', dog.weight, 'lbs')
    # hotel.barktime()
    # codie.walk()
    # jackson.walk()
    # rody.walk()
    # dude.walk()
    # Hire a walker and have every boarding dog walked once.
    joe = DogWalker('joe')
    hotel.hire_walker(joe)
    hotel.walking_service()
test_code() | [
"kenjimitana@gmail.com"
] | kenjimitana@gmail.com |
35cb7793738f6b54b9ee1b5787d61123bef9078c | e65a4dbfbfb0e54e59787ba7741efee12f7687f3 | /devel/py-plex/files/setup.py | 8dd6df636fa7769ec66d39646010d04752d975bc | [
"BSD-2-Clause"
] | permissive | freebsd/freebsd-ports | 86f2e89d43913412c4f6b2be3e255bc0945eac12 | 605a2983f245ac63f5420e023e7dce56898ad801 | refs/heads/main | 2023-08-30T21:46:28.720924 | 2023-08-30T19:33:44 | 2023-08-30T19:33:44 | 1,803,961 | 916 | 918 | NOASSERTION | 2023-09-08T04:06:26 | 2011-05-26T11:15:35 | null | UTF-8 | Python | false | false | 124 | py | from distutils.core import setup
# FreeBSD ports packaging stub for Plex; %%PORTVERSION%% is substituted by
# the ports framework at build time.
setup(
    name = 'plex',
    version = "%%PORTVERSION%%",
    packages = ['Plex'],
)
| [
"perky@FreeBSD.org"
] | perky@FreeBSD.org |
cc844affbd029334ae04e31e99055fa06ee0a668 | d7c9392aab11ffa4e94846198e4793015e22d48c | /ApproximatePatternCount.py | a45b6fc1e967aee52a73aecc9596717b4819e709 | [] | no_license | vk208/BMP | 46fb787339e53eb851b87baffa0b2f626ec5eff0 | 83cd09fb8d7ef463eb5b6cc6eefb564dc86a1bf8 | refs/heads/master | 2021-01-17T12:26:04.476703 | 2016-03-04T14:36:11 | 2016-03-04T14:36:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | __author__ = 'Mohammad Yousuf Ali, aliyyousuf@gmail.com, fb.com/aliyyousuf'
# Our goal now is to modify our previous algorithm for the Frequent Words Problem in order to find
# DnaA boxes by identifying frequent k-mers, possibly with mismatches. Given input strings Text and
# Pattern as well as an integer d, we extend the definition of PatternCount to the function
# ApproximatePatternCount(Pattern, Text, d). This function computes the number of occurrences of
# Pattern in Text with at most d mismatches. For example:
# Sample Input:
# GAGG
# TTTAGAGCCTTCAGAGG
# Sample Output:
# 2
def ApproximatePatternCount(Pattern, Text, d):
    """Count the alignments of ``Pattern`` in ``Text`` whose Hamming
    distance from ``Pattern`` is at most ``d`` mismatches."""
    matches = 0
    window = len(Pattern)
    for start in range(len(Text) - window + 1):
        mismatches = sum(1 for a, b in zip(Text[start:start + window], Pattern)
                         if a != b)
        if mismatches <= d:
            matches += 1
    return matches
| [
"aliyyousuf@gmail.com"
] | aliyyousuf@gmail.com |
2376aebcad3f0702f3a564e5846c0a36edd5a57b | 2bbf7151418e9272da01440626b6a6c1e6aa1944 | /code/utils/tools.py | 353f7fc3cf498c6e392805ca0c7b48332638a7fe | [] | no_license | aliparac/AIMOS | b458c3c213095601cb38ac5ab109413f57278ea8 | 579b239f9ce3650c8f924c034128bfe9e7f67bdf | refs/heads/master | 2023-03-16T08:13:39.109936 | 2021-01-26T11:04:14 | 2021-01-26T11:04:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,985 | py | import numpy as np
import torch
from torch.nn import functional as F
from utils import architecture
#%%
def choose_architecture(config):
    """Instantiate the UNet variant named by config['architecture'];
    the number of output classes is the training CID count plus background."""
    variants = {
        'Unet64': architecture.UNet64,
        'Unet128': architecture.UNet128,
        'Unet256': architecture.UNet256,
        'Unet512': architecture.UNet512,
        'Unet768': architecture.UNet768,
        'Unet1024': architecture.UNet1024,
    }
    arch_name = config['architecture']
    if arch_name not in variants:
        raise ValueError("Model not implemented")
    num_classes = len(config["trainingCIDs"]) + 1
    model = variants[arch_name](num_classes=num_classes)
    return model
class RunningAverage():
    """
    A simple class that maintains the running average of a quantity
    """
    def __init__(self):
        self.steps = 0   # number of observations folded in so far
        self.total = 0   # running sum of all observations
        self.avg = None  # current mean (None before the first update)
    def update(self, val):
        """Incorporate one new observation into the running average."""
        self.steps += 1
        self.total += val
        self.avg = self.total / float(self.steps)
    def __call__(self):
        """Return the current average (None before any update)."""
        return self.avg
def get_metrics(vol_gt, vol_segmented, config):
    """Per-class segmentation metrics (currently DICE only) between a
    ground-truth and a predicted 3-D label volume."""
    num_classes = len(config['trainingCIDs']) + 1  # +1 for BG class (CID = 0)
    onehot_shape = (num_classes, vol_gt.shape[0], vol_gt.shape[1],
                    vol_gt.shape[2])
    oneHotGT = np.zeros(onehot_shape)
    oneHotSeg = np.zeros(onehot_shape)
    metrics = {}
    for classname, CID in config['trainingCIDs'].items():
        oneHotGT[CID][vol_gt == CID] = 1
        oneHotSeg[CID][vol_segmented == CID] = 1
        metrics[classname] = {"DICE": dice(oneHotGT[CID], oneHotSeg[CID])}
    return metrics
def dice(gt, seg):
    """
    Sørensen–Dice overlap between two binary masks.

    Fixes:
    - ``np.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    - the smoothing ``eps`` was added to the intersection *before* doubling
      (``2*(I+eps)/(G+S+eps)``), which yields 2.0 for two empty masks; the
      standard ``(2*I+eps)/(G+S+eps)`` form returns 1.0 instead and keeps
      the score within [0, 1].

    :param gt: ground-truth binary mask (array-like of 0/1).
    :param seg: predicted binary mask of the same shape.
    :return: Dice coefficient in [0, 1].
    """
    eps = 0.0001
    gt = np.asarray(gt).astype(bool)
    seg = np.asarray(seg).astype(bool)
    intersection = np.logical_and(gt, seg)
    return (2 * intersection.sum() + eps) / (gt.sum() + seg.sum() + eps)
def dice_loss(label, logits, eps=1e-7):
    """Computes the Sørensen–Dice loss.
    Note that PyTorch optimizers minimize a loss. In this case, we would like to maximize
    the dice loss so we return the negated dice loss.
    Args:
        label: a tensor of shape [B, 1, H, W].
        logits: a tensor of shape [B, C, H, W]. Corresponds to
            the raw output or logits of the model.
        eps: added to the denominator for numerical stability.
    Returns:
        a two-element list: ``[1 - mean dice (the scalar loss to minimize),
        per-class dice scores as a detached NumPy array]``.
    Taken from: https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py#L78
    """
    num_classes = logits.shape[1]
    # One-hot encode the integer labels: [B, H, W, C] -> [B, C, H, W].
    # NOTE(review): torch.eye(...) is allocated on the CPU — confirm device
    # handling when label/logits live on the GPU.
    true_1_hot = torch.eye(num_classes)[label.squeeze(1)]
    true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
    probas = F.softmax(logits, dim=1)
    true_1_hot = true_1_hot.type(logits.type())
    # Reduce over the batch and spatial dims, keeping the class dimension.
    dims = (0,) + tuple(range(2, label.ndimension()))
    intersection = torch.sum(probas * true_1_hot, dims)
    cardinality = torch.sum(probas + true_1_hot, dims)
    dice_loss = (2. * intersection / (cardinality + eps))
    return [(1 - dice_loss.mean()), dice_loss.detach().cpu().numpy()]
def sigmoid(x):
    '''
    Exact Numpy equivalent for torch.sigmoid()
    '''
    # logistic function: 1 / (1 + e^(-x))
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def sortAbyB(listA, listB):
    '''
    sorted_listA = sortAbyB(listA, ListB)
    Sorts list A by values of list B (alphanumerically)

    Bug fix: the original sorted raw (b, a) tuples, so ties in listB fell
    through to comparing listA elements — raising TypeError for
    uncomparable items (e.g. dicts) and tie-breaking inconsistently with
    the early-return branch.  Sorting with ``key`` on the B value alone is
    stable and never touches the A elements.
    '''
    if listB == sorted(listB):
        return listA  # a) no sorting needed; b) also avoids error when all elements of A are identical
    pairs = sorted(zip(listB, listA), key=lambda pair: pair[0])
    return [a for _, a in pairs]
| [
"noreply@github.com"
] | noreply@github.com |
36ae4bf7acd0fbbd49bef683b159f3a04f225313 | 9c395cde750544f92eaaaa7e2c38981ee05c023b | /tetris.py | 7726ba1a31a3036f7d6e8e6f40654cf5400f8730 | [] | no_license | MukulBarai/Block-Puzzle | a4c39e703d7393b1dbade71a9067f4fbd3f5bd0f | eb16527ca159906bf688e9439c6bcc8baf6e0bc7 | refs/heads/master | 2020-12-21T20:44:09.937514 | 2020-01-29T10:13:02 | 2020-01-29T10:13:02 | 236,554,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,363 | py | import pygame
import time
import random
import copy
#Declaring Variables
col = 20            # playfield width in cells
row = 40            # playfield height in cells
blockW = 15         # pixel size of one cell
width = col * blockW
height = row * blockW
gameOver = False
matrices = []       # grid of Block cells, indexed [row][col] by checkCollision
#Variable for clock
startTime = time.time()
clockTime = 0.25    # seconds between gravity ticks
nTime = 0
#Initializing pygame
pygame.init()
window = pygame.display.set_mode((width, height))
pygame.display.update()
#Declaring Classes
class ShapeO:
    """2x2 square tetromino."""
    width = 2
    height = 2
    color = (52, 235, 207)
    shape = [
        [1, 1],
        [1, 1]
    ]
    def __init__(self, xCor, yCor):
        # Grid position of the piece's top-left cell.
        self.xCor = xCor
        self.yCor = yCor
class ShapeZ:
    """Z-shaped tetromino (3 wide, 2 tall)."""
    color = (58, 235, 52)
    width = 3
    height = 2
    shape = [
        [1, 1, 0],
        [0, 1, 1]
    ]
    def __init__(self, xCor, yCor):
        # Grid position of the piece's top-left cell.
        self.xCor = xCor
        self.yCor = yCor
class ShapeS:
    """S-shaped tetromino (3 wide, 2 tall)."""
    width = 3
    height = 2
    color = (58, 235, 52)
    shape = [
        [0, 1, 1],
        [1, 1, 0]
    ]
    def __init__(self, xCor, yCor):
        # Grid position of the piece's top-left cell.
        self.xCor = xCor
        self.yCor = yCor
class ShapeI:
    """I-shaped tetromino (1 wide, 4 tall)."""
    width = 1
    height = 4
    color = (235, 52, 76)
    shape = [
        [1],
        [1],
        [1],
        [1]
    ]
    def __init__(self, xCor, yCor):
        # Grid position of the piece's top-left cell.
        self.xCor = xCor
        self.yCor = yCor
class ShapeL:
    """The L tetromino: vertical bar with a foot to the right."""
    width = 2
    height = 3
    color = (235, 52, 223)
    shape = [
        [1, 0],
        [1, 0],
        [1, 1],
    ]

    def __init__(self, xCor, yCor):
        self.xCor = xCor
        self.yCor = yCor
class ShapeF:
    """The mirrored L (J) tetromino: vertical bar with a foot to the left."""
    width = 2
    height = 3
    color = (235, 52, 223)
    shape = [
        [0, 1],
        [0, 1],
        [1, 1],
    ]

    def __init__(self, xCor, yCor):
        self.xCor = xCor
        self.yCor = yCor
class ShapeT:
    """The T tetromino (3 wide, 2 tall)."""
    width = 3
    height = 2
    color = (235, 232, 52)
    shape = [
        [1, 1, 1],
        [0, 1, 0],
    ]

    def __init__(self, xCor, yCor):
        self.xCor = xCor
        self.yCor = yCor
# Fixed seed makes the piece sequence identical on every run.
random.seed(5850)
# Pool that newShape() draws from.  NOTE(review): ShapeF appears twice, so it
# spawns twice as often as the other pieces -- confirm this is intentional.
shapes = [ShapeZ, ShapeS, ShapeT, ShapeF, ShapeL, ShapeI, ShapeF, ShapeO]
#Single block
class Block:
    """One playfield cell; holds the fill state and color of a landed piece."""
    def draw(self, xCor, yCor):
        # Called as block.draw(i, j) from draw(): xCor is the ROW index and
        # yCor the COLUMN index.  pygame rects take (x, y, w, h), so the
        # column index supplies the horizontal pixel offset.
        pygame.draw.rect(window, self.color,
                    (yCor*self.height, xCor*self.width, self.width, self.height)
                )
    width = blockW
    height = blockW
    # True once a landed piece occupies this cell (set by fillMatrices()).
    fill = False
    color = (0, 250, 0)
def newShape():
    """Spawn a random tetromino at column 9, row 0."""
    piece_class = shapes[random.randrange(len(shapes))]
    return piece_class(9, 0)
#Global shape variable
# The piece currently falling; fillMatrices() replaces it when it lands.
shape = newShape()
#Method for rotation
def getRotation(shape):
    """Return a deep copy of `shape` rotated 90 degrees clockwise.

    The piece's anchor is shifted by half the dimension change so the
    rotation pivots roughly around its centre.
    """
    rotated = copy.deepcopy(shape)
    rotated.shape = [
        [shape.shape[shape.height - 1 - r][c] for r in range(shape.height)]
        for c in range(shape.width)
    ]
    rotated.width, rotated.height = shape.height, shape.width
    shift = round((rotated.height - rotated.width) / 2)
    rotated.xCor += shift
    rotated.yCor -= shift
    return rotated
def checkCollision(shape):
    """True when `shape` lies fully on the board and overlaps no landed block."""
    if (shape.xCor <= -1
            or shape.xCor + shape.width >= col
            or shape.yCor + shape.height >= row):
        return False
    for r in range(shape.height):
        for c in range(shape.width):
            if shape.shape[r][c] and matrices[shape.yCor + r][shape.xCor + c].fill:
                return False
    return True
def rotate():
    """Replace the falling piece with its rotation when the rotation fits."""
    global shape
    candidate = getRotation(shape)
    if checkCollision(candidate):
        shape = candidate
#Method for moving left and right
def checkLeft(shape):
global col
if shape.xCor - 1 <= -1:
return False
for i in range(shape.height):
for j in range(shape.width):
if shape.shape[i][j] and matrices[shape.yCor+i][shape.xCor+j-1].fill:
return False
return True
def checkRight(shape):
    """True when the piece can shift one column to the right."""
    if shape.xCor + shape.width >= col:
        return False
    return not any(
        shape.shape[r][c] and matrices[shape.yCor + r][shape.xCor + c + 1].fill
        for r in range(shape.height)
        for c in range(shape.width)
    )
def moveLeft(shape):
    """Shift the piece one column left when nothing blocks it."""
    if checkLeft(shape):
        shape.xCor -= 1
def moveRight(shape):
    """Shift the piece one column right when nothing blocks it."""
    if checkRight(shape):
        shape.xCor += 1
def checkBottom(shape):
    """True when the piece can drop one more row."""
    if shape.yCor + shape.height >= row:
        return False
    return not any(
        shape.shape[r][c] and matrices[shape.yCor + r + 1][shape.xCor + c].fill
        for r in range(shape.height)
        for c in range(shape.width)
    )
def goDown(shape):
    """Advance the piece one row toward the bottom of the board."""
    shape.yCor += 1
def fillMatrices():
    """Stamp the landed piece into the board grid, then spawn the next piece."""
    global shape
    for i in range(shape.height):
        for j in range(shape.width):
            if shape.shape[i][j]:
                # Copy the piece's color into the cell and mark it occupied.
                matrices[shape.yCor+i][shape.xCor+j].color = shape.color
                matrices[shape.yCor+i][shape.xCor+j].fill = True
    shape = newShape()
def clearRow(row):
    """Remove board row `row` by shifting every row above it down one step.

    Fixes two defects in the original: after the shift, rows 0 and 1 held
    references to the SAME Block objects (so filling one cell later filled
    both rows), and the top row was never reset to empty cells.
    """
    global matrices
    for i in range(row, 0, -1):
        for j in range(col):
            matrices[i][j] = matrices[i-1][j]
    # Fresh, empty blocks for the now-vacant top row (also breaks the alias).
    matrices[0] = [Block() for _ in range(col)]
    return
def checkPoint():
    """Scan the board and clear every completely filled row.

    The original broke out of the column loop at the first empty cell and
    then tested `j + 1 == col`; when the empty cell was the LAST column
    (j == col - 1) that test still passed and a non-full row was cleared.
    Testing all cells explicitly removes the false positive.
    """
    for i in range(row):
        if all(matrices[i][j].fill for j in range(col)):
            clearRow(i)
    return
def drawShape(shape):
    """Draw the falling piece cell-by-cell at its grid position (in pixels)."""
    for i in range(shape.height):
        for j in range(shape.width):
            if shape.shape[i][j]:
                pygame.draw.rect(window, shape.color,
                    (shape.xCor*blockW + j*blockW,
                    shape.yCor*blockW + i*blockW, blockW, blockW)
                )
def draw():
    """Repaint the frame: background, landed blocks, falling piece, grid lines."""
    #Drawing Background
    pygame.draw.rect(window, (0, 0, 0), (0, 0, width, height))
    global row, col
    for i in range(row):
        for j in range(col):
            block = matrices[i][j]
            if block.fill:
                block.draw(i, j)
    drawShape(shape)
    # Thin dark grid lines drawn over everything.
    for i in range(row):
        pygame.draw.line(window, (17, 17, 18), (0, i*blockW), (col*blockW, i*blockW))
    for i in range(col):
        pygame.draw.line(window, (17, 17, 18), (i*blockW, 0), (i*blockW, row*blockW))
    pygame.display.update()
#Fillig matrices with initial block
def initialize():
    """Populate `matrices` with a row x col grid of empty Block cells."""
    for _ in range(row):
        matrices.append([Block() for _ in range(col)])
def update():
    """One gravity tick: drop the piece, or lock it in, score, and respawn."""
    global shape, clockTime
    if not checkBottom(shape):
        fillMatrices()
        checkPoint()
        clockTime = 0.25  # cancel any soft drop once the piece lands
        return
    goDown(shape)
def run():
    """Main loop: handle input, redraw, and apply gravity every clockTime seconds."""
    global gameOver, nTime, startTime, clockTime
    while gameOver is False:
        startTime = time.time()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameOver = True
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    moveLeft(shape)
                elif event.key == pygame.K_DOWN:
                    # Soft drop: speed gravity up until update() resets it.
                    clockTime = 0.025
                elif event.key == pygame.K_RIGHT:
                    moveRight(shape)
                elif event.key == pygame.K_UP:
                    rotate()
        draw()
        # Accumulate elapsed wall time; tick gravity once per clockTime.
        timeNow = time.time()
        nTime += timeNow - startTime
        if nTime > clockTime:
            update()
            nTime = 0
    pygame.quit()
    quit()
#Running the game
# Build the empty board, then enter the blocking game loop (exits Python on quit).
initialize()
run()
"mukulbarai82@gmail.com"
] | mukulbarai82@gmail.com |
ec85b4f9323253ff48527e8e39cfd5d87818c3b9 | 2b01bc8e7af1cecbb9fcc19003e39cac34302824 | /backend/backend/settings.py | 60771b66ca8921eb1aa87be8d6b52d26174f3ec6 | [] | no_license | pushp1997/django-react-ToDo | 19db10806c1f84214fc0661cda4df40ede9af5a1 | b4347ab10b97860881028eebeb11a3eb68066f6e | refs/heads/master | 2020-07-06T14:31:37.702231 | 2019-08-19T14:10:18 | 2019-08-19T14:10:18 | 203,050,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,322 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'm%!@x1g8qtbym)sqcd3&^_8^seic(rw1wyp)851u!(obwdr&%b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'rest_framework',
    'todo',
]
# CorsMiddleware must come before CommonMiddleware per django-cors-headers docs.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# whitelisting localhost:3000 because that's where frontend will be served
CORS_ORIGIN_WHITELIST = (
    'http://localhost:3000',
)
"pushptyagi1@gmail.com"
] | pushptyagi1@gmail.com |
bf7f467480751bd795551a06f18da4ced2b45588 | bbba463ab46369d22adc35dd1a819d4c8a994973 | /preprocess.py | 5a0c2d376fdec3bfcf3176859f651b99d9a65a5d | [] | no_license | ajdapretnar/SegTra | f74a7c13cbe8569a87193f160d70fc91b205ebeb | 76747824283dbf2f674f39e5a0e7ef10d5191d19 | refs/heads/master | 2020-04-02T17:29:13.797909 | 2019-05-24T09:27:22 | 2019-05-24T09:27:22 | 154,659,337 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,343 | py | import re
import pickle
import json
import lemmagen
from lemmagen.lemmatizer import Lemmatizer
from nltk.tokenize import RegexpTokenizer
import ufal_udpipe as udpipe
def remove_comments(corpus):
    """Strip parenthesised and bracketed asides from both text columns."""
    for pattern in (r"\((.*?)\)", r"\[(.*?)\]"):
        for column in ('Questions', 'Answers'):
            corpus[column] = corpus[column].apply(
                lambda text: re.sub(pattern, '', text))
    return corpus
def tokenize(corpus):
    """Lower-cased word tokens (\\w+ runs) for every document in `corpus`."""
    word_tokenizer = RegexpTokenizer(r'\w+')
    tokenized = []
    for doc in corpus:
        tokenized.append([w.lower() for w in word_tokenizer.tokenize(doc)])
    return tokenized
def remove_stopwords(tokens):
    """Drop Slovenian stopwords (plus the filler 'um') from each token list."""
    # with-statement closes the handle; the original leaked it via
    # pickle.load(open(...)).
    with open('cache/stopwords.pkl', 'rb') as fh:
        stopwords = pickle.load(fh)
    stopwords.append('um')
    # Set membership is O(1) instead of scanning the list for every token.
    stopset = set(stopwords)
    return [[token for token in doc if token not in stopset] for doc in tokens]
#temporary standardization
def standardize(tokens):
    """Map colloquial word forms to standard forms using cache/slovar.pkl."""
    # with-statement closes the handle; the original leaked it via
    # pickle.load(open(...)).
    with open('cache/slovar.pkl', 'rb') as fh:
        slovar = pickle.load(fh)
    # dict.get(token, token) == "replace when known, otherwise keep as-is".
    return [[slovar.get(token, token) for token in doc] for doc in tokens]
def lemmatize(tokens):
    """Lemmatize every token with the Slovene lemmagen dictionary."""
    lemmatizer = Lemmatizer(dictionary=lemmagen.DICTIONARY_SLOVENE)
    lemmatized = []
    for doc in tokens:
        lemmatized.append([lemmatizer.lemmatize(token) for token in doc])
    return lemmatized
def pos_tag(tokens):
    """Return [(form, UPOS-tag), ...] per document, tagged with UDPipe.

    NOTE(review): each token is wrapped in its own single-word Sentence, so
    the tagger sees no sentence context -- confirm this is acceptable for
    the downstream NOUN/VERB filter.
    """
    # noinspection PyTypeChecker
    model = udpipe.Model.load("model/slovenian-ssj-ud-2.3-181115.udpipe")
    # 'epe' output is JSON, parsed below to pull out form and UPOS.
    output_format = udpipe.OutputFormat.newOutputFormat('epe')
    tagged_tokens = []
    for doc in tokens:
        temp_doc = []
        for token in doc:
            sentence = udpipe.Sentence()
            sentence.addWord(token)
            model.tag(sentence, model.DEFAULT)
            output = output_format.writeSentence(sentence)
            output = json.loads(output)
            temp_doc.append(
                (output["nodes"][0]["form"], output["nodes"][0]["properties"]["upos"]))
        tagged_tokens.append(temp_doc)
    return tagged_tokens
def make_dict():
    """Build the colloquial->standard word map and cache it as a pickle."""
    slovar = {}
    with open('utils/slovenian-colloquial-dict.txt', 'r') as f:
        for line in f.read().splitlines():
            parts = line.split(', ')  # split once instead of twice per line
            slovar[parts[0]] = parts[1]
    # with-statement closes the output handle; the original leaked it via
    # pickle.dump(..., open(...)).
    with open('cache/slovar.pkl', 'wb') as out:
        pickle.dump(slovar, out)
def make_stopwords():
    """Cache the stopword list from utils/slovenian-stopwords.txt as a pickle."""
    with open('utils/slovenian-stopwords.txt', 'r') as f:
        stopwords = [i.strip(' ') for i in f.read().splitlines()]
    # with-statement closes the output handle; the original leaked it via
    # pickle.dump(..., open(...)).
    with open('cache/stopwords.pkl', 'wb') as out:
        pickle.dump(stopwords, out)
def preprocessing_pipeline(corpus):
    """Run the full clean/tokenize/tag pipeline over the Q&A corpus.

    Returns (cleaned corpus, question tokens, answer tokens), keeping only
    NOUN and VERB lemmas.  Both token lists are also cached as pickles.
    """
    corpus = remove_comments(corpus)

    def _noun_verb_tokens(column):
        # clean -> tokenize -> standardize -> lemmatize -> POS-tag, then keep
        # only nouns and verbs (same chain the original repeated twice).
        tagged = pos_tag(lemmatize(standardize(remove_stopwords(tokenize(
            column)))))
        return [[token for token, tag in doc if tag in ('NOUN', 'VERB')]
                for doc in tagged]

    q_tokens = _noun_verb_tokens(corpus['Questions'])
    a_tokens = _noun_verb_tokens(corpus['Answers'])
    # with-statements close the output handles; the original leaked them via
    # pickle.dump(..., open(...)).
    with open('cache/q_tokens.pkl', 'wb') as fh:
        pickle.dump(q_tokens, fh)
    with open('cache/a_tokens.pkl', 'wb') as fh:
        pickle.dump(a_tokens, fh)
    return corpus, q_tokens, a_tokens
| [
"ajdapretnar@gmail.com"
] | ajdapretnar@gmail.com |
0d497de579e262500807394359bad38278397bee | 90ea49bb872623a0fc117632df0232f26e078033 | /redis_main.py | 6e22c3d257bc2ca5b18745dc8e70d73601aefcc6 | [
"MIT"
] | permissive | JX-Wang/Redis-servcie-T | e4612967a30c8c18ba5fa51aac91482e5f4f591a | 26005d0b15defa8628220512046aadc94765bd5b | refs/heads/master | 2020-06-17T04:39:28.779495 | 2019-07-09T12:35:38 | 2019-07-09T12:35:38 | 195,799,949 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | # usr/bin/env python
# coding:utf-8
"""
redis Notes
============
Date@2019/7/9
Author@Wangjunxiong
"""
import redis
# Smoke-test connectivity to the remote Redis instance (file is Python 2:
# note the print statement below).
try:
    r = redis.Redis(host="39.106.165.57", port=6379, db=0)
    # NOTE(review): the GET result is discarded -- presumably this is only a
    # reachability check; confirm no value is actually needed.
    r.get("msg")
# NOTE(review): catching bare Exception is broad; redis.RedisError would be
# a narrower choice here.
except Exception as e:
    print "Connect Error as -> ", str(e)
| [
"1411349759@qq.com"
] | 1411349759@qq.com |
f6e16e606ad5e19332738646c1e75716288366be | a462e55dceb569502bb7c9e4460522af296f49b6 | /code/lisa-caffe-public/examples/new_lrcn/data_verify.py | c3391d21b202bd9d718f12b6a9597bc30dafdd2e | [
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-public-domain"
] | permissive | giserh/cluster_video | 1e83ff130cf80e409815ba5fe8e89f2715422591 | 8c5f4157b76f768138b7c19261d396cdf9122866 | refs/heads/master | 2021-01-24T12:36:18.183421 | 2017-03-13T01:22:43 | 2017-03-13T01:22:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import numpy as np
import scipy.io as io
import cv2 as cv
import sys, os
import caffe
import pdb
# Loads a trained LRCN snapshot and inspects the lstm1-drop blob (Python 2
# script: print statements below).
model_path = os.path.join( '/cs/vml2/xla193/cluster_video/output/UCF-101/snapshots_lstm_RGB/_iter_2500.caffemodel' )
print "Start loading network.."
# load net
net = caffe.Net('/cs/vml2/xla193/cluster_video/code/lisa-caffe-public/examples/new_lrcn/train_test_lstm_RGB.prototxt',
                model_path,
                caffe.TEST)
net.forward()
out = net.blobs['lstm1-drop'].data
lab = net.blobs['label'].data
# NOTE(review): debugger breakpoint left in -- the script stops here.
pdb.set_trace()
print "Output shape: {}".format( str(out.shape) )
print "Output maximum: {}".format( str(out.max()) )
print "Output minimum: {}".format( str(out.min()) )
# result = Image.fromarray( (visual * 255).astype(np.uint8))
# NOTE(review): `visual` is never defined anywhere in this file (only in the
# commented-out lines), so this savemat call raises NameError when reached.
io.savemat('out.mat', {"data": visual} )
# result.save('out.png') | [
"xla193@sfu.ca"
] | xla193@sfu.ca |
dfab13a39ba70b6e52dbdf34305264365e154d8a | e4b7348387bbda5e4b44b69cfd015f52db892e5a | /Scripts/Workflow/make_1d_vmodel.py | 63f087d378942f64a32d6aaf8fb7d5f1f88e3624 | [] | no_license | calum-chamberlain/kaikoura-aftershocks | 9199e6949a08cf7cbd1f810faaf85325e86b736f | ec0ae1f100bc2e686f42c32fefeb18805d19ccfb | refs/heads/master | 2023-04-14T02:55:31.928872 | 2021-06-25T23:47:11 | 2021-06-25T23:47:11 | 310,416,172 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | """
Script to extract an average 1D model from Donna's NZ3D model.
"""
import numpy as np
import pandas as pd
MODEL_FILE = "vlnzw2p2dnxyzltln.tbl.txt" # NZ3D 2.2 from https://zenodo.org/record/3779523#.YCRFaOrRVhF
def extract_one_d(
    min_x: float = 72.0,
    max_x: float = 110.0,
    min_y: float = -100.0,
    max_y: float = 80.0,
) -> pd.DataFrame:
    """
    Extract a one-d spatial average velocity model from NZ3D.
    Parameters
    ----------
    min_x:
        Minimum X value in NZ3D co-ordinate system
    max_x:
        Maximum X value in NZ3D co-ordinate system
    min_y:
        Minimum Y value in NZ3D co-ordinate system
    max_y:
        Maximum Y value in NZ3D co-ordinate system
    Returns
    -------
    DataFrame with columns Depth, vp, vs: the mean Vp/Vs over the selected
    region at every depth slice.  Side effect: pops up a map of the region
    via plot_region().
    """
    # header=1: the table's column names are on the file's second line.
    v_model = pd.read_csv(MODEL_FILE, header=1, delim_whitespace=True)
    x_mask = np.logical_and(
        v_model["x(km)"] <= max_x, v_model["x(km)"] >= min_x)
    y_mask = np.logical_and(
        v_model["y(km)"] <= max_y, v_model["y(km)"] >= min_y)
    mask = np.logical_and(x_mask, y_mask)
    region = v_model[mask]
    # Make a quick plot showing the region
    # Corner rows of the selected region (bottom-left, bottom-right, ...).
    bl = region[np.logical_and(region["x(km)"] == region["x(km)"].min(),
                               region["y(km)"] == region["y(km)"].min())]
    br = region[np.logical_and(region["x(km)"] == region["x(km)"].min(),
                               region["y(km)"] == region["y(km)"].max())]
    tl = region[np.logical_and(region["x(km)"] == region["x(km)"].max(),
                               region["y(km)"] == region["y(km)"].min())]
    tr = region[np.logical_and(region["x(km)"] == region["x(km)"].max(),
                               region["y(km)"] == region["y(km)"].max())]
    # Reduce each corner to a (latitude, longitude) tuple.
    bl = (bl.Latitude.to_list()[0], bl.Longitude.to_list()[0])
    br = (br.Latitude.to_list()[0], br.Longitude.to_list()[0])
    tl = (tl.Latitude.to_list()[0], tl.Longitude.to_list()[0])
    tr = (tr.Latitude.to_list()[0], tr.Longitude.to_list()[0])
    plot_region(corners=[bl, tl, tr, br])
    depths = sorted(list(set(region["Depth(km_BSL)"])))
    # Get average vp and vs for each depth
    vp, vs = [], []
    for depth in depths:
        vp.append((region[region["Depth(km_BSL)"] == depth]).Vp.mean())
        vs.append((region[region["Depth(km_BSL)"] == depth]).Vs.mean())
    out = pd.DataFrame(data={"Depth": depths, "vp": vp, "vs": vs})
    return out
def plot_region(corners):
    """Show a coastline map with the region polygon given by `corners`.

    `corners` is an ordered list of (latitude, longitude) tuples; the first
    point is appended again at the end to close the polygon.  Blocks until
    the matplotlib window is dismissed.
    """
    # Imports kept local so the heavy plotting stack loads only when used.
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    fig = plt.figure()
    ax = fig.add_subplot(projection=ccrs.PlateCarree())
    lats, lons = zip(*corners)
    lats, lons = list(lats), list(lons)
    # Pad the view by one degree on every side.
    ax.set_extent((min(lons) - 1, max(lons) + 1, min(lats) - 1, max(lats) + 1),
                  crs=ccrs.PlateCarree())
    ax.coastlines()
    lons.append(lons[0])
    lats.append(lats[0])
    ax.plot(lons, lats, transform=ccrs.PlateCarree())
    ax.gridlines(draw_labels=True, dms=False, x_inline=False, y_inline=False)
    plt.show()
if __name__ == "__main__":
    # Extract the default-region 1D model and dump it in the fixed-width
    # "depth vp vs" format GrowClust expects.
    vmodel = extract_one_d()
    # Write to GrowClust format
    with open("vzmodel.txt", "w") as f:
        for _, row in vmodel.iterrows():
            f.write(f"{row.Depth:5.1f} {row.vp:.2f} {row.vs:.2f}\n")
| [
"calum.chamberlain@vuw.ac.nz"
] | calum.chamberlain@vuw.ac.nz |
04e81eb61b7efc691cb29caabfe5ffa22242caf9 | 0d0ba103050607a7e4994ee5253140f3e9c13f6f | /8-5.py | 8c2870cec98c8f81bebb1d51d146a603532b999d | [] | no_license | yinyangguaiji/yygg-C1 | ac30d8ae5fd3a51985723acd527844860e658dd6 | 08ef81785a44715680b2f8543ac665424f8ce63b | refs/heads/master | 2023-03-12T09:25:22.747657 | 2021-02-19T07:24:33 | 2021-02-19T07:24:33 | 332,170,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | def describe_city(city,country='China'):
print(city + ' is in '+ country + '.')
describe_city('Chengdu')
describe_city('Beijing')
describe_city('London','Britain')
| [
"noreply@github.com"
] | noreply@github.com |
73be6368949f3dee451811616a66cda102ccc108 | abf3a3712cf12d1de4b2657e7dd38063d2f83534 | /ephys_functions.py | e226c6fab775ff3e3d34363aa91a2e2de9a52221 | [] | no_license | anupgp/ephys_analysis | 2876ca4371c5ced8b7d3db4e6c068bdf284b54b1 | 888ace8a440509de6007bd8e8cacdf6f0e5ddf81 | refs/heads/master | 2021-08-11T06:33:37.531018 | 2021-08-08T00:16:06 | 2021-08-08T00:16:06 | 246,890,724 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | import pandas as pd
# contains all the accessory functions for ephys data analysis
# ---------------------------------------------------------
# joins values from multiple columns into a single column
# input: dataframe and the columns to combine
# return: a single column with joined values from the other columns
def combine_columns(df, columns):
    """Join the given DataFrame columns row-wise into one '_'-separated
    Series named "joined".  Numeric columns are truncated to int before
    being stringified; everything else is used as-is.
    """
    converted = pd.DataFrame()
    for name in columns:
        series = df[name]
        if series.dtype.kind in 'biufc':
            converted[name] = series.astype(int).astype(str)
        else:
            converted[name] = series
    joined = converted[columns].agg('_'.join, axis=1)
    return joined.rename("joined")
| [
"anupgp@gmail.com"
] | anupgp@gmail.com |
ed377733d23fb5c6cc2ea478a613ec2ca714d70f | 8ea915b38738ec717e2ac763786005877cfc0064 | /Practice/practice02.py | 7726d69d700c832ee7b92bfdac92b968faa4571a | [] | no_license | jimmy43333/Python_Learning | 374e3c2695200ab3020e423d02513a6866530adb | 01672e23838827ac77dcf1c49034bde015d14be5 | refs/heads/master | 2023-08-14T17:54:18.630258 | 2023-07-12T14:07:28 | 2023-07-12T14:10:07 | 104,288,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import random as rd
def helper(member_list, team_list):
    """Scoping/closure practice function.

    Sorts `member_list` in place (team members first), prints intermediate
    state, and returns `best_friend`.  Because update_best() assigns to its
    OWN local `best_friend` (the global/nonlocal lines are commented out),
    the outer variable is never rebound and this always returns "TK".
    """
    # print(best_friend)
    best_friend = "TK"
    # One random score per member, sorted descending -- so after the later
    # member_list.sort() the pairing in judge() is arbitrary.
    score = [ rd.randint(1, 100) for ele in member_list ]
    score.sort(reverse=True)
    print(score)
    def is_team_member(who):
        # Sort key: (0, name) for team members, (1, name) for the rest, so
        # team members come first, alphabetical within each group.
        if who in team_list:
            return (0, who)
        return (1, who)
    def judge():
        print(score)
        output = []
        for m, s in zip(member_list, score):
            if s - rd.randint(1, 10) > 60:
                output.append(m)
        return output
    def update_best(choose_function):
        # global best_friend
        # nonlocal best_friend
        new_best = choose_function()
        if new_best:
            print(new_best)
            # Assigns a local; without the nonlocal above, helper's
            # best_friend is untouched.
            best_friend = new_best[0]
    # member_list.sort()
    member_list.sort(key=is_team_member)
    update_best(judge)
    print(score)
    print(member_list)
    return best_friend
# best_friend = "Cris"
# members = ["Ashelly", "Ben", "Simon", "Jimmy", "Bruce", "Vicky", "Terisa"]
# teams = ["Ashelly", "Ben", "Simon", "Jimmy"]
# choose = helper(members, teams)
# print(choose)
# print(best_friend)
def calculate():
    """try/except/else demo: nothing in the try body raises, so the else
    branch runs and the function returns 200."""
    result = 100
    try:
        pass
    except Exception as exc:
        raise ValueError("Input valid") from exc
    else:
        result = 200
    return result
def handle():
    """Calls calculate(); on success adds 10 before printing the result."""
    try:
        value = calculate()
    except ValueError:
        print("Calculate Error")
        value = 200
    except Exception:
        print("Exception")
        return
    else:
        value += 10
    print(value)
def outer(index):
    """Closure/scoping demo; calling it crashes (see note in inner)."""
    flag = True
    def inner():
        # Local shadow: outer's `flag` is unaffected.
        flag = False
        print(index)
        # `a` is assigned below, which makes it local to inner -- so this
        # read raises UnboundLocalError (the module-level `a = 1` is hidden).
        print(a)
        a = 10
    inner()
    print(flag)
a = 1
# Prints 5, then raises UnboundLocalError inside inner() as noted above.
outer(5)
| [
"jimmy43333@gmail.com"
] | jimmy43333@gmail.com |
d7cb7e3d8b6e6939858dfba41a52bad6f3ce5423 | fc9dcc3d291f1c4f7e419cdf06ca1b56ec0fe94c | /Quiz_Age - 4.py | 6853b1d94ec828a607ef933220179bff0c9c5983 | [] | no_license | WilliamStacey/GeneralKnowledgeQuiz | d4d1020fb96ffc03b263ebbeae28be70cdf27f02 | 4a22fb0143b6e9bd3967c17a848b71fea87d74be | refs/heads/main | 2023-08-16T02:06:29.596529 | 2021-10-21T23:14:19 | 2021-10-21T23:14:19 | 357,341,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #ask age
def age():
    """Prompt until the user enters a numeric age; exit if not in 10..18.

    The local `age` shadows the function's own name inside the body (harmless
    here).  SystemExit is raised -- not caught -- for out-of-range ages, so
    the interpreter exits and the final print below never runs in that case.
    """
    age = ''
    while True:
        try:
            age = int(input("Enter your age: "))
            if age < 10:
                print("You are too young to take this quiz.")
                raise SystemExit
            elif age > 18:
                print("You are too old to take this quiz.")
                raise SystemExit
            break
        except ValueError:
            print("Please enter numbers only.")
age()
# Reached only when the entered age was between 10 and 18 inclusive.
print("program complete")
| [
"noreply@github.com"
] | noreply@github.com |
a351f9a383b7908bc81f7ee0129447f3b5624fb0 | ff652d85c1b2a6a22cea6bbd84e5d9f96b43c64b | /Final Deliverables/Code/ResultsVisualizer.py | b679753a857d9e45de272e3cd3c6c4c59cf537d9 | [] | no_license | ironhide23586/Map-Matcher | a3b9ecb7103e9500473eafbc6d62dc44093a7ca2 | c838ed59db9c3987913a669bb7204505dd4640d4 | refs/heads/master | 2021-03-24T12:05:19.572010 | 2018-11-10T05:22:47 | 2018-11-10T05:22:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | import numpy as np
import matplotlib.pyplot as plt
import csv
def show_hist(arr, title_name, xlabel, show_plot=True):
    """Histogram `arr` into 30 bins and optionally draw a bar chart.

    Returns (counts, left_bin_edges) with the edges truncated to int.
    """
    hist, bins = np.histogram(arr, 30)
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the supported equivalent.
    bins = bins[:-1].astype(int)
    if show_plot:
        ind = np.arange(1, bins.shape[0] + 1)  # the x locations for the groups
        maxs = hist
        width = 0.35  # the width of the bars (note: the bars below use .2)
        fig, ax = plt.subplots()
        rects_mid = ax.bar(ind, maxs, .2, color='r')
        ax.set_ylabel('Count')
        ax.set_xlabel(xlabel)
        ax.set_xticks(ind - .5*.2)
        ax.set_xticklabels(bins)
        plt.title(title_name)
        plt.show()
    return hist, bins
def get_last_col_from_csv(matched_points_csv_reader):
    """Collect the final column of every row as a float ndarray."""
    return np.array([float(row[-1]) for row in matched_points_csv_reader])
def visualize_distribution(fname, hist_title, xlabel):
    """Plot the histogram of the last CSV column of `fname` with a count
    annotation appended to the title.

    NOTE(review): the file is opened in 'rb' (Python 2 csv style); under
    Python 3, csv.reader needs text mode (open(fname, 'r', newline='')) --
    confirm which interpreter this script targets.
    """
    file_fstream = open(fname, 'rb')
    file_reader = csv.reader(file_fstream)
    points = get_last_col_from_csv(file_reader)
    hist_title += '\nNumber of data points = ' + str(points.shape[0])
    show_hist(points, hist_title, xlabel)
    file_fstream.close()
if __name__ == "__main__":
    # Plot the four partition-6467 result distributions in sequence (each
    # call blocks until its matplotlib window is closed).
    visualize_distribution('Partition6467MatchedPoints.csv',
                           'Probe-Point to link distance distribution in matched points result file',
                           'Distance from Link (in metres)')
    visualize_distribution('Partition6467ComputedSlopes.csv',
                           'Slope (in degrees) distribution as computed from matched probes & links',
                           'Slope (in degrees)')
    visualize_distribution('Partition6467LinkDataSlopes.csv',
                           'Slope (in degrees) distribution as obtained from provided Link Data file',
                           'Slope (in degrees)')
    visualize_distribution('Partition6467ComputedVsLinkDataSlopeComparisons.csv',
                           'Distribution of the absolute difference between computed slopes (from matched probes) and actual slopes',
                           '|Actual Link Slope - Computed Slopes (from matched probes)| (in degrees)')
"sbiswas7@hawk.iit.edu"
] | sbiswas7@hawk.iit.edu |
da2897495af6a0514b4c1cc5803cd5a24e8c543c | 548b89cb1f7d9c953dedc61674525a0d1eadaf86 | /fdp/services.py | e79e13901564d42437bd4c42fdcd55299ab9ca95 | [
"BSD-2-Clause"
] | permissive | ScottishCovidResponse/fdp | c5a0b8ee8df9874f40fc6c20e376cc2fa1d410a8 | 5d67649048fffd19b9bf46e85545490001eb0b05 | refs/heads/main | 2023-04-25T00:24:01.565036 | 2021-05-13T15:57:57 | 2021-05-13T15:57:57 | 359,503,677 | 0 | 0 | BSD-2-Clause | 2021-05-21T13:59:29 | 2021-04-19T15:13:49 | Python | UTF-8 | Python | false | false | 865 | py | from pathlib import Path
import requests
from data_pipeline_api.registry.download import download_from_config_file
def registry_installed():
    """Return True when the user's ~/.scrc directory exists."""
    return Path.home().joinpath(".scrc").exists()
def registry_running():
    """Return True when a local registry answers on port 8000 with HTTP 200."""
    try:
        response = requests.get("http://localhost:8000/api?")
    except requests.exceptions.ConnectionError:
        return False
    return response.status_code == 200
def token():
    """
    TODO: Use the registry's get_token endpoint for this
    """
    return Path("token.txt").read_text()
def download_data(config):
    """
    Download any data required by read: from the remote data store.
    """
    download_from_config_file(config, token())
| [
"nathan.cummings@ukaea.uk"
] | nathan.cummings@ukaea.uk |
f5ece496682064b3b75d259ee4b9a085cfcd7867 | 3333ed6da323c4dc3a4dab7f6a82f5d84db53f62 | /quiz_tests/quiz_1.py | 1fc684cac42394b07352042560b453a45436e68e | [] | no_license | NateOwl1108/assignments | 4823d49852eb2dad62b11d6be502cdb1f60a85fd | 4238420f262bcb0fc39b0e80ed08e5ddf97a26d3 | refs/heads/master | 2023-05-09T18:14:59.726344 | 2021-06-19T21:46:09 | 2021-06-19T21:46:09 | 291,084,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | def separate_into_words(sentence):
return sentence.split(' ')
print(separate_into_words("look the dog ran fast"))
separate_into_words("look the dog ran fast")
def reverse_word_order(sentence):
    """Return `sentence` with its words in reverse order (also prints it)."""
    words = separate_into_words(sentence)
    reverse_sentence = " ".join(words[::-1])
    print(reverse_sentence)
    return reverse_sentence
assert reverse_word_order("look the dog ran fast") == "fast ran dog the look"
| [
"natedog112004@gmail.com"
] | natedog112004@gmail.com |
77e62c1c823d2937af521648a473b6f93b4731f7 | 2834298c6a50ff7cfada61fb028b9fd3fc796e85 | /desenvolvimento/programas/magic_square.py | bc855866757b1a2961b256809c6df378cdebb02e | [] | no_license | ernestojfcosta/IPRP_LIVRO_2013_06 | 73841c45d000dee7fc898279d4b10d008c039fd0 | a7bb48745ad2fbfeb5bd4bc334cb7203d8f204a4 | refs/heads/master | 2021-01-22T05:00:57.868387 | 2013-06-07T11:00:55 | 2013-06-07T11:00:55 | 10,548,127 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,291 | py | # =============
# magic_square_
# =============
#
# *Simple operations with magic squares.*
#
# Copyright (c) 2007 `Alec Mihailovs`_ <alec@mihailovs.com>
# All rights reserved. Licensed under the `MIT License`_ .
#
# .. _magic_square: http://mihailovs.com/Alec/Python/magic_square.html
#
# .. _`Alec Mihailovs`: http://mihailovs.com/Alec/
#
# .. _`MIT License`: http://opensource.org/licenses/mit-license.php
#
########################################################################
r"""
*Simple operations with magic squares.*
**Prerequisites:**
- NumPy_
**Functions:**
- `ismagic(A)` -- test whether *A* is a magic square.
- `magic(N)` -- create an *N* by *N* magic square.
- `magic_constant(A)` -- calculate the magic constant of *A*.
**Examples:**
>>> from magic_square import *
>>> print magic(3)
[[8 1 6]
[3 5 7]
[4 9 2]]
>>> magic_constant()
15
>>> ismagic(magic(4))
True
>>> magic_constant()
34
>>> magic_constant([1, 1, 1, 1])
2
>>> ismagic([[1, 2], [3, 4]])
False
**Notes:**
(1) Function `magic(N)` produces the same magic squares as Matlab
and Octave command ``magic(N)``. The speed of calculations for *N*
close to 1000 is about 100--200 times faster than in Octave.
(2) Integer arithmetic in NumPy_ is done modulo ``2**32``. That
can give a false positive in `ismagic(A)` for integer arrays with
overflowing in row, column, or diagonal sums. To avoid that and to
avoid wrong answers in `magic_constant(A)`, in such cases the
array's ``dtype`` should be changed to ``'int64'``, or if
``'int64'`` also overflows, to ``'object'``.
**Screenshots:**
That's how it looks in SAGE_:
|SAGE|
And that's how it looks in IDLE_:
|IDLE|
**Author:**
`Alec Mihailovs`_ <alec@mihailovs.com>
**Last Updated:**
February 22, 2007.
.. _NumPy: http://www.scipy.org/Download
.. _SAGE: http://sage.math.washington.edu/sage/
.. _IDLE: http://www.python.org/idle/
.. |SAGE| image:: sage.png
.. |IDLE| image:: idle.png
.. _`Alec Mihailovs`: http://mihailovs.com/Alec/
"""
__version__ = "0.2"
"""Development Status :: 3 - Alpha"""
__author__ = "Alec Mihailovs <alec@mihailovs.com>"
"""
`Alec Mihailovs`_ <alec@mihailovs.com>
.. _`Alec Mihailovs`: http://mihailovs.com/Alec/
"""
__docformat__ = "restructuredtext"
"""http://docutils.sourceforge.net/rst.html"""
from numpy import arange, asarray, flipud, r_, tile
from math import sqrt
_constant = None # to avoid an exception in magic_square._constant
"""Last calculated magic constant."""
def ismagic(A):
    r"""
    Test whether the given array is a magic square.

    **Input:** *A* -- a 2D array, or a sequence interpretable as one.
    A 1D sequence whose length is a perfect square is first reshaped to
    the corresponding N by N array.

    **Output:** ``True`` if every row sum, column sum and both diagonal
    sums of *A* are equal; ``False`` if *A* is 2D (or square-length 1D)
    but not magic; ``NotImplemented`` when the number of dimensions is
    not 1 or 2, or a 1D input's size is not a perfect square.

    Side effect: on success the magic constant is cached in the module
    level ``_constant`` (read back by ``magic_constant()``).

    **Notes:** NumPy integer arithmetic can wrap modulo ``2**32``; for
    arrays whose sums may overflow, convert the ``dtype`` to ``'int64'``
    or ``'object'`` first to avoid false positives.
    """
    global _constant
    _constant = None  # may be commented out if desirable
    a = asarray(A)
    if a.ndim == 2:
        m = flipud(a).trace()  # anti-diagonal sum: the candidate constant
        t = (r_[a.sum(axis=0), a.sum(axis=1), a.trace()] == m).all()
        if t == True:  # not "is" because t is a NumPy boolean
            _constant = m
            return True  # plain bool literal, not numpy.bool_
        else:
            return False
    elif a.ndim == 1:
        # BUG FIX: the original kept math.sqrt's float and passed it to
        # reshape; modern NumPy rejects non-integer shapes.  Round to the
        # nearest integer side length and verify it squares back exactly.
        s = int(sqrt(a.size) + 0.5)
        if a.size == s * s:
            return ismagic(a.reshape(s, s))
        else:
            return NotImplemented
    else:
        return NotImplemented
def magic_constant(A=None):
    r"""
    Magic constant of the magic square.

    **Input:** *A* -- a 2D array, or a sequence interpretable as one.
    When omitted, the constant cached by the most recent `magic(n)` or
    `ismagic(A)` call is returned.

    **Output:** the common row/column/diagonal sum if *A* is a magic
    square (in the array's dtype; may be a plain Python integer for a
    cached value from a large `magic(n)`), and ``None`` otherwise --
    including when `ismagic` returns ``NotImplemented``, hence the
    explicit ``is True`` comparison below.

    **Notes:** NumPy integer arithmetic wraps modulo ``2**32`` for
    32-bit dtypes, so overflowing sums can yield a wrong constant;
    convert the array to ``'int64'`` or ``'object'`` dtype to avoid
    that, e.g. ``magic_constant(array(magic(5000), dtype='int64'))``.
    """
    if A is None or ismagic(A) is True: # avoiding NotImplemented
        return _constant
def magic(N):
    r"""
    Create an *N* by *N* magic square.
    **Input:**
     *N* -- an integer in some form, may be float or quotted.
    **Output:**
     an ``'int32'`` *N* by *N* array -- the same magic square as in
     Matlab and Octave ``magic(N)`` commands. In particular, the
     Siamese method is used for odd *N* (but with a different
     implementation.)
    **Examples:**
    >>> from magic_square import *
    >>> magic(4)
    array([[16,  2,  3, 13],
           [ 5, 11, 10,  8],
           [ 9,  7,  6, 12],
           [ 4, 14, 15,  1]])
    >>> magic_constant()
    34
    >>> magic(5.0)          # can be float
    array([[17, 24,  1,  8, 15],
           [23,  5,  7, 14, 16],
           [ 4,  6, 13, 20, 22],
           [10, 12, 19, 21,  3],
           [11, 18, 25,  2,  9]])
    >>> print magic('6')    # can be quotted
    [[35  1  6 26 19 24]
     [ 3 32  7 21 23 25]
     [31  9  2 22 27 20]
     [ 8 28 33 17 10 15]
     [30  5 34 12 14 16]
     [ 4 36 29 13 18 11]]
    >>> magic(2)            # consistent with Octave
    Traceback (most recent call last):
    TypeError: No such magic squares exist.
    >>> magic(0)
    array([], shape=(0, 0), dtype=int32)
    >>> magic_constant()    # the empty sum is 0
    0
    **Notes:**
    The calculations for *n* close to 1000 are about 100--200
    times faster than in Octave.
    """
    # _constant caches the magic constant so magic_constant() can report
    # it without re-summing the array.
    global _constant
    n = int(N)
    if n < 0 or n == 2: # consistent with Octave
        raise TypeError("No such magic squares exist.")
    elif n%2 == 1:
        # Odd order: vectorized Siamese (De la Loubere) construction, as
        # promised in the docstring.  b = n^2 + 1; constant = n*(n^2+1)/2.
        m = n>>1
        b = n*n + 1
        _constant = n*b>>1
        return (tile(arange(1,b,n),n+2)[m:-m-1].reshape(n,n+1)[...,1:]+
                tile(arange(n),n+2).reshape(n,n+2)[...,1:-1]).transpose()
    elif n%4 == 0:
        # Doubly-even order: fill 1..n^2 in row order, then complement
        # (d -> b - d) the cells selected by a fixed 4x4 criss-cross
        # pattern tiled over the whole square.
        b = n*n + 1
        _constant = n*b>>1
        d=arange(1, b).reshape(n, n)
        d[0:n:4, 0:n:4] = b - d[0:n:4, 0:n:4]
        d[0:n:4, 3:n:4] = b - d[0:n:4, 3:n:4]
        d[3:n:4, 0:n:4] = b - d[3:n:4, 0:n:4]
        d[3:n:4, 3:n:4] = b - d[3:n:4, 3:n:4]
        d[1:n:4, 1:n:4] = b - d[1:n:4, 1:n:4]
        d[1:n:4, 2:n:4] = b - d[1:n:4, 2:n:4]
        d[2:n:4, 1:n:4] = b - d[2:n:4, 1:n:4]
        d[2:n:4, 2:n:4] = b - d[2:n:4, 2:n:4]
        return d
    else:
        # Singly-even order (n ≡ 2 mod 4, n > 2): tile four copies of
        # magic(m), m = n/2, then add block offsets and swap selected
        # cells/columns (a Strachey-style composition -- TODO confirm
        # against a reference for the exact variant).
        m = n>>1
        k = m>>1
        b = m*m
        d = tile(magic(m), (2,2)) # that changes the _constant
        # Restore the constant for order n (e.g. n=6: 15*8-6-3 = 111).
        _constant = _constant*8 - n - m
        d[:m, :k] += 3*b
        d[m:,k:m] += 3*b
        d[ k, k] += 3*b
        d[ k, 0] -= 3*b
        d[m+k, 0] += 3*b
        d[m+k, k] -= 3*b
        d[:m,m:n-k+1] += b+b
        d[m:,m:n-k+1] += b
        d[:m, n-k+1:] += b
        d[m:, n-k+1:] += b+b
        return d
##################################################################
# Python 2.5 (r25:51908, Sep 19 2006, 09:52:17) [MSC v.1310 32 bit
# (Intel)] on win32
#
# >>> from magic_square import *
# >>> from time import clock
# >>> t=clock(); a=magic(1000); clock()-t
# 0.0191592494101839
# >>> t=clock(); a=magic(1001); clock()-t
# 0.018718461322123403
# >>> t=clock(); a=magic(1002); clock()-t
# 0.027449660797152831
# >>> t=clock(); ismagic(a); clock()-t
# True
# 0.021589410496389405
#################################################################
# $ ipython
# Python 2.5 (r25:51908, Jan 11 2007, 22:47:00)
# IPython 0.7.3.svn -- An enhanced Interactive Python.
#
# In [1]: from magic_square import *
#
# In [2]: time a=magic(1000)
# CPU times: user 0.02 s, sys: 0.00 s, total: 0.02 s
# Wall time: 0.02
#
# In [3]: time a=magic(1001)
# CPU times: user 0.00 s, sys: 0.01 s, total: 0.01 s
# Wall time: 0.02
#
# In [4]: time a=magic(1002)
# CPU times: user 0.00 s, sys: 0.02 s, total: 0.02 s
# Wall time: 0.03
#
# In [5]: time ismagic(a)
# CPU times: user 0.01 s, sys: 0.00 s, total: 0.01 s
# Wall time: 0.02
################################################################
# $ octave
# GNU Octave, version 2.1.73 (i686-pc-cygwin).
#
# octave:1> t=cputime();a=magic(1000);cputime()-t
# ans = 2
# octave:2> t=cputime();a=magic(1001);cputime()-t
# ans = 4.1410
# octave:3> t=cputime();a=magic(1002);cputime()-t
# ans = 4.9840
################################################################
| [
"ernesto@dei.uc.pt"
] | ernesto@dei.uc.pt |
e03ff8873d0460764ab6c955459e2d3a0795df63 | 7c654433dca32f65dba5986f25e7f34d28a7535e | /virt3/lib/python2.7/warnings.py | 14229a2e4f26e2c5e25308d9952ea1fba0ac149f | [] | no_license | albertan3/virtualenv_test | 3bfab23d3465b530728421a6bd7aa688be905a29 | 72603b1f070ee9b0faeb7adaa26a39e1793234f4 | refs/heads/master | 2020-06-10T07:40:00.269911 | 2017-04-27T23:12:54 | 2017-04-27T23:12:54 | 75,987,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | /Users/alban/anaconda/lib/python2.7/warnings.py | [
"alban@visa.com"
] | alban@visa.com |
fdb85ea027d97f7491476a2ab44e654498f4b677 | da64a4bc36fd794cebce4fd527440b5e1a823a24 | /paging-tlb/homework/tlb_script.py | 0cb52f363747f5172314a0f144356cb1772afec9 | [] | no_license | prolific-dev/Betriebssysteme | 8f3f08feae1ad33231588fd2c84e2eb60d428b40 | a642fb448744653807349105eb3627a43512e86f | refs/heads/main | 2023-08-16T15:53:43.695940 | 2021-09-26T01:15:18 | 2021-09-26T01:15:18 | 349,684,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | from numpy import *
import matplotlib.pyplot as plt
import subprocess
if __name__ == '__main__':
    # Ask how many measurement trials to run per page count.
    print("Anzahl an Veruchen: ")
    nr_trials = int(input())
    print("Starte mit {} Versuchen... (Bitte warten)".format(nr_trials))
    # Build the C benchmark with -O0 so the measured access loop is not
    # optimised away.
    subprocess.run(["gcc", "-O0", "-o", "tlb.out", "tlb.c", "-Wall"])
    # Page counts to benchmark: 2, 4, ..., 8192 (powers of two).
    page_counts = [2**z for z in range(1, 14)]
    timings = []
    for pages in page_counts:
        # BUG FIX: the original passed '-p {}.' with a stray trailing
        # period, which only worked because atoi() on the C side stops
        # at the '.'.
        process = subprocess.run(['./tlb.out', '-p {}'.format(pages), '-t {}'.format(nr_trials)],
                                 stdout=subprocess.PIPE, text=True)
        # tlb.out prints one float: nanoseconds per access.
        timings.append(float(process.stdout))
    plt.figure()
    plt.plot(page_counts, timings, marker='o', linestyle='--', color='blue')
    plt.margins(0)
    plt.xlabel('Frame-Anzahl')
    plt.xscale('log')
    plt.ylabel('Zeit pro Zugriff (ns)')
    plt.title('TLB Zeitmessung')
    plt.savefig('tlb.png', dpi=227)
    plt.show()
    print("tlb.png wurde erfolgreich erzeugt")
| [
"de391ago@htwg-konstanz.de"
] | de391ago@htwg-konstanz.de |
70a3cc57b63eaa1a001d1e47f6d3bb9473736a5c | f1ab57ae527f434dfb4a8db8f8242d67514d104c | /setup.py | 79119d9913116d04a8460dc8b142f6db6d4fbd5e | [] | no_license | abookStore/bookStore | f6de9c00297fe1290f3630a392ec5e2c0786d0b5 | 2c6b478c4c0aad217617b4260b27cf91078d5d08 | refs/heads/master | 2021-05-12T07:31:59.100221 | 2018-05-30T16:20:26 | 2018-05-30T16:20:26 | 117,246,596 | 0 | 3 | null | 2018-01-30T16:27:18 | 2018-01-12T13:52:37 | JavaScript | UTF-8 | Python | false | false | 273 | py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Package metadata for the bookStore web application.
setup(
    name='bookStore',
    # Auto-discover every package under the project root.
    packages=find_packages(),
    # Runtime dependencies: the web framework and its ORM integration.
    install_requires=[
        'Flask',
        'Flask-SQLAlchemy'
    ],
    setup_requires=[],
    # Test-only dependencies (used by `python setup.py test`).
    tests_require=[
        'pytest'
    ]
)
| [
"zhou.xun@qianka.com"
] | zhou.xun@qianka.com |
7125de95fee231cb947372fc2fdf2d5816aca876 | 649cd51d3ce32dd4a89f781a14741d19b703350f | /server/clients/mfcClient.py | 73185ec8c1c16005d6c0e1ad2116c10ebff66a27 | [] | no_license | flybrains/clm | 03ff00c11c49e89f0adc9c2f801d8102fe0577fb | 2b6cfa66175d4d875baefc7a9c0497d912a988d0 | refs/heads/master | 2021-01-08T09:58:36.181550 | 2020-03-12T21:51:42 | 2020-03-12T21:51:42 | 241,995,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,046 | py | import time
import socket
import json
class MFCClient(object):
    """TCP client that feeds mass-flow-controller (MFC) setpoints to the
    local server.

    In replay mode the three flow values are taken straight from the
    incoming message; otherwise they are looked up in the 2D arrays of
    ``lookup_table`` indexed by the (wrapped) x/y position carried in the
    message, subject to the out-of-bounds policy in ``oob_option``.
    """

    def __init__(self, lookup_table=None, oob_option=None, replay=True):
        self.load_json()
        self.host = self.config_data['local_host']
        self.port = self.config_data['local_port']
        self.replay = replay
        # BUG FIX: oob_option was accepted but never stored, so
        # check_conditions() raised AttributeError when it was reached.
        self.oob_option = oob_option
        if not self.replay and lookup_table is not None:
            # Flow lookup maps: [air, channel 1, channel 2].
            self.airchannel = lookup_table[0]
            self.channel1 = lookup_table[1]
            self.channel2 = lookup_table[2]
            self.w = self.airchannel.shape[1]
            self.h = self.airchannel.shape[0]

    def load_json(self):
        """Load host/port configuration.

        NOTE(review): the config path is machine-specific; consider making
        it configurable.
        """
        with open('/home/patrick/Desktop/clm/config.json', 'r+') as j:
            self.config_data = json.load(j)

    def connect(self):
        """Open the TCP connection and announce this client to the server."""
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.connect((self.host, self.port))
        self.client.sendall(bytes("MFCs Connected",'UTF-8'))

    def read_from_server(self):
        """Receive one message.

        Returns '<>' on the shutdown sentinel (or an unparseable payload),
        otherwise a list of floats parsed from the bracketed CSV payload.
        """
        data = self.client.recv(1024).decode()
        if not data=='<>':
            try:
                # Payload looks like "[v0,v1,...]"; strip the brackets.
                data = [float(e) for e in data[1:-1].split(',')]
            except ValueError:
                data = "<>"
        return data

    def send_to_server(self, send_val):
        """Send the flow setpoints back, tagged with the 'mf_' prefix."""
        self.client.sendall(bytes('mf_{}'.format(send_val),'UTF-8'))

    def check_conditions(self, data):
        """Map the position in *data* (x at index 7, y at index 8) to the
        [air, channel1, channel2] flow values, applying the out-of-bounds
        policy.

        BUG FIX: this method previously took no *data* parameter even
        though run() passes one, and its body read an undefined name.
        """
        px = int(data[7])
        py = int(data[8])
        # Out of bounds options
        if (self.oob_option is None) or (self.oob_option.x_choice == 'loop' and self.oob_option.y_choice == 'loop'):
            # Wrap both axes into [-w/2, w/2] x [-h/2, h/2].
            if px > self.w/2:
                while px > self.w/2:
                    px = px - self.w
            elif px < -self.w/2:
                while px < -self.w/2:
                    px = px + self.w
            if py > self.h/2:
                while py > self.h/2:
                    py = py - self.h
            elif py < -self.h/2:
                while py < -self.h/2:
                    py = py + self.h
            send_val = [float(self.airchannel[px,py]), float(self.channel1[px,py]), float(self.channel2[px,py])]
        elif self.oob_option.x_choice == 'constant' and self.oob_option.y_choice == 'constant':
            # TODO(review): unimplemented branch — send_val is left unbound
            # and the return below will raise if this case is hit.
            pass
        elif self.oob_option.x_choice == 'loop' and self.oob_option.y_choice == 'constant':
            # Constant value outside the y bounds, wrap along x otherwise.
            if py > self.h/2:
                send_val = self.oob_option.y_const_hi
            elif py < -self.h/2:
                send_val = self.oob_option.y_const_lo
            else:
                if px > self.w/2:
                    while px > self.w/2:
                        px = px - self.w
                elif px < -self.w/2:
                    while px < -self.w/2:
                        px = px + self.w
                send_val = [float(self.airchannel[px,py]), float(self.channel1[px,py]), float(self.channel2[px,py])]
        elif self.oob_option.x_choice == 'constant' and self.oob_option.y_choice == 'loop':
            # Constant value outside the x bounds, wrap along y otherwise.
            if px > self.w/2:
                send_val = self.oob_option.x_const_hi
            elif px < -self.w/2:
                send_val = self.oob_option.x_const_lo
            else:
                if py > self.h/2:
                    while py > self.h/2:
                        py = py - self.h
                elif py < -self.h/2:
                    while py < -self.h/2:
                        py = py + self.h
                send_val = [float(self.airchannel[px,py]), float(self.channel1[px,py]), float(self.channel2[px,py])]
        return send_val

    def run(self):
        """Main loop: translate incoming positions until the server sends
        the '<>' shutdown sentinel, then echo it and close the socket."""
        while True:
            data = self.read_from_server()
            if data=='<>':
                self.client.sendall(bytes('<>'.format(data),'UTF-8'))
                self.client.close()
                break
            if self.replay:
                # Replay mode: flows come directly from the message.
                send_val = [float(data[2]), float(data[3]), float(data[4])]
            else:
                send_val = self.check_conditions(data)
            self.send_to_server(send_val)
| [
"pstock@rockefeller.edu"
] | pstock@rockefeller.edu |
69e9bebc4513c00a473c70457e1a049832307ad5 | 8ebb138562884f01cae3d3ffaad9501a91e35611 | /dbCruiseKeywords/insertKeywordsAMT09.py | df5628bef408fcbba07deedb761444ed58a7b142 | [] | no_license | simonscmap/DBIngest | 7b92214034e90f8de88b06c17b48f83c769d8d35 | 9ae035cbf7453df375f0af5e920df3880a419107 | refs/heads/master | 2021-07-16T07:12:31.749027 | 2020-08-13T16:28:24 | 2020-08-13T16:28:24 | 200,295,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | import sys
import pycmap
sys.path.append('../')
import insertFunctions as iF
import config_vault as cfgv
import pandas as pd
sys.path.append('../dbCatalog/')
import catalogFunctions as cF
"""-----------------------------"""
""" AMT09 CRUISE KEYWORDS"""
"""-----------------------------"""
# BUG FIX: the whole import/parameter header above was duplicated
# verbatim; the second identical copy has been removed (re-imports and
# repeated identical assignments were no-ops).
# Cruise and target-database parameters for this ingest script.
cruise_name = 'AMT09'
server = 'Rainier'
rawFilePath = cfgv.rep_cruise_keywords_raw
rawFileName = 'AMT09.xlsx'
keyword_col = 'cruise_keywords'
############################
""" Reads in the keyword excel file"""
df = pd.read_excel(rawFilePath + rawFileName)
# Derive keyword sets from the cruise's catalog metadata.
ID = cF.getCruiseID(cruise_name)
prov_df = cF.getLonghurstProv(cruise_name)
ocean_df = cF.getOceanName(cruise_name)
seasons_df = cF.getCruiseSeasons(cruise_name)
months_df = cF.getCruiseMonths(cruise_name)
years_df = cF.getCruiseYear(cruise_name)
details_df = cF.getCruiseDetails(cruise_name)
short_name_df = cF.getCruiseAssosiatedShortName(cruise_name)
# long_name_df = cF.getCruiseAssosiatedLongName(cruise_name)
short_name_syn_df = cF.getShortNameSynonyms(cruise_name)
dataset_name_df = cF.getCruiseAssosiatedDataset_Name(cruise_name)
# Merge each metadata frame into the keyword frame, then normalise it.
df = cF.addDFtoKeywordDF(df, dataset_name_df)
df = cF.addDFtoKeywordDF(df, short_name_syn_df)
df = cF.addDFtoKeywordDF(df, prov_df)
df = cF.addDFtoKeywordDF(df, ocean_df)
df = cF.addDFtoKeywordDF(df, seasons_df)
df = cF.addDFtoKeywordDF(df, months_df)
df = cF.addDFtoKeywordDF(df, years_df)
df = cF.addDFtoKeywordDF(df, details_df)
df = cF.addDFtoKeywordDF(df, short_name_df)
# df = cF.addDFtoKeywordDF(df, long_name_df)
df = cF.removeDuplicates(df)
df = cF.stripWhitespace(df,keyword_col)
df = cF.removeAnyRedundantWord(df)
""" INSERTS INTO tblCruise_Keywords"""
cF.insertCruiseKeywords(ID,df,server)
| [
"norlandrhagen@gmail.com"
] | norlandrhagen@gmail.com |
321fb15794d5e2e00771737a5237693b81c6223c | 9d5522c21b60fa64111b54d1c31a442d755ddd2a | /Problem Solving/Hackerrank/SockMerchant.py | a5c1394641a679af17aac0c2f6e4aea8f17ec86b | [] | no_license | faiyazkhanwif/ProblemSolving-Python | 8e156ac95e5a78f3ab7af74710934570964856ee | 56b897c29cdc1ffcd3f6638d608f63aff1d7e46e | refs/heads/master | 2022-12-29T06:07:18.429488 | 2020-10-14T04:31:16 | 2020-10-14T04:31:16 | 303,902,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
    """Count the number of matching pairs of socks in *ar*.

    *n* is the declared length of *ar* (kept for the HackerRank
    signature; only the list itself is inspected).
    """
    seen = set()
    pairs = 0
    for color in ar:
        if color in seen:
            continue
        seen.add(color)
        # Each colour contributes count // 2 complete pairs.
        pairs += ar.count(color) // 2
    return pairs
if __name__ == '__main__':
    # HackerRank harness: read the input from stdin and write the answer
    # to the file named by OUTPUT_PATH.  Using a context manager (instead
    # of the original explicit open()/close()) guarantees the handle is
    # closed even if reading or writing fails.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        n = int(input())
        ar = list(map(int, input().rstrip().split()))
        result = sockMerchant(n, ar)
        fptr.write(str(result) + '\n')
| [
"faiyazkhanwif@gmail.com"
] | faiyazkhanwif@gmail.com |
dc3441675e94bea8afa0085dc88c1917551088d9 | 18c0511cdaef2d09f07ed071e4f300a78368900f | /graphs/cases_per_admin_region_overtime.py | 210d828332e9dac41fba44f56b4128f0f5f5cc2a | [
"CC0-1.0"
] | permissive | ElNiak/covidbe-opendata | 3bcaca2ebf4a345d0529e5c69249ae0d83d0768f | 115883d379da91c72a260c66d4ae49b241b111f5 | refs/heads/master | 2023-04-09T12:59:11.414369 | 2021-04-21T04:40:43 | 2021-04-21T04:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,122 | py | from datetime import datetime, date
import geopandas
import plotly.express as px
import pandas as pd
import numpy as np
from flask_babel import gettext
from pages import get_translation
import plotly.graph_objects as go
from graphs import register_plot_for_embedding
# Loaded once at import time: NIS3 district polygons and the Sciensano
# weekly (df3) / daily (df3d) case-count CSV extracts.
geojson = geopandas.read_file('static/json/admin-units/be-geojson.geojson')
df3 = pd.read_csv("static/csv/cases_weekly_ins3.csv",encoding='utf8')
#df3 = df3[df3.WEEK >= 32]
df3d = pd.read_csv("static/csv/cases_daily_ins3.csv",encoding='utf8')
@register_plot_for_embedding("cases_per_admin_region_inhabitant overtime")
def map_totcases_admin_region_overtime():
    """Animated choropleth (one frame per week) of total weekly case
    counts per NIS3 administrative district."""
    maxv = df3.CASES.max()
    fig = px.choropleth_mapbox(df3, geojson=geojson,
                               locations="NIS3",
                               color='CASES', color_continuous_scale="magma_r",
                               range_color=(0, maxv),
                               animation_frame="WEEK", animation_group="NIS3",
                               featureidkey="properties.NIS3",
                               center={"lat": 50.641111, "lon": 4.668889},
                               hover_name="CASES",
                               hover_data=["CASES",'CASES_PER_1000HABITANT', "name"],
                               height=600,
                               mapbox_style="carto-positron", zoom=6)
    fig.update_geos(fitbounds="locations")
    fig.layout.coloraxis.colorbar.title = get_translation(fr="Nombres de cas",en="Number of cases")
    fig.layout.coloraxis.colorbar.titleside = "right"
    fig.layout.coloraxis.colorbar.ticks = "outside"
    fig.layout.coloraxis.colorbar.tickmode = "array"
    # customdata follows hover_data order: [CASES, CASES_PER_1000HABITANT, name].
    fig.update_traces(
        hovertemplate=gettext(gettext("<b>%{customdata[2]}%</b><br>%{customdata[1]}<br>%{customdata[2]}"))
    )
    fig.update_layout(template="plotly_white", margin=dict(l=0, r=0, t=5, b=0))
    return fig
@register_plot_for_embedding("cases_per_habitant_admin_region_inhabitant overtime")
def map_cases_per_habittant_admin_region_overtime():
    """Animated choropleth (one frame per week) of weekly cases per
    inhabitant per NIS3 district.

    NOTE(review): colours use the CSV's CASES_PER_1000HABITANT column —
    confirm whether the factor is actually per 1 000 or per 100 000.
    """
    maxv = df3.CASES_PER_1000HABITANT.max()
    fig = px.choropleth_mapbox(df3, geojson=geojson,
                               locations="NIS3",
                               color='CASES_PER_1000HABITANT', color_continuous_scale="magma_r",
                               range_color=(0, maxv),
                               animation_frame="WEEK", animation_group="NIS3",
                               featureidkey="properties.NIS3",
                               center={"lat": 50.641111, "lon": 4.668889},
                               hover_name="CASES",
                               hover_data=["CASES",'CASES_PER_1000HABITANT', "name"],
                               height=600,
                               mapbox_style="carto-positron", zoom=6)
    fig.update_geos(fitbounds="locations")
    fig.layout.coloraxis.colorbar.title = get_translation(fr="Nombres de cas / 1000 habitants",en="Number of cases / 1000 inhabitants")
    fig.layout.coloraxis.colorbar.titleside = "right"
    fig.layout.coloraxis.colorbar.ticks = "outside"
    fig.layout.coloraxis.colorbar.tickmode = "array"
    fig.update_traces(
        hovertemplate=gettext(gettext("<b>%{customdata[2]}%</b><br>%{customdata[1]}<br>%{customdata[2]}"))
    )
    fig.update_layout(template="plotly_white", margin=dict(l=0, r=0, t=5, b=0))
    return fig
def dfoff(offset):
    """Aggregate daily cases per NIS3 district over a 14-day window.

    The window is [today-(17+offset), today-(4+offset)]; the most recent
    four days are excluded (reporting delay).  Returns a frame with
    CASES, POP, CASES_PER_100KHABITANT and the district name, sorted by
    incidence ascending.
    """
    df_names = pd.DataFrame(geojson.drop(columns='geometry'))
    cutoff1 = (pd.to_datetime('today') - pd.Timedelta(str(17 + offset) + ' days')).date()
    cutoff2 = (pd.to_datetime('today') - pd.Timedelta(str(4 + offset) + ' days')).date()
    # NOTE(review): read here with latin1 although the module-level load
    # uses utf8 — confirm the file's actual encoding.
    df3d = pd.read_csv("static/csv/cases_daily_ins3.csv", encoding='latin1')
    df3d = df3d[df3d.DATE >= str(cutoff1)]
    df3d = df3d[df3d.DATE <= str(cutoff2)]
    df3d = df3d.groupby([df3d.NIS3, df3d.POP]).agg({'CASES': ['sum']}).reset_index()
    # Flatten the MultiIndex columns produced by agg().
    df3d.columns = df3d.columns.get_level_values(0)
    df3d['NIS3'] = df3d['NIS3'].astype(int)
    df3d['CASES_PER_100KHABITANT'] = df3d['CASES'] / df3d['POP'] * 100000
    df3d = pd.merge(df3d, df_names, left_on='NIS3', right_on='NIS3', how='left')
    df3d = df3d.round({'CASES_PER_100KHABITANT': 1})
    df3d = df3d.sort_values(by='CASES_PER_100KHABITANT', axis=0)
    return df3d
def dfoff7days(offset):
    """Same as dfoff() but over a 7-day window
    [today-(10+offset), today-(4+offset)] (last four days excluded for
    reporting delay).  Returns cases per 100k inhabitants per NIS3
    district, sorted ascending."""
    df_names = pd.DataFrame(geojson.drop(columns='geometry'))
    cutoff1 = (pd.to_datetime('today') - pd.Timedelta(str(10 + offset) + ' days')).date()
    cutoff2 = (pd.to_datetime('today') - pd.Timedelta(str(4 + offset) + ' days')).date()
    # NOTE(review): latin1 here vs utf8 at module level — confirm encoding.
    df3d = pd.read_csv("static/csv/cases_daily_ins3.csv", encoding='latin1')
    df3d = df3d[df3d.DATE >= str(cutoff1)]
    df3d = df3d[df3d.DATE <= str(cutoff2)]
    df3d = df3d.groupby([df3d.NIS3, df3d.POP]).agg({'CASES': ['sum']}).reset_index()
    # Flatten the MultiIndex columns produced by agg().
    df3d.columns = df3d.columns.get_level_values(0)
    df3d['NIS3'] = df3d['NIS3'].astype(int)
    df3d['CASES_PER_100KHABITANT'] = df3d['CASES'] / df3d['POP'] * 100000
    df3d = pd.merge(df3d, df_names, left_on='NIS3', right_on='NIS3', how='left')
    df3d = df3d.round({'CASES_PER_100KHABITANT': 1})
    df3d = df3d.sort_values(by='CASES_PER_100KHABITANT', axis=0)
    return df3d
@register_plot_for_embedding("scattter-incidence-nis3")
def scatter_incidence_nis3():
    """Dot plot per NIS3 district of the 7-day incidence, comparing the
    latest window [t-10, t-4] (coloured by the 3-day percentage change)
    with the window three days earlier [t-13, t-7] (black)."""
    # NOTE(review): df_names is unused in this function.
    df_names = pd.DataFrame(geojson.drop(columns='geometry'))
    # title = get_translation(fr="Nombres de cas/100K past [d-17,d-4] days",en="Number of cases/100K past [d-17,d-4] days")
    df1 = dfoff7days(0)
    df2 = dfoff7days(3)
    df = df1.merge(df2, left_on=['NIS3', 'POP', 'name'], right_on=['NIS3', 'POP', 'name'])
    # Percentage change of the 7-day incidence over the last 3 days.
    df['increase3d'] = ((df['CASES_PER_100KHABITANT_x'] / df['CASES_PER_100KHABITANT_y']) - 1) * 100
    df = df.sort_values(by='CASES_PER_100KHABITANT_x', axis=0)
    fig = go.Figure()
    fig.add_trace(go.Scatter(y=df.name, x=df['CASES_PER_100KHABITANT_x'],
                             name="[t-10,t-4]", mode='markers',
                             marker=dict(
                                 color=df['increase3d'], #set color equal to a variable
                                 size = 12,
                                 cmin=-50,
                                 cmax=50,
                                 # Diverging scale centred at 0: green = decrease, red = increase.
                                 colorscale=[
                                     [0, "#238b45"],
                                     [0.50, "#e5f5f9"],
                                     [0.50, "#fee0d2"],
                                     [1, "#cb181d"]],
                                 showscale=True
                             )))
    fig.add_trace(go.Scatter(y=df.name, x=df['CASES_PER_100KHABITANT_y'],
                             name="[t-13,t-7]", mode='markers', marker_color='black'))
    fig.update_layout(autosize=True, height=900)
    fig.update_layout(template="plotly_white")
    fig.update_layout(legend=dict(
        orientation="h",
        yanchor="bottom",
        y=1.02,
        xanchor="right",
        x=1
    ))
    fig.update_layout(
        title="7 days-Incidence evolution over last 3 days (color = % increase/decrease)",
    )
    return fig
@register_plot_for_embedding("map_increase_incidence_admin_region_percentage")
def map_increase_incidence_admin_region_percentage():
    """Choropleth of the percentage change of the 7-day incidence per
    NIS3 district between the latest window and the one three days
    earlier.  Green = decrease, red = increase (diverging scale at 0)."""
    df1 = dfoff7days(0)   # most recent 7-day window
    df2 = dfoff7days(3)   # same window shifted three days back
    df = df1.merge(df2, left_on=['NIS3', 'POP', 'name'], right_on=['NIS3', 'POP', 'name'])
    # Percentage change over the last 3 days.  (Renamed from the
    # misleading 'increase5d'; the shift really is three days.)
    df['increase3d'] = ((df['CASES_PER_100KHABITANT_x'] / df['CASES_PER_100KHABITANT_y']) - 1) * 100

    fig = px.choropleth_mapbox(df, geojson=geojson,
                               locations="NIS3",
                               color='increase3d',
                               range_color=(-40, 40),
                               color_continuous_scale=[
                                   [0, "#238b45"],
                                   [0.50, "#e5f5f9"],
                                   [0.50, "#fee0d2"],
                                   [1, "#cb181d"]],
                               featureidkey="properties.NIS3",
                               center={"lat": 50.641111, "lon": 4.668889},
                               hover_name="name",
                               hover_data=["increase3d", 'CASES_PER_100KHABITANT_x', "name"],
                               height=600,
                               mapbox_style="carto-positron", zoom=6)
    fig.update_geos(fitbounds="locations")
    # BUG FIX: the French and English strings were passed to the wrong
    # keyword arguments (fr= held the English text and vice versa).
    fig.layout.coloraxis.colorbar.title = get_translation(
        fr="Augmentation/Diminution en % sur les 3 derniers jours",
        en="Increase/Decrease % over last 3 days")
    fig.layout.coloraxis.colorbar.titleside = "right"
    fig.layout.coloraxis.colorbar.ticks = "outside"
    fig.layout.coloraxis.colorbar.tickmode = "array"
    fig.update_layout(template="plotly_white", margin=dict(l=0, r=0, t=5, b=0))
    return fig
@register_plot_for_embedding("map_cases_incidence_nis3")
def map_cases_incidence_nis3():
    """Choropleth of the 14-day incidence (cases per 100k inhabitants over
    [today-17, today-4], last four days dropped for reporting delay) per
    NIS3 district."""
    # Re-read geometry and daily cases locally (shadows the module-level
    # geojson on purpose).
    geojson = geopandas.read_file('static/json/admin-units/be-geojson.geojson')
    df_names = pd.DataFrame(geojson.drop(columns='geometry'))
    cutoff1 = (pd.to_datetime('today') - pd.Timedelta('17 days')).date()
    cutoff2 = (pd.to_datetime('today') - pd.Timedelta('4 days')).date()
    # NOTE(review): latin1 here vs utf8 at module level — confirm encoding.
    df3d = pd.read_csv("static/csv/cases_daily_ins3.csv", encoding='latin1')
    df3d = df3d[df3d.DATE >= str(cutoff1)]
    df3d = df3d[df3d.DATE <= str(cutoff2)]
    df3d = df3d.groupby([df3d.NIS3, df3d.POP]).agg({'CASES': ['sum']}).reset_index()
    # Flatten the MultiIndex columns produced by agg().
    df3d.columns = df3d.columns.get_level_values(0)
    df3d['NIS3'] = df3d['NIS3'].astype(int)
    df3d['CASES_PER_100KHABITANT'] = df3d['CASES'] / df3d['POP'] * 100000
    df3d = pd.merge(df3d, df_names, left_on='NIS3', right_on='NIS3', how='left')
    df3d = df3d.round({'CASES_PER_100KHABITANT': 1})
    fig = px.choropleth_mapbox(df3d, geojson=geojson,
                               locations="NIS3",
                               color='CASES_PER_100KHABITANT',
                               range_color=(0, 500),
                               color_continuous_scale="magma_r",
                               #color_continuous_scale=[(0, "green"), (15/150, "green"), (15/150, "yellow"),
                               #                        (30/150, "yellow"), (30/150, "orange"), (50/150, "orange"),
                               #                        (50/150, "red"), (100/150, "red"),(100/150, "black"),(150/150, "black")],
                               featureidkey="properties.NIS3",
                               center={"lat": 50.641111, "lon": 4.668889},
                               hover_name="CASES_PER_100KHABITANT",
                               hover_data=["CASES_PER_100KHABITANT", "POP","NIS3", "name"],
                               custom_data=["CASES_PER_100KHABITANT", "POP","NIS3", "name"],
                               height=600,
                               mapbox_style="carto-positron", zoom=6)
    fig.update_geos(fitbounds="locations")
    fig.layout.coloraxis.colorbar.title = get_translation(fr="Nombres de cas/100K past [d-17,d-4] days",
                                                          en="Number of cases/100K past [d-17,d-4] days")
    fig.layout.coloraxis.colorbar.titleside = "right"
    fig.layout.coloraxis.colorbar.ticks = "outside"
    fig.layout.coloraxis.colorbar.tickmode = "array"
    # custom_data order: [incidence, population, NIS3, name].
    fig.update_traces(
        hovertemplate=gettext(
            gettext("incidence:<b>%{customdata[0]}<br>pop:<b>%{customdata[1]}<br><b>%{customdata[3]}"))
    )
    fig.update_layout(template="plotly_white", margin=dict(l=0, r=0, t=5, b=0))
    return fig
@register_plot_for_embedding("cases_per_admin_region_inhabitant overtime plot")
def plot_cases_admin_region_overtime():
    """Line chart of weekly case counts, one line per NIS3 district."""
    figure = px.line(data_frame=df3, x='WEEK', y='CASES', color='name', line_group='name')
    figure.update_layout(template="plotly_white")
    return figure
@register_plot_for_embedding("cases_per_habitant_admin_region_inhabitant overtime plot")
def plot_cases_per_habittant_admin_region_overtime():
    """Stacked area chart of daily cases per inhabitant per district.

    NOTE(review): despite the '..._1000HABITANT' column name the factor is
    100000 (per 100k), and this assignment mutates the module-level df3d
    in place.
    """
    df3d['CASES_PER_1000HABITANT'] = df3d['CASES']/df3d['POP']*100000
    fig = px.area(data_frame=df3d, x='DATE', y='CASES_PER_1000HABITANT',color='name')
    fig.update_layout(template="plotly_white")
    return fig
@register_plot_for_embedding("casesdaily_per_admin_region_inhabitant overtime plot")
def plot_cases_daily_admin_region_overtime():
    """Stacked area chart of raw daily case counts, one band per district."""
    figure = px.area(data_frame=df3d, x='DATE', y='CASES', color='name')
    figure.update_layout(template="plotly_white")
    return figure
@register_plot_for_embedding("barplot_admin")
def barplot_admin(nis=46):
    """Bar chart of daily cases for one NIS3 district (default 46), with a
    7-day rolling mean overlaid and the cumulative total in the title."""
    df_admin = df3d[df3d.NIS3 == nis]
    descr = df_admin['name'].values
    # Fall back to a generic title when the NIS code is unknown.
    title = "admin"
    if len(descr) > 0:
        title = descr[0]
    cases = sum(df_admin.CASES.values)
    fig = px.bar(x=df_admin.DATE, y=df_admin.CASES)
    # 7-day rolling average smooths the weekday reporting pattern.
    fig.add_trace(
        go.Scatter(x=df_admin.DATE, y=df_admin.CASES.rolling(7).mean(), showlegend=False)
    )
    fig.update_layout(title_text=gettext("Number of cases in {title} since beginning: {cases}").format(title=title, cases=cases),
                      height=500, template="plotly_white", margin=dict(l=20, r=0, t=60, b=0))
    fig.layout.coloraxis.showscale = False
    fig.update_yaxes(title="cases (1 = <5)")
    fig.update_xaxes(title="")
    fig.update_traces(
        hovertemplate=gettext("<b>%{x}</b><extra>%{y} cases</extra>"),
    )
    return fig
| [
"pschaus@gmail.com"
] | pschaus@gmail.com |
f73c9d617c1cf558ba76fd96c0a0973f40d43195 | ddca95269478743a9697c1c3224b8a983c5eb3b9 | /LintCode/50. 数组剔除元素后的乘积.py | 7deeffc910413d6da36c74bf61b47e2a69b479b6 | [] | no_license | hijkzzz/leetcode | d265bd4b6548b84cc91ca65d2e50f500eea33100 | 135003500fa25f47a45ded87f360ab40ed9cc35f | refs/heads/master | 2021-10-23T20:55:21.334513 | 2018-08-25T08:58:48 | 2018-08-25T08:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | class Solution:
"""
@param: nums: Given an integers array A
@return: A long long array B and B[i]= A[0] * ... * A[i-1] * A[i+1] * ... * A[n-1]
"""
def productExcludeItself(self, nums):
if len(nums) < 2:
return [1]
# write your code here
left = nums[:]
right = nums[:]
for i in range(1, len(nums)):
left[i] *= left[i - 1]
right[len(nums) - i - 1] *= right[len(nums) - i]
B = [0 for i in range(len(nums))]
for i in range(len(nums)):
if i == 0:
B[i] = right[i + 1]
elif i == len(nums) - 1:
B[i] = left[i - 1]
else:
B[i] = left[i - 1] * right[i + 1]
return B | [
"janhu9527@gmail.com"
] | janhu9527@gmail.com |
0a6acd4a14f06588baa4f0bdf0b0b56f25938617 | f2993f5bebdcd5ac38b38c283e1027f134250963 | /DataValue.py | 296c489578982688bbadec64d9e086007111f040 | [] | no_license | ButtrmlkPncakes/MIS-5400-Final-Project | 883f010da41c9e904c35a40259148c66c4e8e1bf | a41764f6c475e01474f7f41f359b37d791740d3a | refs/heads/master | 2020-04-10T19:31:51.969438 | 2018-12-14T04:58:38 | 2018-12-14T04:58:38 | 161,238,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,859 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 17:07:43 2018
@author: Test1
"""
import pandas as pd
import pyodbc
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GetCredentials
# Database credentials.  NOTE(review): the secret arguments are hard-coded
# in source — consider environment variables or a secrets store.
User,Pass = GetCredentials.GetCreds('its me, dummy','let me in')
#%matplotlib inline
# Tax categories (column names) that the bar-chart routines graph.
graphList = ['OverpayAmt','TaxDueAmt']
def GetData():
    """Load the 2013–2016 IRS tax workbooks and return
    (dfAll, AllYears, labels): the concatenated frame (rows with zip code
    0 removed), the per-year frames newest-first, and the AGI-bracket
    axis labels."""
    # Former Azure SQL path, kept for reference; the Excel exports below
    # are used instead.
    #engine = create_engine(
    #    'mssql+pyodbc://' + User + '@cody-practice.database.windows.net:' + Pass + '@cody-practice.database.windows.net/Cody-IRS-Data?driver=SQL+Server+Native+Client+11.0',
    #    echo=True, connect_args={'interactive_timeout': 30000, 'wait_timeout': 30000})
    #con = engine.connect()
    #df2016 = pd.read_sql('Tax_Data_2016',con=con)
    #df2015 = pd.read_sql('Tax_Data_2015',con=con)
    #df2014 = pd.read_sql('Tax_Data_2014',con=con)
    #df2013 = pd.read_sql('Tax_Data_2013',con=con)
    df2016 = pd.read_excel('Tax_Data_2016.xlsx')
    df2015 = pd.read_excel('Tax_Data_2015.xlsx')
    df2014 = pd.read_excel('Tax_Data_2014.xlsx')
    df2013 = pd.read_excel('Tax_Data_2013.xlsx')
    AllYears = [df2016,df2015,df2014,df2013]
    dfAll = pd.DataFrame(df2016)
    # NOTE(review): debug leftover — prints the column list on every call.
    print(dfAll.columns)
    dfAll = dfAll.append([df2015,df2014,df2013])
    # Drop the state-total rows (zip code 0); column case differs between
    # yearly workbooks.  NOTE(review): the bare except hides any other
    # failure — an except AttributeError would be safer.
    try:
        dfAll = dfAll[dfAll.ZIPCODE != 0]
    except:
        dfAll = dfAll[dfAll.zipcode != 0]
    # X-axis labels for the six AGI brackets (agi_stub 1..6).
    labels = ['$1\nto\n$25,000','$25,000\nto\n$50,000','$50,000\nto\n$75,000','$75,000\nto\n$100,000','$100,000\nto\n$200,000','$200,000\nor more']
    return dfAll, AllYears,labels
def BarCharts():
    """For each category in graphList, draw two bar charts: totals per AGI
    bracket across all years, and a grouped per-year comparison."""
    df, AllYears, labels = GetData()
    for cat in graphList:
        # Totals per AGI bracket (agi_stub 1..6) across all years.
        data = [sum(df[df.agi_stub == stub][cat]) for stub in range(1, 7)]
        # Per-year totals per AGI bracket, keyed 'df2013'..'df2016'.
        # BUG FIX: the original iterated `for yr in AllYears` (a list of
        # DataFrames) while comparing it against the TaxYear column;
        # iterate the year values instead.  (Assumes the frames carry a
        # TaxYear column, as the original comparison implies — TODO
        # confirm against the workbooks.)
        YearsDict = {}
        for yr in range(2013, 2017):
            YearsDict['df' + str(yr)] = [
                sum(df[(df.agi_stub == stub) & (df.TaxYear == yr)][cat])
                for stub in range(1, 7)
            ]
        leny = np.arange(len(labels))
        # Overall chart.
        plt.ticklabel_format(style='plain', axis='y', useOffset=False)
        plt.bar(leny, data, align='center', width=.5)
        plt.xticks(leny, labels)
        plt.xlabel('AGI Range')
        if cat == 'OverpayAmt':
            plt.title('Tax Overpayment Amounts 2013-2017')
            plt.ylabel('Amount of Overpayment')
        elif cat == 'TaxDueAmt':
            plt.title('Tax Due Amounts 2013-2017')
            plt.ylabel('Amount of Tax Due')
        # Grouped per-year comparison chart.
        fig2, ax2 = plt.subplots()
        plt.ticklabel_format(style='plain', axis='y', useOffset=False)
        bar_width = .15
        plt.bar(leny - bar_width, YearsDict['df2013'], bar_width, label='Tax Year 2013', align='center')
        plt.bar(leny, YearsDict['df2014'], bar_width, label='Tax Year 2014', align='center')
        plt.bar(leny + bar_width, YearsDict['df2015'], bar_width, label='Tax Year 2015', align='center')
        plt.bar(leny + (bar_width * 2), YearsDict['df2016'], bar_width, label='Tax Year 2016', align='center')
        plt.xticks(leny, labels)
        plt.xlabel('AGI Range')
        if cat == 'OverpayAmt':
            plt.title('Tax Overpayment Amounts 2013-2017 by Year')
            plt.ylabel('Amount of Overpayment')
        elif cat == 'TaxDueAmt':
            plt.title('Tax Due Amounts 2013-2017 by Year')
            plt.ylabel('Amount of Tax Due')
        plt.legend()
        plt.show()
def HeatMap():
    """Render a heatmap of total tax due by state and AGI bracket.

    Saves the figure to static/HeatMap1.jpg and returns a status string.
    NOTE(review): the output path is machine-specific.
    """
    df, AllYears, labels = GetData()
    leny = np.arange(len(labels))
    StateFrame = df.loc[:, ['STATE', 'agi_stub', 'TaxDueAmt']]
    StateFrame = StateFrame.sort_values(['TaxDueAmt'], ascending=False)
    StateFrame.agi_stub = pd.to_numeric(StateFrame.agi_stub)
    # Pivot to STATE rows x AGI-bracket columns of summed tax due.
    StateTable = StateFrame.pivot_table(index='STATE', columns='agi_stub',
                                        values='TaxDueAmt', aggfunc='sum')
    f, ax = plt.subplots(figsize=(9, 12))
    sns.heatmap(StateTable, annot=True, ax=ax, linewidths=.5, fmt='d')
    plt.xlabel('AGI Range')
    plt.ylabel('States with most tax due')
    plt.xticks(leny, labels)
    plt.savefig('C:/Users/mes12/Desktop/Fall 2018 - USU/MIS 5400/Final Project/static/HeatMap1.jpg')
    # BUG FIX: a plt.show() call after this return was unreachable and has
    # been removed (matching HeatMap2's save-and-return behaviour).
    return 'Heat Map successfully created.'
def HeatMap2(state):
    """Render a heatmap of tax due by AGI bracket for the ten zip codes
    with the most total tax due in *state* (case-insensitive).

    Saves the figure to static/HeatMap.jpg (NOTE(review): machine-specific
    path, and every state overwrites the same file) and returns a status
    string.
    """
    state = state.upper()
    df, AllYears,labels = GetData()
    leny = np.arange(len(labels))
    StateFrame = df[df.STATE == state]
    StateFrame = StateFrame.loc[:,['ZIPCODE','agi_stub','TaxDueAmt']]
    StateFrame = StateFrame.sort_values(['TaxDueAmt'],ascending=False)
    # Keep only the ten zip codes with the largest total tax due.
    ZipGroups = StateFrame.groupby(['ZIPCODE'],as_index=False).sum()
    TopZips = ZipGroups.nlargest(10,columns='TaxDueAmt')
    StateFrame = StateFrame[StateFrame.ZIPCODE.isin(TopZips.ZIPCODE)]
    StateFrame.ZIPCODE = pd.to_numeric(StateFrame.ZIPCODE)
    StateFrame.agi_stub = pd.to_numeric(StateFrame.agi_stub)
    # Pivot to ZIPCODE rows x AGI-bracket columns of summed tax due.
    StateTable = StateFrame.pivot_table(index='ZIPCODE',columns='agi_stub',values='TaxDueAmt',aggfunc='sum')
    f, ax = plt.subplots(figsize = (9,12))
    sns.heatmap(StateTable,annot=True,ax=ax,linewidths=.5,fmt='d')
    plt.xlabel('AGI Range')
    plt.ylabel('Zip Codes with Most Tax Due - ' + str(state))
    plt.xticks(leny,labels)
    plt.savefig('C:/Users/mes12/Desktop/Fall 2018 - USU/MIS 5400/Final Project/static/HeatMap.jpg')# + str(state) + '.jpg')
    return 'Heat Map successfully created.'
#BarCharts()
#HeatMap(dfAll)
#HeatMap2(dfAll)
| [
"noreply@github.com"
] | noreply@github.com |
78153155db6499ffdfa49e388d2356abe41433ff | 131df00f8a34cec9326fae86a452d85a93784e5f | /matcherapp/apps.py | bd11e5f47ac208434f3d99c0bf256d65bb3a6848 | [] | no_license | ZetaZeta/dictmatcher | a2c8cc1823d3a0fe071eac1cf36123585d966716 | df8bac38316e472eb9b61deae9bef6c90b36aaee | refs/heads/master | 2020-04-26T22:47:32.895064 | 2019-03-05T06:22:37 | 2019-03-05T06:22:37 | 173,883,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class MatcherAppConfig(AppConfig):
    """Django AppConfig registering the ``matcherapp`` application."""
    # Dotted module path of the app this configuration applies to.
    name = 'matcherapp'
| [
"MatthewSCarson@gmail.com"
] | MatthewSCarson@gmail.com |
6e241b8e71c9f1a3e79f351f9de5ef956299d76c | c6081127387caef35500075f57b01b4ee30f348e | /data/recall.py | 3848f187eaedf61b2bd585004d5c9e23b0478b9c | [] | no_license | yuyanlebond/pytorch_solov2 | 561048bfd56828f6d0d52bc9100f1735b9235f1f | c171850244cc767290e0fd585ee03c67271ab10a | refs/heads/master | 2023-06-03T11:19:49.069454 | 2021-06-15T15:06:53 | 2021-06-15T15:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,073 | py | import numpy as np
from terminaltables import AsciiTable
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
    """Calculate the ious between each bbox of bboxes1 and bboxes2.

    Args:
        bboxes1 (ndarray): shape (n, 4), boxes as (x1, y1, x2, y2).
        bboxes2 (ndarray): shape (k, 4).
        mode (str): 'iou' (intersection over union) or 'iof'
            (intersection over foreground).

    Returns:
        ndarray: overlaps of shape (n, k), dtype float32.
    """
    assert mode in ['iou', 'iof']
    boxes_a = bboxes1.astype(np.float32)
    boxes_b = bboxes2.astype(np.float32)
    n_a, n_b = boxes_a.shape[0], boxes_b.shape[0]
    overlaps = np.zeros((n_a, n_b), dtype=np.float32)
    if n_a * n_b == 0:
        return overlaps
    # Iterate over the shorter side for fewer Python-level loop turns;
    # remember to transpose the result back at the end.
    swapped = False
    if n_a > n_b:
        boxes_a, boxes_b = boxes_b, boxes_a
        overlaps = np.zeros((n_b, n_a), dtype=np.float32)
        swapped = True
    # Pixel-inclusive areas: +1 on each side length.
    areas_a = (boxes_a[:, 2] - boxes_a[:, 0] + 1) * (
        boxes_a[:, 3] - boxes_a[:, 1] + 1)
    areas_b = (boxes_b[:, 2] - boxes_b[:, 0] + 1) * (
        boxes_b[:, 3] - boxes_b[:, 1] + 1)
    for idx in range(boxes_a.shape[0]):
        left = np.maximum(boxes_a[idx, 0], boxes_b[:, 0])
        top = np.maximum(boxes_a[idx, 1], boxes_b[:, 1])
        right = np.minimum(boxes_a[idx, 2], boxes_b[:, 2])
        bottom = np.minimum(boxes_a[idx, 3], boxes_b[:, 3])
        inter = np.maximum(right - left + 1, 0) * np.maximum(
            bottom - top + 1, 0)
        if mode == 'iou':
            denom = areas_a[idx] + areas_b - inter
        else:
            # 'iof': normalise by the foreground (bboxes1) area only.
            denom = areas_a[idx] if not swapped else areas_b
        overlaps[idx, :] = inter / denom
    if swapped:
        overlaps = overlaps.T
    return overlaps
def _recalls(all_ious, proposal_nums, thrs):
    """Shared recall computation.

    Args:
        all_ious: indexable of length #images holding per-image IoU
            matrices of shape (num_gts_i, num_proposals_i).
        proposal_nums (ndarray): proposal budgets to evaluate.
        thrs (ndarray): IoU thresholds to evaluate.

    Returns:
        ndarray: recalls of shape (proposal_nums.size, thrs.size).
    """
    img_num = all_ious.shape[0]
    total_gt_num = sum([ious.shape[0] for ious in all_ious])
    # _ious[k, g]: IoU of ground truth g with its matched proposal when
    # only the top proposal_nums[k] proposals per image are considered.
    _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
    for k, proposal_num in enumerate(proposal_nums):
        tmp_ious = np.zeros(0)
        for i in range(img_num):
            ious = all_ious[i][:, :proposal_num].copy()
            gt_ious = np.zeros((ious.shape[0]))
            if ious.size == 0:
                # No proposals kept (or no gts): matched IoUs stay 0.
                tmp_ious = np.hstack((tmp_ious, gt_ious))
                continue
            for j in range(ious.shape[0]):
                # Greedy one-to-one matching: take the (gt, proposal) pair
                # with the highest remaining IoU, record it, then invalidate
                # that gt row and proposal column by writing -1 into them.
                gt_max_overlaps = ious.argmax(axis=1)
                max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
                gt_idx = max_ious.argmax()
                gt_ious[j] = max_ious[gt_idx]
                box_idx = gt_max_overlaps[gt_idx]
                ious[gt_idx, :] = -1
                ious[:, box_idx] = -1
            tmp_ious = np.hstack((tmp_ious, gt_ious))
        _ious[k, :] = tmp_ious
    # Sort each row descending; the order does not affect the >= thr count.
    _ious = np.fliplr(np.sort(_ious, axis=1))
    recalls = np.zeros((proposal_nums.size, thrs.size))
    for i, thr in enumerate(thrs):
        # Recall = fraction of all ground truths matched with IoU >= thr.
        recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)
    return recalls
def set_recall_param(proposal_nums, iou_thrs):
    """Normalise ``proposal_nums`` and ``iou_thrs`` into 1-D numpy arrays.

    An int/float scalar becomes a one-element array, a list becomes an
    array, ``iou_thrs=None`` defaults to ``[0.5]``, and anything else
    (e.g. an existing ndarray) is passed through unchanged.
    """
    if isinstance(proposal_nums, int):
        nums = np.array([proposal_nums])
    elif isinstance(proposal_nums, list):
        nums = np.array(proposal_nums)
    else:
        nums = proposal_nums

    if iou_thrs is None:
        thrs = np.array([0.5])
    elif isinstance(iou_thrs, float):
        thrs = np.array([iou_thrs])
    elif isinstance(iou_thrs, list):
        thrs = np.array(iou_thrs)
    else:
        thrs = iou_thrs
    return nums, thrs
def eval_recalls(gts,
                 proposals,
                 proposal_nums=None,
                 iou_thrs=None,
                 print_summary=True):
    """Calculate recalls.

    Args:
        gts (list or ndarray): a list of arrays of shape (n, 4); entries
            may be None or empty for images without ground truth.
        proposals (list or ndarray): a list of arrays of shape (k, 4) or
            (k, 5); with five columns the last one is a score used to
            sort the proposals in descending order first.
        proposal_nums (int or list of int or ndarray): top N proposals.
        iou_thrs (float or list or ndarray): iou thresholds
            (defaults to [0.5]).
        print_summary (bool): whether to print a recall summary table.

    Returns:
        ndarray: recalls of shape (num_proposal_nums, num_iou_thrs).
    """
    img_num = len(gts)
    assert img_num == len(proposals)
    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)
    all_ious = []
    for i in range(img_num):
        if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
            # Sort proposals by their score column, highest first.
            scores = proposals[i][:, 4]
            sort_idx = np.argsort(scores)[::-1]
            img_proposal = proposals[i][sort_idx, :]
        else:
            img_proposal = proposals[i]
        prop_num = min(img_proposal.shape[0], proposal_nums[-1])
        if gts[i] is None or gts[i].shape[0] == 0:
            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
        all_ious.append(ious)
    # Pack the per-image IoU matrices into an explicit 1-D object array.
    # The previous `np.array(all_ious)` raises ValueError on NumPy >= 1.24
    # whenever images have different gt counts (ragged element shapes).
    ious_arr = np.empty(len(all_ious), dtype=object)
    for i, ious in enumerate(all_ious):
        ious_arr[i] = ious
    recalls = _recalls(ious_arr, proposal_nums, iou_thrs)
    if print_summary:
        print_recall_summary(recalls, proposal_nums, iou_thrs)
    return recalls
| [
"liujiangkuan.test@gmail.com"
] | liujiangkuan.test@gmail.com |
4fb8058533804c25fc812504e99f7c776b0d6bf3 | 590757ded91424c1216ec5027870b9e06d17e436 | /tests/test_figure.py | 13305a8b41a8cc9de62be13b954e57c5e524566a | [] | no_license | mkile/Figures_Class | 63c8b078c9fd5c9d8b979898a6d4fd2edab03856 | ec49ecd3525a2b2cbe5162417010a37871c619c9 | refs/heads/main | 2023-04-09T19:21:29.706139 | 2021-04-17T18:38:40 | 2021-04-17T18:38:40 | 355,606,945 | 0 | 0 | null | 2021-04-17T18:38:41 | 2021-04-07T16:07:52 | Python | UTF-8 | Python | false | false | 145 | py | import pytest
from source.classes import Figure
def test_baseclass_create():
    # Constructing the Figure base class directly is expected to raise.
    # NOTE(review): `Exception` is broad -- any error inside __init__ would
    # also satisfy this test; consider pinning the exact exception type.
    with pytest.raises(Exception):
        Figure('name', [1], 1)
| [
"mkiles81@gmail.com"
] | mkiles81@gmail.com |
4d7795fb58f8e3398f796892e22306053d589308 | 6d681b03f6345141da76296fed568dd404ab3124 | /mylib/performance.py | e7e8112373e35ab9ff6eabee92b4f9cc81c40f63 | [] | no_license | germank/mylib | 44596da38edecd1a174a1dfe86791c2095a169f8 | 5317035df039d90474916d118bfc569a67b2ce69 | refs/heads/master | 2016-09-08T02:41:22.676133 | 2015-01-21T16:34:33 | 2015-01-21T16:34:33 | 29,599,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | import math
#y correct class
#cls predicted class
def get_performance_measure(y, cls, perform_measure):
    """Compute one binary-classification metric from labels and predictions.

    Args:
        y: iterable of gold labels, each 0 or 1.
        cls: iterable of predicted labels, each 0 or 1.
        perform_measure: metric name, one of 'add' (number of positive
            predictions), 'acc'/'accuracy', 'prec'/'precision',
            'rec'/'recall', 'f1', or 'g' (geometric mean of precision
            and recall).

    Returns:
        The requested metric; 0 when its denominator is empty (e.g.
        precision with no positive predictions, or accuracy on no data).

    Raises:
        KeyError: if perform_measure is not a known metric name.
    """
    tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0
    for label, pred in zip(y, cls):
        assert label == 1 or label == 0
        assert pred == 1 or pred == 0
        if label and pred:
            tp += 1
        elif label:
            fn += 1
        if not label and pred:
            fp += 1
        elif not label:
            tn += 1
    # Guard every ratio against a zero denominator explicitly instead of
    # with bare `except:` clauses, which would also hide unrelated errors.
    try:
        prec = tp / (tp + fp)
    except ZeroDivisionError:
        prec = 0
    try:
        rec = tp / (tp + fn)
    except ZeroDivisionError:
        rec = 0
    try:
        f1 = 2 * (prec * rec) / (prec + rec)
    except ZeroDivisionError:
        f1 = 0
    total = tp + tn + fp + fn
    accuracy = (tp + tn) / total if total else 0
    perf = {
        'add': tp + fp,  # how many items were predicted positive
        'acc': accuracy,
        'accuracy': accuracy,
        'prec': prec,
        'precision': prec,
        'rec': rec,
        'recall': rec,
        'f1': f1,
        'g': math.sqrt(prec * rec),  # G-measure
    }
    return perf[perform_measure]
def get_contingency_table(y, x, wps, thr):
    """Bucket each scored item into a contingency-table cell.

    Args:
        y: iterable of gold labels (truthy = positive class).
        x: iterable of real-valued prediction scores.
        wps: iterable of the items (e.g. word pairs) being classified.
        thr: decision threshold; a score strictly above it counts as a
            positive prediction.

    Returns:
        defaultdict mapping 'tp' / 'fp' / 'tn' / 'fn' to lists of
        (words, score) tuples that fall into that cell.
    """
    # Imported here because this module never imports collections at top
    # level; without this the original code raised NameError at runtime.
    from collections import defaultdict
    con_table = defaultdict(list)
    for label, pred, words in zip(y, x, wps):
        positive = pred > thr
        if label:
            cat = 'tp' if positive else 'fn'
        else:
            cat = 'fp' if positive else 'tn'
        con_table[cat].append((words, pred))
    return con_table
if __name__== '__main__':
    # NOTE(review): `main` is not defined anywhere in this module, so
    # running the file as a script raises NameError. Confirm whether this
    # entry point is dead code or `main` was lost in an edit.
    main()
| [
"german.kruszewski@unitn.it"
] | german.kruszewski@unitn.it |
ae4a090e21da024645d11c315cc632f99f3bb14c | c5046ff113dce225974a86601b7195d2ef7950a1 | /FourthGen/Bprime_B2G/step0/BprimeToBZinc/BprimeToBZinc_M_825_TuneZ2star_8TeV_madgraph_cff_py_GEN.py | fce05c53d8920628074e34a19adb5514d5b8cf22 | [] | no_license | dmajumder/cms-UserCode | b8c340f889a119f33be0b169c61308536a0fae78 | f519a221dc0d4e8634f7eab7a1a8c802a2708210 | refs/heads/master | 2020-03-26T10:16:49.305776 | 2013-08-15T03:21:17 | 2013-08-15T03:21:17 | 13,921,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,123 | py | # Auto generated configuration file
# using:
# Revision: 1.372.2.3
# Source: /local/reps/CMSSW.admin/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/EightTeV/Bprime_B2G/BprimeToBZinc_M_825_TuneZ2star_8TeV_madgraph_cff.py --filein lhe:6475 -s GEN --conditions START52_V9::All --beamspot Realistic8TeVCollision --datatier GEN-SIM --eventcontent RAWSIM -n 1000 --no_exec
import FWCore.ParameterSet.Config as cms
process = cms.Process('GEN')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic8TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
# Input source
process.source = cms.Source("LHESource",
fileNames = cms.untracked.vstring('/store/lhe/6475/8TeV_bp_825_8.25_run12562_unweighted_events_qcut0_mgPostv2.lhe')
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.372.2.3 $'),
annotation = cms.untracked.string('Configuration/GenProduction/python/EightTeV/Bprime_B2G/BprimeToBZinc_M_825_TuneZ2star_8TeV_madgraph_cff.py nevts:1000'),
name = cms.untracked.string('PyReleaseValidation')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RAWSIMEventContent.outputCommands,
fileName = cms.untracked.string('BprimeToBZinc_M_825_TuneZ2star_8TeV_madgraph_cff_py_GEN.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Additional output definition
# Other statements
process.GlobalTag.globaltag = 'START52_V9::All'
process.generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettings = cms.vstring('MSTU(21)=1 ! Check on possible errors during program execution',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)',
'MSTP(52)=2 ! work with LHAPDF',
'PARP(82)=1.921 ! pt cutoff for multiparton interactions',
'PARP(89)=1800. ! sqrts for which PARP82 is set',
'PARP(90)=0.227 ! Multiple interactions: rescaling power',
'MSTP(95)=6 ! CR (color reconnection parameters)',
'PARP(77)=1.016 ! CR',
'PARP(78)=0.538 ! CR',
'PARP(80)=0.1 ! Prob. colored parton from BBR',
'PARP(83)=0.356 ! Multiple interactions: matter distribution parameter',
'PARP(84)=0.651 ! Multiple interactions: matter distribution parameter',
'PARP(62)=1.025 ! ISR cutoff',
'MSTP(91)=1 ! Gaussian primordial kT',
'PARP(93)=10.0 ! primordial kT-max',
'MSTP(81)=21 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model'),
processParameters = cms.vstring('MSTP(1) = 4',
'MSEL=7 ! User defined processes',
'MWID(7)=2 !use width of bprime as defined by PMAS',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass',
'PMAS(6,1)=172.5 ! t quark mass',
'PMAS(7,1) = 825.0D0 ! bprime quark mass',
'PMAS(7,2) = 8.250D0 ',
'PMAS(7,3) = 82.50D0 ',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(56,1)=0 ! g b4 on/off switches for individual decay modes',
'MDME(57,1)=0 ! gamma b4',
'KFDP(58,2)=5 ! defines Z0 b',
'MDME(58,1)=1 ! Z0 b',
'MDME(59,1)=0 ! W u',
'MDME(60,1)=0 ! W c',
'MDME(61,1)=0 ! W t',
'MDME(62,1)=0 ! W t4',
'MDME(63,1)=0 ! h0 b4',
'MDME(64,1)=-1 ! H- c',
'MDME(65,1)=-1 ! H- t',
'BRAT(56) = 0.0D0',
'BRAT(57) = 0.0D0',
'BRAT(58) = 1.0D0',
'BRAT(59) = 0.0D0',
'BRAT(60) = 0.0D0',
'BRAT(61) = 0.0D0',
'BRAT(62) = 0.0D0',
'BRAT(63) = 0.0D0',
'BRAT(64) = 0.0D0',
'BRAT(65) = 0.0D0',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar'),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.endjob_step,process.RAWSIMoutput_step)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
| [
""
] | |
5f6f23a6e9cdb54c79639f0bfb90bd8b840c903a | 169d83d58bee9581a59bebfdeaa5c479f681222e | /cart_pole.py | 8fe219f55458ffbb984caa81a7df0c5aebcb8c14 | [] | no_license | Ronnypetson/DQN | c62a9cc829d863e864591de2dab422e45f886dbc | 53662d6e44fe4524285293765d869c7667c6537f | refs/heads/master | 2018-10-24T15:27:43.313302 | 2018-08-21T03:11:48 | 2018-08-21T03:11:48 | 120,924,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,121 | py | import tensorflow as tf
import gym
import numpy as np
import os
import random
from collections import deque
# --- Hyper-parameters and replay-buffer state (module-level globals) ---
env_name = 'CartPole-v0'
state_dim = 4  # length of one observation vector fed into the network
ob_frames = 1  # environment steps folded into a single "observation"
num_keys = 2  # number of discrete actions
learning_rate = 0.01
batch_size = 64
replay_len = 100000  # replay-buffer capacity
oldest_mem = 0  # circular write cursor into `mem` (see replace_mem)
default_action = 1
empty_obs = np.zeros((ob_frames,state_dim))
# NOTE: list-repetition makes every slot alias the SAME placeholder dict;
# this is benign only because slots are replaced wholesale (replace_mem,
# set_rep_mem) before being read.
mem = replay_len*[{'q_sa':0.0,'obs':empty_obs,'act':default_action,'r':0.0,'new_obs':empty_obs,'d':False}]
model_fn = 'checkpoint/'+env_name+'/'+env_name+'.ckpt'
def beep():
    """Play a one-second 440 Hz tone via SoX's `play` as an audible alert."""
    duration = 1  # second
    freq = 440  # Hz
    command = 'play --no-show-progress --null --channels 0.25 synth %s sine %f' % (duration, freq)
    os.system(command)
def Or(f):
    """Return True if any element of the iterable `f` is truthy.

    Kept as a named helper for its call sites, but implemented with the
    built-in any() instead of the original hand-rolled loop.
    """
    return any(f)
def get_argmaxes(a):
    """Per-sample argmax over a flat batch of per-action Q-values.

    `a` holds the Q-values for each batch entry's actions back to back;
    the returned list gives, for each of the `batch_size` entries, the
    flat index of its best action.

    NOTE(review): the stride is hard-coded as 2 while the window length is
    `num_keys`; this only lines up because num_keys == 2 -- confirm.
    """
    return [2 * sample + np.argmax(a[2 * sample:2 * sample + num_keys])
            for sample in range(batch_size)]
def replace_mem(new_):
    # Overwrite the oldest entry of the circular replay buffer with `new_`
    # and advance the write cursor, wrapping at replay_len.
    global oldest_mem
    global mem
    mem[oldest_mem] = new_
    oldest_mem = (oldest_mem+1)%replay_len
def get_batch():
    # Sample `batch_size` transitions (with replacement) from the replay
    # buffer. Every field is appended `num_keys` times so the lists line
    # up element-for-element with `batch_action`, which enumerates each
    # action once per sampled state.
    q_sa = []
    ob = []
    act = []
    r = []
    new_ob = []
    d = []
    for i in range(batch_size):
        reg = random.choice(mem)
        for j in range(num_keys):
            q_sa.append(reg['q_sa'])
            ob.append(reg['obs'])
            # One-hot encoding of the stored action.
            a = np.zeros(num_keys)
            a[reg['act']] = 1.0
            act.append(a)
            r.append(reg['r'])
            new_ob.append(reg['new_obs'])
            d.append(reg['d'])
    return q_sa, ob, act, r, new_ob, d
def step(env, a, render=False):
    """Apply action `a` for up to `ob_frames` environment steps.

    Returns the stacked frame observations, the reward summed over the
    frames taken, and whether any frame ended the episode. Stops early as
    soon as the environment reports done.
    """
    frames = np.zeros((ob_frames, state_dim))
    rewards = np.zeros(ob_frames)
    done_flags = ob_frames * [False]
    for frame in range(ob_frames):
        if render:
            env.render()
        frames[frame], rewards[frame], done_flags[frame], _ = env.step(a)
        if done_flags[frame]:
            break
    return frames, np.sum(rewards), Or(done_flags)
def set_rep_mem(env):
    # Pre-fill the entire replay buffer with transitions collected by a
    # uniformly random policy, restarting the episode whenever it ends.
    global mem
    obs = np.expand_dims(env.reset(),axis=0)
    for i in range(replay_len):
        a = env.action_space.sample()
        new_obs,r,d = step(env,a)
        # q_sa starts at 0.0; it is only meaningful for entries written by
        # the training loop via replace_mem.
        mem[i] = {'q_sa': 0.0,'obs':obs,'act':a,'r':r,'new_obs':new_obs,'d':d}
        if d:
            obs = np.expand_dims(env.reset(),axis=0)
        else:
            obs = new_obs
X = tf.placeholder(tf.float32,[None,ob_frames,state_dim])
act = tf.placeholder(tf.float32,[None,num_keys])
Y = tf.placeholder(tf.float32,[None,1])
X_ = tf.contrib.layers.flatten(X)
act_ = tf.contrib.layers.flatten(act)
fc1 = tf.concat([X_,act_],1)
fc1 = tf.layers.dense(fc1,50,activation=tf.nn.relu)
fc2 = tf.layers.dense(fc1,10,activation=tf.nn.relu)
fc3 = tf.layers.dense(fc2,1,activation=tf.nn.relu)
loss = tf.losses.mean_squared_error(fc3,Y)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
env = gym.make(env_name)
gamma = 0.99
e = 0.01
alpha = 1.0
single_action = np.identity(num_keys).tolist()
batch_action = batch_size*single_action
with tf.Session() as sess:
saver = tf.train.Saver()
if os.path.isfile(model_fn+'.meta'):
saver.restore(sess,model_fn)
else:
sess.run(tf.global_variables_initializer())
#set_rep_mem(env)
scores = deque(maxlen=100)
for t in range(500000):
obs = np.expand_dims(env.reset(),axis=0)
d = False
i = 0
while not d:
allQ = sess.run(fc3,feed_dict={X:num_keys*[obs],act:single_action})
allQ = np.transpose(allQ)[0]
#a = np.random.choice(np.flatnonzero(allQ == allQ.max()))
a = np.argmax(allQ)
if np.random.rand(1) < e:
a = env.action_space.sample()
new_obs,r,d = step(env,a) # render=(t/500 % 8 == 7)
new_mem = {'q_sa': allQ[a],'obs':obs,'act':a,'r':r,'new_obs':new_obs,'d':d}
replace_mem(new_mem) #
obs = new_obs
i += 1
scores.append(i)
print(np.mean(scores))
# Replay
q_sa, b_ob, b_act, b_r, b_new_ob, b_d = get_batch()
maxQ = sess.run(fc3,feed_dict={X:b_new_ob,act:batch_action}) ##
maxQ = np.transpose(maxQ)[0]
#
argmaxQ = get_argmaxes(maxQ) #
b_d = [b_d[j] for j in argmaxQ]
b_r = [b_r[j] for j in argmaxQ]
#q_sa = [q_sa[j] for j in argmaxQ]
y = np.zeros(batch_size)
for j in range(batch_size):
if b_d[j]:
y[j] = b_r[j]
else:
y[j] = b_r[j] + gamma*maxQ[argmaxQ[j]]
s_y = np.sum(y)/batch_size
e = 0.1/(1+np.exp(s_y/100))
#print(s_y)
x_ = [b_ob[j] for j in argmaxQ] #
ac_ = [b_act[j] for j in argmaxQ] #
#print(ac_)
y = np.expand_dims(y,axis=1)
sess.run(train,feed_dict={X:x_,act:ac_,Y:y})
if t%1000 == 99:
saver.save(sess,model_fn)
| [
"rsronnypetson4@gmail.com"
] | rsronnypetson4@gmail.com |
42d77cdb15f7031c1d699412730a8035bd7e471a | 367d2670c75d385d122bca60b9f550ca5b3888c1 | /gem5/env/lib/python3.6/site-packages/kombu/asynchronous/http/__init__.py | e776977dd40d3fa99f91d5b31d93c25a7d36b580 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | Anish-Saxena/aqua_rowhammer_mitigation | 4f060037d50fb17707338a6edcaa0ac33c39d559 | 3fef5b6aa80c006a4bd6ed4bedd726016142a81c | refs/heads/main | 2023-04-13T05:35:20.872581 | 2023-01-05T21:10:39 | 2023-01-05T21:10:39 | 519,395,072 | 4 | 3 | Unlicense | 2023-01-05T21:10:40 | 2022-07-30T02:03:02 | C++ | UTF-8 | Python | false | false | 591 | py | from kombu.asynchronous import get_event_loop
from .base import Request, Headers, Response
__all__ = ('Client', 'Headers', 'Response', 'Request')
def Client(hub=None, **kwargs):
    """Create new HTTP client.

    The curl backend is imported lazily, inside the call, so it is only
    loaded once a client is actually constructed.
    """
    from .curl import CurlClient
    return CurlClient(hub, **kwargs)
def get_client(hub=None, **kwargs):
    """Get or create HTTP client bound to the current event loop.

    NOTE: ``**kwargs`` are only honoured on the first call for a given
    hub; later calls return the cached client unchanged.
    """
    hub = hub or get_event_loop()
    try:
        # Fast path: reuse the client previously cached on this hub.
        return hub._current_http_client
    except AttributeError:
        # First request for this hub: create a client and cache it on the
        # hub object so subsequent calls return the same instance.
        client = hub._current_http_client = Client(hub, **kwargs)
        return client
| [
"asaxena317@krishna-srv4.ece.gatech.edu"
] | asaxena317@krishna-srv4.ece.gatech.edu |
00fb0b2202d07d72ab8075b038f6426190d4d82e | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Hard/1449.form-largest-integer-with-digits-that-add-up-to-target.py | fde6df1309dddc7154ccfbf41d760c6ba9bd1dbe | [] | no_license | h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | #
# @lc app=leetcode id=1449 lang=python3
#
# [1449] Form Largest Integer With Digits That Add up to Target
#
# https://leetcode.com/problems/form-largest-integer-with-digits-that-add-up-to-target/description/
#
# algorithms
# Hard (42.08%)
# Total Accepted: 6.5K
# Total Submissions: 15.5K
# Testcase Example: '[4,3,2,5,6,7,2,5,5]\n9'
#
# Given an array of integers cost and an integer target. Return the maximum
# integer you can paint under the following rules:
#
#
# The cost of painting a digit (i+1) is given by cost[i] (0 indexed).
# The total cost used must be equal to target.
# Integer does not have digits 0.
#
#
# Since the answer may be too large, return it as string.
#
# If there is no way to paint any integer given the condition, return "0".
#
#
# Example 1:
#
#
# Input: cost = [4,3,2,5,6,7,2,5,5], target = 9
# Output: "7772"
# Explanation: The cost to paint the digit '7' is 2, and the digit '2' is 3.
# Then cost("7772") = 2*3+ 3*1 = 9. You could also paint "977", but "7772" is
# the largest number.
# Digit cost
# 1 -> 4
# 2 -> 3
# 3 -> 2
# 4 -> 5
# 5 -> 6
# 6 -> 7
# 7 -> 2
# 8 -> 5
# 9 -> 5
#
#
# Example 2:
#
#
# Input: cost = [7,6,5,5,5,6,8,7,8], target = 12
# Output: "85"
# Explanation: The cost to paint the digit '8' is 7, and the digit '5' is 5.
# Then cost("85") = 7 + 5 = 12.
#
#
# Example 3:
#
#
# Input: cost = [2,4,6,2,4,6,4,4,4], target = 5
# Output: "0"
# Explanation: It's not possible to paint any integer with total cost equal to
# target.
#
#
# Example 4:
#
#
# Input: cost = [6,10,15,40,40,40,40,40,40], target = 47
# Output: "32211"
#
#
#
# Constraints:
#
#
# cost.length == 9
# 1 <= cost[i] <= 5000
# 1 <= target <= 5000
#
#
#
class Solution:
    def largestNumber(self, cost: List[int], target: int) -> str:
        """Largest integer (as a string) whose digit costs sum to target.

        cost[i] is the cost of painting digit i+1; digit 0 is not allowed
        and the total cost must equal target exactly. Returns "0" when no
        integer can be painted.

        Unbounded-knapsack DP: dp[t] is the maximum digit count reachable
        with exact cost t (-1 = unreachable). The answer is then rebuilt
        greedily, preferring the largest digit at every position while
        staying on an optimal path.
        """
        dp = [0] + [-1] * target
        for t in range(1, target + 1):
            for c in cost:
                if t >= c and dp[t - c] >= 0 and dp[t - c] + 1 > dp[t]:
                    dp[t] = dp[t - c] + 1
        if dp[target] < 0:
            return "0"
        digits = []
        t = target
        for d in range(9, 0, -1):
            c = cost[d - 1]
            # Keep taking digit d while doing so preserves the optimal
            # remaining digit count.
            while t >= c and dp[t - c] == dp[t] - 1:
                digits.append(str(d))
                t -= c
        return "".join(digits)
| [
"kevin.wkmiao@gmail.com"
] | kevin.wkmiao@gmail.com |
f704899af782cf07017450df852c1d1f05a0a89d | 3717822e2dc62100125906b7610c09aac0ef669e | /hw_3/lesson/server.py | 45e64de2b469405994a5c31828fc681346686da4 | [] | no_license | Nlegion/ClSe | d3fd5879becf0b85aa2b5da04077801afa2e6423 | 47a25518a4431693ba4768669f41fd4de95c1cfb | refs/heads/main | 2023-06-16T15:11:23.073744 | 2021-07-11T08:34:54 | 2021-07-11T08:34:54 | 377,102,136 | 0 | 0 | null | 2021-07-11T10:20:17 | 2021-06-15T09:08:18 | Python | UTF-8 | Python | false | false | 549 | py | from socket import *
import time
# Minimal TCP server: handles one connection at a time on port 8888,
# reads a single message, prints it, sends an acknowledgement, and closes.
# `time` is only used by the commented-out timestamp reply below.
while True:
    client, adr = s.accept()
    # print(f'Подключение по адресу{adr}')
    # time_str = time.ctime(time.time())
    # client.send(time_str.encode('utf-8'))
    data = client.recv(1024)  # read at most 1 KiB from the client
    decoded_data = data.decode('UTF-8')
    # "Сообщение от клиента" = "message from the client".
    print(f'Сообщение от клиента: {decoded_data}')
    # "Сообщение получено" = "message received" (the acknowledgement text).
    msg_to_client = 'Сообщение получено'
    client.send(msg_to_client.encode('UTF-8'))
    client.close()
| [
"nparanoid@gmail.com"
] | nparanoid@gmail.com |
acc50216c2e161d88c476973b950167d90d80243 | 7123affac23f7e35dcbf0e6e2d8bb31000b55cc5 | /hooks/reviewboard/check-approvers | 661c2887af76e501f8aaac50219c0dc50d4fb1e7 | [
"BSD-2-Clause"
] | permissive | mdxp/git-hooks | cea923d47dd74cae6a77a88054020f99eb288531 | df4a0a5bec1b8449175e76a153c99b8852e77b42 | refs/heads/master | 2021-01-15T20:52:39.919377 | 2012-02-17T00:42:17 | 2012-02-17T00:42:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,175 | #!/usr/bin/python2.6
#
# Copyright (c) 2012 by Delphix.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# This script verifies that each review associated with a set of bugs is
# well-formed. This comprises the following checks:
#
# 1. There are at least two reviews marked "ship it".
#
# 2. At least one of the approvers is in the specified reviewboard group.
#
# 3. For each user that has commented on a review, make sure that the last
# comment is marked "ship it" -- the exception being reviews from the
# submitter which are ignored.
#
# Examples:
#
# check-approvers gatekeepers 9378 10013
# check-approvers qa-gatekeepers 10234
#
# This script assumes that the bug list has successfully passed 'check-bugs'
# and therefore all bugs are covered by a pending review. If any pending
# review doesn't match the above criteria, an error is printed and a non-zero
# exit status is returned.
#
import sys
import rblib
import rbutils
def check_review(requestid):
    """Validate a single review request (Python 2).

    Returns 0 when the review is well-formed and 1 otherwise, printing an
    ERROR line for each violated rule: every reviewer's latest review must
    be marked "ship it", at least one reviewer must be in the gatekeeper
    group (module-level `approvers`), and there must be at least two
    reviewers. The submitter's own reviews are ignored.
    """
    ret = 0
    request = server.get_review_request(requestid)
    submitter = request['links']['submitter']['title']
    # Iterate over all reviews and remember the last review by each user.
    reviews = server.api_get_list(request['links']['reviews']['href'],
                                  'reviews')
    reviewer = {}
    for r in reviews:
        try:
            #
            # This will fail if the given user has been disabled. We can
            # safely catch and ignore these failures, considering only active
            # accounts for reviewers and approvers.
            #
            user = server.api_get(r['links']['user']['href'])['user']
            username = user['username']
            if username != submitter:
                reviewer[username] = r
        except rblib.APIError:
            pass
    approved = False
    for u in reviewer:
        r = reviewer[u]
        if not r['ship_it']:
            print "ERROR: review %d must be approved by '%s'" % \
                (request['id'], u)
            ret = 1
        elif u in approvers:
            # A "ship it" from a gatekeeper counts as group approval.
            approved = True
    if not approved:
        print "ERROR: review %d has not been " % (request['id']) + \
            "approved by a gatekeeper"
        ret = 1
    if len(reviewer) < 2:
        print "ERROR: review %d must have at least 2 reviewers" % \
            (request['id'])
        ret = 1
    return ret
# Get the list of reviewboard approvers
server = rblib.ReviewBoardServer()
approvers = {}
try:
group = server.get_group(sys.argv[1])
members = server.api_get_list(group['links']['users']['href'], 'users')
for u in members:
approvers[u['username']] = True
except rblib.APIError:
pass
if not approvers:
print "ERROR: the approvers list is empty"
sys.exit(1)
toprocess = rbutils.get_reviews(sys.argv[2:])
exitstatus = 0
for rid in toprocess.keys():
if check_review(rid) != 0:
exitstatus = 1
sys.exit(exitstatus)
| [
"zubairkhan1@gmail.com"
] | zubairkhan1@gmail.com | |
c32489949c21f1d787027432e415ed51734417f1 | aef92848de596e2477a97e27437438e3ccdca9ae | /languagegames/wsgi.py | 05d3b555d9301e4200d73208a1b34d0f14933fcd | [] | no_license | DeasDeas/lang_games_dev | 5007a5a81751b052d6df0aba9fcc09b6349405fe | bc2e5857f0da24e55c7d1c4fe37e8ee5ea27d14a | refs/heads/master | 2023-04-19T06:12:23.030456 | 2021-05-02T11:28:52 | 2021-05-02T11:28:52 | 348,166,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for languagegames project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings before the app registry loads.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'languagegames.settings')
# Module-level WSGI callable that application servers import and call.
application = get_wsgi_application()
| [
"mikenic30@gmail.com"
] | mikenic30@gmail.com |
6c657d68c1dd56e03c8efa7e27ee1ef0eb9d352a | 574c7a1a8a905b2e5751d4aae48c8f24f354a328 | /mfdw_root/quotes/urls.py | 349cc97da9feed9f564a5e9d059c8c59c42f7fed | [
"Apache-2.0"
] | permissive | mohammadasim/mfdw_project | 45584674bed3988652f3891fd6f775edf9e5f779 | 5e610ebc34b98695083c6e65ff080abc769cb312 | refs/heads/master | 2021-09-28T14:21:18.841893 | 2020-03-01T06:36:30 | 2020-03-01T06:36:30 | 240,715,976 | 2 | 0 | Apache-2.0 | 2021-09-22T18:37:28 | 2020-02-15T13:21:05 | Python | UTF-8 | Python | false | false | 291 | py | from django.urls import path
from . import views
from .views import QuoteList, QuoteView
# URL routes for the quotes app; each `name` identifies its route for
# URL reversing.
urlpatterns = [
    path('', views.quote_req, name='quote-request'),
    path('show/<int:pk>', QuoteView.as_view(), name='quote-details'),
    path('show', QuoteList.as_view(), name='show-quotes'),
]
| [
"asimayub81@gmail.com"
] | asimayub81@gmail.com |
e9ff71e25c72bde3be187a7d428d1bc7887ae72e | c3e0f2d7e9a96c30a8ebeeebbad13a45b1ad0410 | /deepcheckpe.py | 78f7b3dcbc2206e59f50778d858b133e4eae375d | [
"MIT"
] | permissive | rahulremanan/deeplearning_AV | 7ea6c3b59c207991434456eadd78662c59394c5a | b4f2a65e445ad1d0f9a6357bd63cea7f2125f210 | refs/heads/master | 2021-01-20T10:18:10.888102 | 2017-05-05T05:59:45 | 2017-05-05T05:59:45 | 90,341,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,764 | py | #! /usr/bin/python2
import pefile
import os
import array
import math
import pickle
from sklearn.externals import joblib
import sys
import argparse
def get_entropy(data):
    """Shannon entropy (bits per symbol) of a byte/char sequence.

    Returns 0.0 for empty input. Accepts both bytes (ints when iterated)
    and str (converted via ord), so it works under Python 2 and 3.
    """
    if len(data) == 0:
        return 0.0
    counts = array.array('L', [0] * 256)
    for symbol in data:
        counts[symbol if isinstance(symbol, int) else ord(symbol)] += 1
    total = len(data)
    result = 0.0
    for count in counts:
        if count:
            p = float(count) / total
            result -= p * math.log(p, 2)
    return result
def get_resources(pe):
    """Extract PE resources as a list of [entropy, size] pairs.

    Walks the three-level resource directory (type -> id -> language) and
    records, for each leaf resource, the Shannon entropy of its raw data
    and its declared size.
    """
    resources = []
    if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
        try:
            for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
                if hasattr(resource_type, 'directory'):
                    for resource_id in resource_type.directory.entries:
                        if hasattr(resource_id, 'directory'):
                            for resource_lang in resource_id.directory.entries:
                                data = pe.get_data(resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
                                size = resource_lang.data.struct.Size
                                entropy = get_entropy(data)
                                resources.append([entropy, size])
        # Best-effort: on any parsing error, return what was collected so
        # far rather than failing the whole feature extraction.
        except Exception as e:
            return resources
    return resources
def get_version_info(pe):
    """Return the PE version-information resource as a flat dict.

    Collects StringFileInfo and VarFileInfo entries, then adds the fixed
    VS_FIXEDFILEINFO fields when present.

    NOTE(review): the `.items()[0]` indexing is Python-2 only (dict views
    are not subscriptable in Python 3), matching this script's python2
    shebang -- confirm before porting.
    """
    res = {}
    for fileinfo in pe.FileInfo:
        if fileinfo.Key == 'StringFileInfo':
            for st in fileinfo.StringTable:
                for entry in st.entries.items():
                    res[entry[0]] = entry[1]
        if fileinfo.Key == 'VarFileInfo':
            for var in fileinfo.Var:
                res[var.entry.items()[0][0]] = var.entry.items()[0][1]
    # Fixed-layout version header, when the PE provides one.
    if hasattr(pe, 'VS_FIXEDFILEINFO'):
        res['flags'] = pe.VS_FIXEDFILEINFO.FileFlags
        res['os'] = pe.VS_FIXEDFILEINFO.FileOS
        res['type'] = pe.VS_FIXEDFILEINFO.FileType
        res['file_version'] = pe.VS_FIXEDFILEINFO.FileVersionLS
        res['product_version'] = pe.VS_FIXEDFILEINFO.ProductVersionLS
        res['signature'] = pe.VS_FIXEDFILEINFO.Signature
        res['struct_version'] = pe.VS_FIXEDFILEINFO.StrucVersion
    return res
def extract_infos(fpath):
    """Extract the scalar feature dict the classifier was trained on.

    Parses the PE file at ``fpath`` with pefile and returns a dict with:
    COFF/optional-header fields, per-section entropy/size statistics,
    import/export counts, resource statistics, the load-config size and
    the number of version-info entries.  Keys must stay exactly as they
    are — they match the feature names saved at training time.
    """
    res = {}
    pe = pefile.PE(fpath)

    # --- COFF file header ---
    res['Machine'] = pe.FILE_HEADER.Machine
    res['SizeOfOptionalHeader'] = pe.FILE_HEADER.SizeOfOptionalHeader
    res['Characteristics'] = pe.FILE_HEADER.Characteristics

    # --- Optional header ---
    res['MajorLinkerVersion'] = pe.OPTIONAL_HEADER.MajorLinkerVersion
    res['MinorLinkerVersion'] = pe.OPTIONAL_HEADER.MinorLinkerVersion
    res['SizeOfCode'] = pe.OPTIONAL_HEADER.SizeOfCode
    res['SizeOfInitializedData'] = pe.OPTIONAL_HEADER.SizeOfInitializedData
    res['SizeOfUninitializedData'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData
    res['AddressOfEntryPoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    res['BaseOfCode'] = pe.OPTIONAL_HEADER.BaseOfCode
    try:
        res['BaseOfData'] = pe.OPTIONAL_HEADER.BaseOfData
    except AttributeError:
        # PE32+ (64-bit) images have no BaseOfData field.
        res['BaseOfData'] = 0
    res['ImageBase'] = pe.OPTIONAL_HEADER.ImageBase
    res['SectionAlignment'] = pe.OPTIONAL_HEADER.SectionAlignment
    res['FileAlignment'] = pe.OPTIONAL_HEADER.FileAlignment
    res['MajorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MajorOperatingSystemVersion
    res['MinorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MinorOperatingSystemVersion
    res['MajorImageVersion'] = pe.OPTIONAL_HEADER.MajorImageVersion
    res['MinorImageVersion'] = pe.OPTIONAL_HEADER.MinorImageVersion
    res['MajorSubsystemVersion'] = pe.OPTIONAL_HEADER.MajorSubsystemVersion
    res['MinorSubsystemVersion'] = pe.OPTIONAL_HEADER.MinorSubsystemVersion
    res['SizeOfImage'] = pe.OPTIONAL_HEADER.SizeOfImage
    res['SizeOfHeaders'] = pe.OPTIONAL_HEADER.SizeOfHeaders
    res['CheckSum'] = pe.OPTIONAL_HEADER.CheckSum
    res['Subsystem'] = pe.OPTIONAL_HEADER.Subsystem
    res['DllCharacteristics'] = pe.OPTIONAL_HEADER.DllCharacteristics
    res['SizeOfStackReserve'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
    res['SizeOfStackCommit'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
    res['SizeOfHeapReserve'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
    res['SizeOfHeapCommit'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
    res['LoaderFlags'] = pe.OPTIONAL_HEADER.LoaderFlags
    res['NumberOfRvaAndSizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes

    # --- Sections ---
    # BUGFIX: wrap map()/filter() in list() so len()/min()/max() also work
    # on Python 3, where they return lazy iterators.  Behaviour on the
    # original Python 2 interpreter is unchanged.
    res['SectionsNb'] = len(pe.sections)
    entropy = list(map(lambda x: x.get_entropy(), pe.sections))
    res['SectionsMeanEntropy'] = sum(entropy) / float(len(entropy))
    res['SectionsMinEntropy'] = min(entropy)
    res['SectionsMaxEntropy'] = max(entropy)
    raw_sizes = list(map(lambda x: x.SizeOfRawData, pe.sections))
    res['SectionsMeanRawsize'] = sum(raw_sizes) / float(len(raw_sizes))
    res['SectionsMinRawsize'] = min(raw_sizes)
    res['SectionsMaxRawsize'] = max(raw_sizes)
    virtual_sizes = list(map(lambda x: x.Misc_VirtualSize, pe.sections))
    res['SectionsMeanVirtualsize'] = sum(virtual_sizes) / float(len(virtual_sizes))
    res['SectionsMinVirtualsize'] = min(virtual_sizes)
    # NOTE: missing "s" in "Section" is intentional — it matches the
    # feature name used when the classifier was trained.
    res['SectionMaxVirtualsize'] = max(virtual_sizes)

    # --- Imports ---
    try:
        res['ImportsNbDLL'] = len(pe.DIRECTORY_ENTRY_IMPORT)
        imports = sum([x.imports for x in pe.DIRECTORY_ENTRY_IMPORT], [])
        res['ImportsNb'] = len(imports)
        # Imports by ordinal have no symbol name.
        res['ImportsNbOrdinal'] = len(list(filter(lambda x: x.name is None, imports)))
    except AttributeError:
        # No import directory at all.
        res['ImportsNbDLL'] = 0
        res['ImportsNb'] = 0
        res['ImportsNbOrdinal'] = 0

    # --- Exports ---
    try:
        res['ExportNb'] = len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
    except AttributeError:
        # No export directory.
        res['ExportNb'] = 0

    # --- Resources ---
    resources = get_resources(pe)
    res['ResourcesNb'] = len(resources)
    if len(resources) > 0:
        entropy = list(map(lambda x: x[0], resources))
        res['ResourcesMeanEntropy'] = sum(entropy) / float(len(entropy))
        res['ResourcesMinEntropy'] = min(entropy)
        res['ResourcesMaxEntropy'] = max(entropy)
        sizes = list(map(lambda x: x[1], resources))
        res['ResourcesMeanSize'] = sum(sizes) / float(len(sizes))
        res['ResourcesMinSize'] = min(sizes)
        res['ResourcesMaxSize'] = max(sizes)
    else:
        res['ResourcesNb'] = 0
        res['ResourcesMeanEntropy'] = 0
        res['ResourcesMinEntropy'] = 0
        res['ResourcesMaxEntropy'] = 0
        res['ResourcesMeanSize'] = 0
        res['ResourcesMinSize'] = 0
        res['ResourcesMaxSize'] = 0

    # --- Load configuration size ---
    try:
        res['LoadConfigurationSize'] = pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.Size
    except AttributeError:
        res['LoadConfigurationSize'] = 0

    # --- Version information ---
    try:
        version_infos = get_version_info(pe)
        res['VersionInformationSize'] = len(version_infos.keys())
    except AttributeError:
        res['VersionInformationSize'] = 0
    return res
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Detect malicious files')
    parser.add_argument('FILE', help='File to be tested')
    args = parser.parse_args()

    # Resolve artefacts relative to this script, not the current directory.
    base_dir = os.path.dirname(os.path.realpath(__file__))

    # Load classifier
    clf = joblib.load(os.path.join(base_dir, 'classifier/classifier.pkl'))

    # BUGFIX: `features` was referenced below but never defined (NameError
    # at runtime).  `pickle` was imported for exactly this purpose: load the
    # ordered feature-name list saved next to the classifier at training
    # time.  Adjust the filename if your training step saved it elsewhere.
    with open(os.path.join(base_dir, 'classifier/features.pkl'), 'rb') as f:
        features = pickle.load(f)

    data = extract_infos(args.FILE)
    # Keep the exact feature order the classifier was trained with
    # (list, not map(), so this also works on Python 3).
    pe_features = [data[x] for x in features]
    res = clf.predict([pe_features])[0]
    print('The file %s is %s' % (
        os.path.basename(args.FILE),
        ['malicious', 'legitimate'][res])
    )
| [
"noreply@github.com"
] | noreply@github.com |
c013deed8361d585c85ccfe355c7c1dc549354c4 | e29f9878cbb84f135e95645c93e5a9fbf0ec5f37 | /node_modules/fsevents/build/config.gypi | 349fb041904c4311865b97e3299489870d574cfd | [
"MIT"
] | permissive | ishdaddy7/trip-planner-static | 9d0f1f3b6aa434a9d629e632a98dc3e6505e4550 | 3bcafae4b5ad818890eb371303e36e8522ee36e7 | refs/heads/master | 2021-01-19T05:31:29.252952 | 2016-06-27T20:47:37 | 2016-06-27T20:47:37 | 62,087,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,431 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/shan/.node-gyp/6.1.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/shan/Documents/MediaMath/Personal/Fullstack/trip-planner/node_modules/fsevents/lib/binding/Release/node-v48-darwin-x64/fse.node",
"module_name": "fse",
"module_path": "/Users/shan/Documents/MediaMath/Personal/Fullstack/trip-planner/node_modules/fsevents/lib/binding/Release/node-v48-darwin-x64",
"legacy_bundling": "",
"dry_run": "",
"save_dev": "true",
"viewer": "man",
"only": "",
"browser": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/shan/.nvm/versions/node/v6.1.0/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"npat": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/Users/shan/.nvm/versions/node/v6.1.0/etc/npmrc",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/shan/.npmrc",
"init_module": "/Users/shan/.npm-init.js",
"user": "450657862",
"node_version": "6.1.0",
"save": "",
"editor": "vi",
"tag": "latest",
"progress": "true",
"global": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "1113146471",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/shan/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/3.8.6 node/v6.1.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"scope": "",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/lb/f7wjnc351d17cc65msdsjs4hddrzk6/T",
"onload_script": "",
"prefix": "/Users/shan/.nvm/versions/node/v6.1.0",
"link": ""
}
}
| [
"seanhan1@gmail.com"
] | seanhan1@gmail.com |
6dbbd47ba3700bf7e791869bf7e6824c6fdb295c | 03d7dccd9417c1a901ab7e8974216a0c7dce6d5e | /pylti1p3/tool_config/dict.py | fc4b01e39c13d8e015815917afc695dd94d5c7ab | [
"MIT"
] | permissive | ziegenberg/pylti1.3 | 7dbcf83c376b8a50b267f342ef5aa771f6cd7795 | acb10c9b932683593ab6172c648105239f81308c | refs/heads/master | 2022-10-24T22:14:56.458373 | 2020-05-04T17:25:24 | 2020-05-04T17:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,633 | py | from .abstract import ToolConfAbstract
from ..registration import Registration
from ..deployment import Deployment
class ToolConfDict(ToolConfAbstract):
    """LTI 1.3 tool configuration held in a plain dict.

    Resolves ``Registration`` / ``Deployment`` objects from the supplied
    configuration and stores the tool's public/private keys per issuer
    (and, when an issuer has many clients, per client_id).
    """
    _config = None
    _private_key = None
    _public_key = None

    def __init__(self, json_data):
        """
        json_data is a dict where each key is issuer and value is issuer's configuration.
        Configuration could be set in two formats:

        1. { ... "iss": { ... "client_id: "client" ... }, ... }

        In this case the library will work in the concept: one issuer ~ one client-id

        2. { ... "iss": [ { ... "client_id: "client1" ... }, { ... "client_id: "client2" ... } ], ... }

        In this case the library will work in concept: one issuer ~ many client-ids

        Example:
            {
                "iss1": [{
                        "default": True,
                        "client_id": "client_id1",
                        "auth_login_url": "auth_login_url1",
                        "auth_token_url": "auth_token_url1",
                        "auth_audience": None,
                        "key_set_url": "key_set_url1",
                        "key_set": None,
                        "deployment_ids": ["deployment_id1", "deployment_id2"]
                    }, {
                        "default": False,
                        "client_id": "client_id2",
                        "auth_login_url": "auth_login_url2",
                        "auth_token_url": "auth_token_url2",
                        "auth_audience": None,
                        "key_set_url": "key_set_url2",
                        "key_set": None,
                        "deployment_ids": ["deployment_id3", "deployment_id4"]
                    }],
                "iss2": [ .... ]
            }

        default (bool) - this iss config will be used in case if client-id was not passed on the login step
        client_id - this is the id received in the 'aud' during a launch
        auth_login_url - the platform's OIDC login endpoint
        auth_token_url - the platform's service authorization endpoint
        auth_audience - the platform's OAuth2 Audience (aud). Is used to get platform's access token.
            Usually the same as "auth_token_url" but in the common case could be a different url
        key_set_url - the platform's JWKS endpoint
        key_set - in case if platform's JWKS endpoint somehow unavailable you may paste JWKS here
        deployment_ids (list) - The deployment_id passed by the platform during launch
        """
        super(ToolConfDict, self).__init__()
        if not isinstance(json_data, dict):
            raise Exception("Invalid tool conf format. Must be dict")
        for iss, iss_conf in json_data.items():
            if isinstance(iss_conf, dict):
                self.set_iss_has_one_client(iss)
                self._validate_iss_config_item(iss, iss_conf)
            elif isinstance(iss_conf, list):
                self.set_iss_has_many_clients(iss)
                for v in iss_conf:
                    self._validate_iss_config_item(iss, v)
            else:
                raise Exception("Invalid tool conf format. Allowed types of elements: list or dict")
        self._config = json_data
        self._private_key = {}
        self._public_key = {}

    def _validate_iss_config_item(self, iss, iss_config_item):
        """Validate the shape and required keys of one issuer config entry."""
        if not isinstance(iss_config_item, dict):
            # BUGFIX: the two format arguments were swapped, yielding
            # "Invalid configuration <iss> for the <config> issuer".
            raise Exception("Invalid configuration %s for the %s issuer. Must be dict"
                            % (str(iss_config_item), iss))
        required_keys = ['auth_login_url', 'auth_token_url', 'client_id', 'deployment_ids']
        for key in required_keys:
            if key not in iss_config_item:
                raise Exception("Key '%s' is missing in the %s config for the %s issuer"
                                % (key, str(iss_config_item), iss))
        if not isinstance(iss_config_item['deployment_ids'], list):
            raise Exception("Invalid deployment_ids value in the %s config for the %s issuer. Must be a list"
                            % (str(iss_config_item), iss))

    def _get_registration(self, iss, iss_conf):
        """Build a Registration object from one issuer config entry."""
        reg = Registration()
        reg.set_auth_login_url(iss_conf['auth_login_url'])\
            .set_auth_token_url(iss_conf['auth_token_url'])\
            .set_client_id(iss_conf['client_id'])\
            .set_key_set(iss_conf.get('key_set'))\
            .set_key_set_url(iss_conf.get('key_set_url'))\
            .set_issuer(iss)\
            .set_tool_private_key(self.get_private_key(iss, iss_conf['client_id']))
        auth_audience = iss_conf.get('auth_audience')
        if auth_audience:
            reg.set_auth_audience(auth_audience)
        public_key = self.get_public_key(iss, iss_conf['client_id'])
        if public_key:
            reg.set_tool_public_key(public_key)
        return reg

    def _get_deployment(self, iss_conf, deployment_id):
        """Return a Deployment for deployment_id, or None if not configured."""
        if deployment_id not in iss_conf['deployment_ids']:
            return None
        d = Deployment()
        return d.set_deployment_id(deployment_id)

    def find_registration_by_issuer(self, iss, *args, **kwargs):
        """Resolve the registration for an issuer (one-client / default mode)."""
        iss_conf = self.get_iss_config(iss)
        return self._get_registration(iss, iss_conf)

    def find_registration_by_params(self, iss, client_id, *args, **kwargs):
        """Resolve the registration for an (issuer, client_id) pair."""
        iss_conf = self.get_iss_config(iss, client_id)
        return self._get_registration(iss, iss_conf)

    def find_deployment(self, iss, deployment_id):
        """Resolve a deployment for an issuer (one-client / default mode)."""
        iss_conf = self.get_iss_config(iss)
        return self._get_deployment(iss_conf, deployment_id)

    def find_deployment_by_params(self, iss, deployment_id, client_id, *args, **kwargs):
        """Resolve a deployment for an (issuer, client_id) pair."""
        iss_conf = self.get_iss_config(iss, client_id)
        return self._get_deployment(iss_conf, deployment_id)

    def set_public_key(self, iss, key_content, client_id=None):
        """Store the tool's public key for iss (per client_id in many-client mode)."""
        if self.check_iss_has_many_clients(iss):
            if not client_id:
                raise Exception("Can't set public key: missing client_id")
            if iss not in self._public_key:
                self._public_key[iss] = {}
            self._public_key[iss][client_id] = key_content
        else:
            self._public_key[iss] = key_content

    def get_public_key(self, iss, client_id=None):
        """Return the stored public key for iss (and client_id), or None."""
        if self.check_iss_has_many_clients(iss):
            if not client_id:
                raise Exception("Can't get public key: missing client_id")
            return self._public_key.get(iss, {}).get(client_id)
        else:
            return self._public_key.get(iss)

    def set_private_key(self, iss, key_content, client_id=None):
        """Store the tool's private key for iss (per client_id in many-client mode)."""
        if self.check_iss_has_many_clients(iss):
            if not client_id:
                raise Exception("Can't set private key: missing client_id")
            if iss not in self._private_key:
                self._private_key[iss] = {}
            self._private_key[iss][client_id] = key_content
        else:
            self._private_key[iss] = key_content

    def get_private_key(self, iss, client_id=None):
        """Return the stored private key for iss (and client_id), or None."""
        if self.check_iss_has_many_clients(iss):
            if not client_id:
                raise Exception("Can't get private key: missing client_id")
            return self._private_key.get(iss, {}).get(client_id)
        else:
            return self._private_key.get(iss)

    def get_iss_config(self, iss, client_id=None):
        """Return the raw config entry for iss.

        In many-client mode the entry is chosen by client_id, by the
        "default" flag, or implicitly when there is exactly one entry.
        """
        if iss not in self._config:
            raise Exception('iss %s not found in settings' % iss)
        if isinstance(self._config[iss], list):
            items_len = len(self._config[iss])
            for subitem in self._config[iss]:
                if (client_id and subitem['client_id'] == client_id)\
                        or (not client_id and subitem.get('default', False))\
                        or (not client_id and items_len == 1):
                    return subitem
            raise Exception('iss %s [client_id=%s] not found in settings' % (iss, client_id))
        return self._config[iss]

    def get_jwks(self, iss=None, client_id=None, **kwargs):
        """Return a JWKS dict of all distinct stored public keys.

        With iss/client_id given, defer to the base-class single-key lookup.
        """
        if iss or client_id:
            return super(ToolConfDict, self).get_jwks(iss, client_id)
        public_keys = []
        for iss_item in self._public_key.values():
            if isinstance(iss_item, dict):
                # many-client issuer: one key per client_id
                for pub_key in iss_item.values():
                    if pub_key not in public_keys:
                        public_keys.append(pub_key)
            else:
                if iss_item not in public_keys:
                    public_keys.append(iss_item)
        return {
            'keys': [Registration.get_jwk(k) for k in public_keys]
        }
| [
"dmitry.viskov@webenterprise.ru"
] | dmitry.viskov@webenterprise.ru |
07ddacbb56954526dfc54b49ec898b630b576e55 | 2d4b7280fac70fd922dc203f07d89241f3c21535 | /src/cloudify/aria_extension_cloudify/classic_modeling/policies.py | 238124b82d67385d4d632d119bc37bfe4460d753 | [
"Apache-2.0"
] | permissive | tliron/aria-ng | 602f0cad18df0332e25be03cc834e7a42cb7c674 | 55cf7af3b0a8fe62d422dd687dd7da3849824524 | refs/heads/master | 2020-08-02T18:10:30.735677 | 2016-11-12T14:19:32 | 2016-11-14T18:23:57 | 73,556,794 | 0 | 2 | null | 2016-11-12T14:17:45 | 2016-11-12T14:17:45 | null | UTF-8 | Python | false | false | 676 | py | #
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
SCALING_POLICY_NAME = 'cloudify.policies.scaling'
| [
"tal.liron@gmail.com"
] | tal.liron@gmail.com |
3f1212e0ae34cf0e0cbc7f81b7c67f782074169c | 8597d43d7990e1b93cda47646e7abffe565f9bbd | /constants.py | 60d81a9cac2ea8f4b15ac897e00fcd4cb0a69c7f | [
"Apache-2.0"
] | permissive | tkim949/REST_API_GCP | a34c55b3a43bc0ecd3f01f3b36fe2247c924df53 | a103af6d7a50484a03e12dfe8d4949ead4fce714 | refs/heads/master | 2023-05-11T02:52:29.728280 | 2020-06-16T02:27:28 | 2020-06-16T02:27:28 | 272,573,382 | 0 | 0 | Apache-2.0 | 2023-05-01T21:41:49 | 2020-06-16T00:36:24 | Python | UTF-8 | Python | false | false | 49 | py | shelf = "shelf"
product = "product"
user = "user" | [
"noreply@github.com"
] | noreply@github.com |
1c016fb55113529b8bfed2ec15edfaa96e64c745 | 7918d00a8235c1252fdd75e2db6de4d323740318 | /env/bin/easy_install-2.7 | 7a324fbf1f470f876325975cfe88a987d9231b97 | [] | no_license | tom-sb/Mayorista | 92e1a68402a35785f355fe11ad935071cc7264d3 | 635f26aae073da42ea58d3f056959a35e08a4bd3 | refs/heads/master | 2020-11-24T16:46:46.271448 | 2019-12-19T11:14:27 | 2019-12-19T11:14:27 | 228,254,669 | 0 | 2 | null | 2019-12-19T11:14:29 | 2019-12-15T21:22:34 | CSS | UTF-8 | Python | false | false | 285 | 7 | #!/home/tomsb/Escritorio/ing_soft2/IS2CS_PY/Mayorista/env/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix that Windows launcher wrappers
    # append, so setuptools sees the bare command name, then delegate to
    # easy_install's main().  (Generated setuptools entry-point script.)
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"fvillanuevanut@gmail.com"
] | fvillanuevanut@gmail.com |
8be08dd6874505f2e4e7c8249bc1f549985c0504 | e19cbb9869a79fa1af1063a9291cd5624a5edd6f | /Stack:Queue/프린터.py | 67532af370e4dbce2618ecf2c5c7846e503ed09f | [] | no_license | YNNJN/Programmers | c62194163a085231cd22c946c320a2937d69e478 | 434a62a91a1dc5e16b32f4e64c77fd15776d51e1 | refs/heads/master | 2023-02-08T11:14:15.399122 | 2021-01-03T13:27:17 | 2021-01-03T13:27:17 | 256,219,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | #M1 인쇄 목록과 인덱스를 함께 기억 -> enumerate()
#M2 어차피 인쇄되는 건 우선순위가 가장 큰 값이니, 해당 값을 기준으로 조건을 분기
def solution(priorities, location):
    """Return the 1-based print order of the job at index `location`.

    Jobs are taken from the front of the queue; a job is printed only when
    no job with a strictly higher priority remains, otherwise it is moved
    to the back (Programmers "printer" problem).

    Note: consumes `priorities` in place, like the original implementation.
    """
    printed = 0
    highest = max(priorities)
    while True:
        job = priorities.pop(0)
        if job < highest:
            # Not the most urgent job left: requeue it at the back and
            # shift the tracked position of our target job accordingly.
            priorities.append(job)
            location = len(priorities) - 1 if location == 0 else location - 1
            continue
        # Most urgent job: it gets printed now.
        printed += 1
        if location == 0:
            return printed
        location -= 1
        highest = max(priorities)
| [
"dbswls1014@naver.com"
] | dbswls1014@naver.com |
2c07153d4d45ae259056bf66322d03559d0835bf | 38bf7319caa9033f5a7a432d2fe3914692e3e17d | /tools/viz/check_group.py | 997805dcc3a21576131ab019fa82bf4be5f466f9 | [
"MIT"
] | permissive | XinGuoZJU/gc-horizon-detector | 6e9fe7893637b9267cb0a9f32348736fa722fd07 | c41f5606aca756281fb518987652403aec7d6ed3 | refs/heads/master | 2020-07-02T21:02:19.133389 | 2019-11-16T03:36:30 | 2019-11-16T03:36:30 | 201,664,806 | 0 | 0 | null | 2019-08-10T17:54:58 | 2019-08-10T17:54:58 | null | UTF-8 | Python | false | false | 2,512 | py | import os
import json
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
def imshow(im):
    # Render `im` full-bleed: a figure whose single axes covers the whole
    # canvas, with no ticks or borders, preserving the image aspect ratio.
    plt.close()
    sizes = im.shape
    height = float(sizes[0])
    width = float(sizes[1])
    fig = plt.figure()
    # 1 inch tall, (width/height) inches wide -> same aspect as the image.
    fig.set_size_inches(width / height, 1, forward=False)
    ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
    ax.set_axis_off()
    fig.add_axes(ax)
    # Pixel-centred limits; y runs downward so row 0 is drawn at the top.
    plt.xlim([-0.5, sizes[1] - 0.5])
    plt.ylim([sizes[0] - 0.5, -0.5])
    plt.imshow(im)
if __name__ == '__main__':
    # Visualise grouped line segments from data.json on top of each image,
    # one colour per group, saving the result under <dataset>/viz_group.
    data_name = 'SUNCG' # 'YUD', 'ScanNet', 'SceneCityUrban3D', 'SUNCG'
    if data_name == 'YUD':
        image_path = '/n/fs/vl/xg5/Datasets/YUD/YorkUrbanDB'
    elif data_name == 'ScanNet':
        image_path = '/n/fs/vl/xg5/Datasets/ScanNet/scannet-vp'
    elif data_name == 'SceneCityUrban3D':
        image_path = '/n/fs/vl/xg5/Datasets/SceneCityUrban3D/su3'
    elif data_name == 'SUNCG':
        image_path = '/n/fs/vl/xg5/Datasets/SUNCG/mlt_v2'
    org_path = '/n/fs/vl/xg5/workspace/baseline/gc_horizon_detector/dataset/' + data_name + '/data/data.json'
    save_path = '/n/fs/vl/xg5/workspace/baseline/gc_horizon_detector/dataset/' + data_name + '/viz_group'
    # data.json is JSON-lines: one record per image.
    with open(org_path, 'r') as f:
        org_lines = f.readlines()
    for num, context in enumerate(org_lines):
        print(num)
        data_dict = json.loads(context)
        group = np.array(data_dict['group']).tolist()
        org_line = np.array(data_dict['org_line']).tolist()
        image_dir = data_dict['image_path']
        image_name = os.path.join(image_path, image_dir)
        # Normalise pixel values to [0, 1] for matplotlib.
        image = io.imread(image_name).astype(float) / 255
        img_dir = image_name.split('/')[-2]
        savepath = os.path.join(save_path, img_dir)
        os.makedirs(savepath, exist_ok=True)
        save_name = os.path.join(save_path, image_dir)
        # NOTE(review): only 8 colours — a group index >= 8 would raise
        # IndexError below; presumably the data never has that many groups.
        color_list = ['y', 'b', 'm', 'k', 'r', 'c', 'g', 'w']
        # draw each segment; endpoints are stored as (row, col) pairs, so
        # they are swapped to (x, y) when plotting.
        imshow(image)
        for i in range(len(org_line)):
            g = int(group[i])
            if g == -1: # ungrouped segment; it is not necessary in this code
                color = 'k--'
            else:
                color = color_list[g]
            a, b = org_line[i]
            plt.plot([a[1], b[1]], [a[0], b[0]], color, linewidth=0.5)
            plt.scatter(a[1], a[0], c='#33FFFF', s=1.2)
            plt.scatter(b[1], b[0], c='#33FFFF', s=1.2)
        plt.savefig(save_name, dpi=500, bbox_inches=0)
| [
"xg5@head.ionic.cs.princeton.edu"
] | xg5@head.ionic.cs.princeton.edu |
a8a2f2d6780286dc62bb90f963f992dd1007ffe1 | ea19fbe94b9a3e54a02f52408409f98073923e06 | /manage.py | 153d0d1866b1be07e0cabe53a0e15a2d40902b42 | [] | no_license | i-sharmashubham/slambook | 415fef1dd3a4f65c7ff4db43c475229a0132a50e | 2a6f356e39d991ae0066857029b6f28926a0abcb | refs/heads/master | 2020-08-08T01:28:22.121288 | 2019-10-08T14:03:51 | 2019-10-08T14:03:51 | 213,657,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks (standard generated manage.py)."""
    # Default settings module; an externally-set env var takes precedence.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'slambook.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
621e29b29dc1192e480ec4fab3f0a6e5b0c78b37 | 4e214e3bd8cc413753603701a259416ced37aa25 | /debris/20170630.py | 6e0e3bfcde38ba991fc5788fad2d9140fb82f03b | [] | no_license | repose1019/MST | 149ebaa85cb985fbf9c394f4936b78d42f5f4b73 | 98b0b573939f84dc8c2ec1bae927b92e41a89ddc | refs/heads/master | 2020-12-02T16:16:46.253342 | 2017-08-11T10:43:27 | 2017-08-11T10:43:27 | 96,528,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | # -*-coding:utf-8-*-
# @author: 'Repose'
# @date: 2017/6/30
import numpy as np
import pandas as pd
# Scratch experiment: print a 4x4 matrix of standard-normal samples
# (a fresh draw each run — np.random is not seeded here).
data = np.random.randn(4,4)
print(data)
print(len(data)) | [
"846776066@qq.com"
] | 846776066@qq.com |
80f796be803c6cbe9307785b3beaf103fdaf5177 | 52266a44e2aca241707984e3b138775681b3e95f | /一本册子/字符串.py | 739ef5576c270d031768b4e1d83d68f15064ac44 | [] | no_license | Mr-hongji/pythonNote | 91b1252711ce0b919fc365932276b89d85d4c16b | ff5eda0c8f63345de4d98cff8f0f7ab5254c77a6 | refs/heads/master | 2020-04-11T14:49:39.637983 | 2019-05-26T09:21:09 | 2019-05-26T09:21:09 | 161,869,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | print 'hello'
print "I'm Shihongji"
'''
\被称作转译字符,除了用来表示引号,还有比如用
\\表示字符串中的\
\n表示字符串中的换行
'''
# Demonstrate escaped vs. unescaped quotes inside Python 2 string literals.
print 'I\'m a \"good\" people'
print 'I\'m a "good" people'
# Trailing literal backslash, and an embedded newline escape.
print '我是良民\\'
print '我是良民\n吗'
'''
作业
输出以下文字
1、He said, "I'm yours!"
2、\\_v_//
3、Stay hungry,
stay foolish.
---Steve Jobs
4、  *
***
****
***
*
'''
# Exercise answers (Python 2 print statements).
# NOTE(review): "hunngry" below is a typo inside a runtime string; it is
# kept as-is because this file's output is the author's exercise answer.
print 'He said, "I\'m yours!\"'
print "\\\\_v_//"
print "Stay hunngry,\nstay foolish.\n    -- Steve Jobs"
print '*\n***\n****\n***\n*'
| [
"shihongji@xiaoneng.cn"
] | shihongji@xiaoneng.cn |
26f644c66a8b92892987b70efed6d22aee3270b8 | 6160586aa239eada16e735d40d57970dedbe1dfc | /modules/app_additional/app_custom/app_position_update_info.py | b2724f015a7037a4d90534964a519bb0702c5061 | [] | no_license | showgea/AIOT | 7f9ffcd49da54836714b3342232cdba330d11e6c | fe8275aba1c4b5402c7c2c2987509c0ecf49f330 | refs/heads/master | 2020-07-23T10:19:37.478456 | 2019-09-23T12:25:59 | 2019-09-23T12:25:59 | 207,525,184 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | import requests
from config import readcfg
header_Gary = readcfg.header_Gary
header_Jenny = readcfg.header_Jenny
url = readcfg.url
def app_position_update_info(positionId, positionName=None, isDefault=None):
url_ = url + "/app/v1.0/lumi/app/position/update/info"
json_ = {
"positionId": positionId,
"positionName": positionName,
"isDefault": isDefault
}
list_ = ["positionId", "positionName", "isDefault"]
num = 0
for i in (positionId, positionName, isDefault):
if i is None:
json_.pop(list_[num])
num += 1
proxies = {'http': 'http://127.0.0.1:8888', 'https': 'http://127.0.0.1:8888'}
print("请求数据:%s" % json_)
r = requests.post(url=url_, json=json_, headers=header_Gary, proxies=proxies, verify=False)
return r
if __name__ == '__main__':
result_main = app_position_update_info("real2.615945282455937024")
print(result_main.text)
| [
"tangguobing2011@163.com"
] | tangguobing2011@163.com |
a1c45b2d5e7c3c0c8691e443597d313e63999089 | dcae274bf977460d90cde490df9cc9254ae0070e | /holon/settings.py | ebc0a93616142a4025a8d8181d85b005c1f77fe3 | [] | no_license | muneson/holon | 78cd68ac337944ecff7ae5dfb71c0c21b7522b46 | 9db28ed25aa8828a44492e1b190ef5eea6d43a83 | refs/heads/master | 2021-01-24T15:53:32.219254 | 2013-04-27T10:16:02 | 2013-04-27T10:16:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,747 | py | # -*- coding: utf-8 -*-
import os
gettext = lambda s: s
#Dubble dirnames due to project dir
PROJECT_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Django settings for holon project.
# NOTE(review): DEBUG is enabled and ALLOWED_HOSTS (below) is empty — fine
# for local development, but both must be changed before any production
# deployment (Django refuses requests with DEBUG=False and no hosts).
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Eirk Günther', 'egu@mensa.se'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'holon.db'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Stockholm'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'sv'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
MEDIA_ROOT = os.path.join(PROJECT_PATH, "media")
MEDIA_URL = "/media/"
STATIC_ROOT = os.path.join(PROJECT_PATH, "static")
STATIC_URL = "/static/"
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this secret is committed to version control; rotate it and
# load it from the environment (e.g. os.environ) for any non-local setup.
SECRET_KEY = 'a$!)li(-9x6l3qo!qa@xqxl&(p*3m3b)-8s5g730w8mdy5)4y6'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'holon.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'holon.wsgi.application'
TEMPLATE_DIRS = (
# The docs say it should be absolute path: PROJECT_PATH is precisely one.
# Life is wonderful!
os.path.join(PROJECT_PATH, "templates"),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'cms.context_processors.media',
'sekizai.context_processors.sekizai',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Django Admin
'django.contrib.admin',
'django.contrib.admindocs',
#Django CMS
'cms',
'mptt',
'menus',
'south',
'sekizai',
#Django CMS Plugins
'filer',
'easy_thumbnails',
'cmsplugin_filer_file',
'cmsplugin_filer_folder',
'cmsplugin_filer_image',
'cmsplugin_filer_teaser',
'cmsplugin_filer_video',
'cms.plugins.googlemap',
'cms.plugins.link',
'cms.plugins.snippet',
'cms.plugins.text',
'cms.plugins.twitter',
#Django Reversion on all CMS docs
'reversion',
#Development features
'django_extensions',
'debug_toolbar'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Django CMS settings: page templates selectable in the CMS admin
# (file name relative to TEMPLATE_DIRS, human-readable label).
CMS_TEMPLATES = (
    ('template_1.html', 'Template One'),
    ('template_2.html', 'Template Two'),
)
# Languages offered for CMS content.
LANGUAGES = [
    ('sv', 'Svenska'),
    ('en', 'English'),
]
# Debug toolbar: appended to MIDDLEWARE_CLASSES defined earlier in this file.
MIDDLEWARE_CLASSES += (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
# Panels shown by django-debug-toolbar, in display order.
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.profiling.ProfilingDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.template.TemplateDebugPanel',
    'debug_toolbar.panels.cache.CacheDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
    # Show redirect responses normally instead of an intermediate page.
    'INTERCEPT_REDIRECTS': False,
    'HIDE_DJANGO_SQL': False,
    # HTML tag the toolbar is injected into.
    'TAG': 'div',
    'ENABLE_STACKTRACES' : True,
}
| [
"erik.gunther@gmail.com"
] | erik.gunther@gmail.com |
95a65891632e1c526dfe49cd5b082b05a23fb3a0 | d80173b86be50c7b8c8dec265bfe4e9b66575f7c | /objects.py | 80305cbd3829557b6a79312bc8f6e6372c6c0d8e | [] | no_license | Pk13055/bomberman | 11450bb673ab1ffbb827d9dddeac3583742ce7e5 | 5e4d4413f9572e520de5604174123393f4463e86 | refs/heads/master | 2021-01-19T06:02:39.774474 | 2017-10-20T14:08:16 | 2017-10-20T14:08:16 | 100,589,676 | 6 | 4 | null | 2017-10-20T14:08:17 | 2017-08-17T10:03:18 | Python | UTF-8 | Python | false | false | 3,723 | py | '''
contains the structure of each object
'''
import config
from config import x_fac, y_fac
import numpy as np
class Object:
    '''Base type shared by bombs, walls, bricks and every other board entity.'''

    def __init__(self, x, y, ch=config._empty):
        '''Record the position (relative to the board's top-left corner) and
        build the default 2x4 character grid used to draw this object.'''
        self._x, self._y = x, y
        self.width, self.height = 4, 2
        self.is_killable = False
        self._ch = ch
        grid = np.chararray((self.height, self.width))
        grid[:] = ch
        self.structure = grid
        self._type = config.types[ch]

    def get_type(self):
        '''Category label ("Bomber", "Enemy", ...) looked up from config.'''
        return self._type

    def get_size(self):
        '''(height, width) of the drawing grid.'''
        return self.structure.shape

    def get_coords(self):
        '''Current (x, y) position.'''
        return (self._x, self._y)

    def update_location(self, board, new_x, new_y, init=False):
        '''Attempt to move this object to (new_x, new_y).

        A throwaway probe object of the same class is drawn at the target to
        test the move; on success the old footprint is cleared (unless this is
        the initial placement) and the coordinates are updated.
        Returns True on success, False when the board rejects the draw.'''
        probe = type(self)(new_x, new_y)
        if not board.draw_obj(probe):
            return False
        if not init:
            board.clear_obj(self)
        self._x, self._y = new_x, new_y
        return True
class Wall(Object):
    '''Static wall element drawn as an m-row by n-column block of wall chars.

    Note: the base class stores (n, m) as the (x, y) coordinates, which this
    class inherits unchanged to keep the existing interface.
    '''

    def __init__(self, n, m):
        '''n = width in characters, m = height in characters.'''
        super(Wall, self).__init__(n, m, config._wall)
        self.height = int(m)
        self.width = int(n)
        # Bug fix: the base __init__ allocated a fixed 2x4 grid, so any wall
        # larger than that made __repr__ index out of bounds.  Rebuild the
        # structure to match the requested dimensions.
        self.structure = np.chararray((self.height, self.width))
        self.structure[:, :] = self._ch

    def __repr__(self):
        '''Print the wall's character grid row by row (side effect), then
        return an empty string so the interactive echo stays clean.'''
        for r in range(self.height):
            print("\n")
            for c in range(self.width):
                try:
                    # chararray cells are bytes; decode for clean display.
                    print(self.structure[r, c].decode(), end="")
                except UnicodeDecodeError:
                    print(self.structure[r, c], end="")
        return ""
class Bomb(Object):
    '''A placeable bomb: displays a one-digit fuse while counting down and
    switches to the explosion sprite when the fuse runs out.'''

    def __init__(self, x, y):
        '''Place an inactive bomb at (x, y) and precompute its blast cells.'''
        super(Bomb, self).__init__(x, y, config._bomb)
        self.timer = 0        # frames remaining on the fuse once armed
        self.active = False   # True after detonate() has been called
        self.is_killable = True
        self.structure[:, :] = np.matrix([['[', self._ch, self._ch, ']'],
                                          ['[', self._ch, self._ch, ']']])
        # Cells damaged on detonation: two steps in each cardinal direction,
        # scaled by the board cell size (x_fac, y_fac).
        self.blast_radius = [(x + 1 * x_fac, y), (x + 2 * x_fac, y),
                             (x - 1 * x_fac, y), (x - 2 * x_fac, y),
                             (x, y + 1 * y_fac), (x, y + 2 * y_fac),
                             (x, y - 1 * y_fac), (x, y - 2 * y_fac)]
        self.owner = None     # set by the game to whoever planted the bomb

    def detonate(self, time):
        '''Arm the bomb with a fuse of `time` frames (counts down one frame
        after arming, on the next countdown() call).'''
        self.active = True
        self.timer = time

    def countdown(self):
        '''Advance the fuse by one frame; returns True while the bomb is armed.

        Bug fix: previously the explosion sprite was set in a branch that was
        unreachable while the bomb was active (the method returned first) and
        that fired spuriously for never-armed bombs (timer == 0).  The sprite
        is now drawn exactly when the armed fuse reaches zero.'''
        if self.active:
            self.timer -= 1
            if self.timer <= 0:
                self.structure[:, :] = config._expl
            else:
                self.structure[:, 1:3] = str(self.timer)
            return True

    def __repr__(self):
        '''Debug representation showing position, armed state and fuse.'''
        return "<Bomb (%d, %d) | Active : %s | %d frames left>" % \
            (self._x, self._y, self.active, self.timer)
class Bricks(Object):
    '''Destructible brick block: can be destroyed by a bomb blast.'''

    def __init__(self, x, y):
        '''Place a brick block at (x, y).'''
        super(Bricks, self).__init__(x, y, config._bricks)
        self.is_killable = True
        self.structure[:, :] = self._ch

    def __repr__(self):
        '''Debug representation showing the brick's position.

        Bug fix: the previous __repr__ was copy-pasted from Bomb and raised
        AttributeError (Bricks has no `active`/`timer`) while mislabeling the
        object as a Bomb.'''
        return "<Bricks (%d, %d)>" % (self._x, self._y)
| [
"pkrockstar7@gmail.com"
] | pkrockstar7@gmail.com |
cea3add153f23a7f8860918d7802bf80c33719cf | 77dcbefdeabf554a61c0c20882e9f1f34f954132 | /venv/Scripts/easy_install-script.py | 0bed0bdf5a6df24a4e608cd3af96cc0a5dc203e8 | [] | no_license | JacobDuvall/the-summarizer | 49c1976b044017c6cd641a30107da5c1479b6df0 | 4dddeba2b1110da590bb6070f59ec622cc16fd4b | refs/heads/master | 2022-07-01T08:26:31.675468 | 2020-05-13T17:53:45 | 2020-05-13T17:53:45 | 258,939,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #!"C:\Users\jdale\OneDrive\School\Text Analytics\project_2\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"jdaleduvall@gmail.com"
] | jdaleduvall@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.