blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
636287a026b036c4db22cc1f8fbad2a93d1e3f6b
|
90e39e45d469bb5dd9cb36805a88c97f41c147de
|
/2-do_deploy_web_static.py
|
5fc6c711832e5b59a00fa0831b049af1e986aac4
|
[] |
no_license
|
Noeuclides/AirBnB_clone_v2
|
372b3d01ba76d41a79dca166d6ca7d471749a07d
|
13fac5127af0149e7bef9a94b70e6d2746eeb4fd
|
refs/heads/master
| 2020-07-03T19:16:10.404783
| 2019-09-11T00:42:29
| 2019-09-11T00:42:29
| 202,020,044
| 0
| 2
| null | 2019-08-19T03:08:39
| 2019-08-12T22:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
#!/usr/bin/python3
from datetime import datetime
from fabric.api import *
from os import path
'''automatize with fabric
'''
'''env.user = 'localhost'
'''
env.hosts = ['35.231.53.89', '35.190.176.186']
def do_pack():
    """Create a .tgz archive of the web_static folder.

    The archive is stored under versions/ and named with a timestamp,
    e.g. versions/web_static_20190911004229.tgz.

    Returns:
        str: the archive file name on success, or None on failure.
    """
    now = datetime.now()
    # Renamed from ``file`` to avoid shadowing the builtin.
    archive_name = 'web_static_' + now.strftime("%Y%m%d%H%M%S") + '.tgz'
    local("mkdir -p versions")
    check = local("tar -cvzf versions/{} web_static".format(archive_name))
    # BUG FIX: fabric's local() returns a result object, never None, so the
    # original ``check is not None`` reported success even when tar failed.
    # The documented failure flag is ``.failed``.
    if check.failed:
        return None
    return archive_name
def do_deploy(archive_path):
    """Distribute an archive to the web servers and make it the live release.

    Uploads the .tgz to /tmp on every host in env.hosts, unpacks it under
    /data/web_static/releases/<name>, and re-points the
    /data/web_static/current symlink at the new release.

    Args:
        archive_path (str): local path such as
            "versions/web_static_20190911004229.tgz".

    Returns:
        bool: True when every remote operation succeeded, False otherwise.
    """
    # BUG FIX: the original compared ``str(path.exists(...)) is False``,
    # which can never be True (a str object is never the False singleton),
    # so a missing archive was deployed anyway.
    if not path.exists(archive_path):
        return False
    # Same indexing as before: assumes a "versions/<archive>" layout.
    archive_name = archive_path.split("/")[1]
    folder = archive_name.split('.')[0]
    oper = []
    oper.append(put(archive_path, '/tmp'))
    oper.append(run("mkdir -p /data/web_static/releases/{}".format(folder)))
    # BUG FIX: the original format string mixed the named placeholder
    # {file} with positional format() arguments, raising KeyError at
    # runtime before any extraction happened.
    oper.append(run(
        "tar -xzf /tmp/{} -C /data/web_static/releases/{}".format(
            archive_name, folder)))
    oper.append(run("rm /tmp/{}".format(archive_name)))
    oper.append(run(
        "mv /data/web_static/releases/{0}/web_static/* "
        "/data/web_static/releases/{0}".format(folder)))
    oper.append(run(
        "rm -rf /data/web_static/releases/{}/web_static".format(folder)))
    oper.append(run("rm -rf /data/web_static/current"))
    oper.append(run(
        "ln -s /data/web_static/releases/{}/ "
        "/data/web_static/current".format(folder)))
    # BUG FIX: fabric results are never the ``False`` singleton, so the
    # original ``op is False`` check could not detect a failed step.
    for op in oper:
        if op.failed:
            return False
    return True
|
[
"euclidesnoeuclides@gmail.com"
] |
euclidesnoeuclides@gmail.com
|
6af689639ddfcb358242510a287fa6c89aca2e3a
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/LArCalorimeter/LArCalibTools/share/LArMCConditions2Ntuple.py
|
1a35ffa4835cfb273b6320e18243c2bfdc57f847
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,483
|
py
|
import AthenaCommon.AtlasUnixGeneratorJob #use MC event selector
from string import split,join
## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
#Input Parameters:
# PoolFiles: sequence of pool files to read from though CondProxyProvider
# if not given, read from COOL
#
# RunNumber: Input to COOL IOV-DB if reading from
#
# RootFile: root file for the ntuple
#
# Objects: List of objects written to ntuple (PEDESTAL OFC, RAMP,
# Fall-back configuration: each knob below may be pre-set by the caller
# (e.g. athena -c 'RunNumber=1234 ...'); it is only defaulted here when
# absent from the top-level namespace (module scope, so dir() sees it).
DBTag='OFLCOND-SDR-BS14T-IBL-06'
if not 'InputDB' in dir():
    InputDB="COOLOFL_LAR/OFLP200"
if not "OFCFolder" in dir():
    OFCFolder="5samples1phase"
if not 'RunNumber' in dir():
    # Max 32-bit signed int: effectively "use the latest IOV".
    RunNumber=2147483647
if not "RootFile" in dir():
    RootFile="LArConditions.root"
if not "Objects" in dir():
    # Conditions objects to dump to the ntuple (matched by doObj below).
    Objects=["PEDESTAL","RAMP","OFC","MPHYSOVERMCAL","SHAPE","UA2MEV"]
if not "DBTag" in dir():
    # NOTE(review): DBTag is assigned unconditionally above, so this guard
    # can never fire; the "LARCALIB-000-01" default is dead code.
    DBTag="LARCALIB-000-01"
def doObj(objName, objects=None):
    """Return True when *objName* matches an entry of the requested list.

    Matching is case-insensitive substring containment: "ofc" matches an
    entry "MYOFCS".

    Args:
        objName (str): object name to look for, e.g. "PEDESTAL".
        objects (list[str] | None): list to search; defaults to the
            module-level ``Objects`` selection (backward compatible with
            the original zero-extra-argument call sites).

    Returns:
        bool
    """
    if objects is None:
        objects = Objects
    for o in objects:
        if o.upper().find(objName.upper()) != -1:
            return True
    return False
def getDBFolderAndTag(folder):
    """Build an IOVDbSvc folder spec: "<db>...</db><folder>[<tag>...</tag>]".

    When a module-level ``TagSuffix`` is defined, a folder-level tag is
    derived from the folder path (slashes removed) plus the suffix.

    Args:
        folder (str): COOL folder path, e.g. "/LAR/ElecCalibMC/Pedestal".

    Returns:
        str: folder specification understood by conddb.addFolder.
    """
    # BUG FIX: the original tested ``"TagSuffix" in dir()`` *inside* the
    # function, where dir() lists only local names -- a module-level
    # TagSuffix could never be seen, so the tag was always dropped.
    if "TagSuffix" in globals():
        # folder.replace('/', '') is equivalent to the original py2
        # ``join(split(folder, '/'), '')``.
        tag = "<tag>" + folder.replace('/', '') + TagSuffix + "</tag>"
    else:
        tag = ""
    return "<db>" + InputDB + "</db>" + folder + tag
from AthenaCommon.GlobalFlags import globalflags
# Simulated (geant4) data read from POOL files.
globalflags.DataSource="geant4"
globalflags.InputFormat="pool"
from AthenaCommon.JobProperties import jobproperties
jobproperties.Global.DetDescrVersion = "ATLAS-GEO-18-01-03"
from AthenaCommon.DetFlags import DetFlags
# Switch every detector/processing flag off: this job only dumps
# conditions data, no event reconstruction is needed.
DetFlags.Calo_setOff()
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
#Set up GeoModel (not really needed but crashes without)
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
#Get identifier mapping (needed by LArConditionsContainer)
svcMgr.IOVDbSvc.GlobalTag=DBTag
include( "LArConditionsCommon/LArIdMap_comm_jobOptions.py" )
# A single (dummy) event is enough to trigger the conditions callbacks.
theApp.EvtMax = 1
svcMgr.EventSelector.RunNumber = RunNumber
# Bad-channel folders, keyed explicitly so downstream tools can find them.
conddb.addFolder("","<db>COOLOFL_LAR/OFLP200</db>/LAR/BadChannels/BadChannels<key>/LAR/BadChannels/BadChannels</key>")
conddb.addFolder("","<db>COOLOFL_LAR/OFLP200</db>/LAR/BadChannels/MissingFEBs<key>/LAR/BadChannels/MissingFEBs</key>")
conddb.addOverride('/LAR/Identifier/FebRodAtlas','FebRodAtlas-005')
conddb.addOverride('/LAR/Identifier/OnOffIdAtlas','OnOffIdAtlas-012')
# Optionally read conditions payloads from POOL files instead of COOL.
if 'PoolFiles' in dir():
    from AthenaCommon.ConfigurableDb import getConfigurable
    from AthenaCommon.AppMgr import ServiceMgr
    ServiceMgr.ProxyProviderSvc.ProviderNames += [ "CondProxyProvider" ]
    ServiceMgr += getConfigurable( "CondProxyProvider" )()
    svcMgr.CondProxyProvider.InputCollections=PoolFiles
if 'PoolCat' in dir():
    svcMgr.PoolSvc.ReadCatalog+=["xmlcatalog_file:"+PoolCat]
loadCastorCat=False
# One block per dumpable object: register its COOL folder(s) and schedule
# the matching *2Ntuple algorithm on the top sequence.
if doObj("PEDESTAL"):
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/Pedestal"))
    from LArCalibTools.LArCalibToolsConf import LArPedestals2Ntuple
    LArPedestals2Ntuple=LArPedestals2Ntuple("LArPedestals2Ntuple")
    LArPedestals2Ntuple.AddFEBTempInfo=False
    topSequence+=LArPedestals2Ntuple
if doObj("AUTOCORR"):
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibOfl/AutoCorrs/AutoCorr"))
    from LArCalibTools.LArCalibToolsConf import LArAutoCorr2Ntuple
    LArAutoCorr2Ntuple=LArAutoCorr2Ntuple("LArAutoCorr2Ntuple")
    LArAutoCorr2Ntuple.AddFEBTempInfo=False
    topSequence+=LArAutoCorr2Ntuple
if doObj("OFC"):
    # OFCs are computed on the fly, which needs the noise/autocorr folders
    # plus the ADC2MeV / AutoCorrNoise / OFC tools configured below.
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/HVScaleCorr"))
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/Noise"))
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/AutoCorr"))
    from LArRecUtils.LArADC2MeVToolDefault import LArADC2MeVToolDefault
    from LArRecUtils.LArAutoCorrNoiseToolDefault import LArAutoCorrNoiseToolDefault
    theLArADC2MeVToolDefault = LArADC2MeVToolDefault()
    ToolSvc += theLArADC2MeVToolDefault
    theLArAutoCorrNoiseToolDefault = LArAutoCorrNoiseToolDefault()
    theLArAutoCorrNoiseToolDefault.NSamples = 5
    ToolSvc += theLArAutoCorrNoiseToolDefault
    from LArRecUtils.LArOFCToolDefault import LArOFCToolDefault
    theOFCTool = LArOFCToolDefault()
    theOFCTool.Dump=True
    ToolSvc += theOFCTool
    from LArCalibTools.LArCalibToolsConf import LArOFC2Ntuple
    LArOFC2Ntuple = LArOFC2Ntuple("LArOFC2Ntuple")
    LArOFC2Ntuple.ContainerKey = "LArOFC"
    LArOFC2Ntuple.AddFEBTempInfo=False
    LArOFC2Ntuple.IsMC = True
    LArOFC2Ntuple.OFCTool = theOFCTool
    topSequence+=LArOFC2Ntuple
if (doObj("SHAPE")):
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/Shape"))
    from LArCalibTools.LArCalibToolsConf import LArShape2Ntuple
    LArShape2Ntuple = LArShape2Ntuple("LArShape2Ntuple")
    LArShape2Ntuple.ContainerKey = "LArShape"
    LArShape2Ntuple.AddFEBTempInfo=False
    LArShape2Ntuple.IsMC = True
    topSequence+=LArShape2Ntuple
if doObj("RAMP"):
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/Ramp"))
    from LArCalibTools.LArCalibToolsConf import LArRamps2Ntuple
    LArRamps2Ntuple=LArRamps2Ntuple("LArRamps2Ntuple")
    LArRamps2Ntuple.NtupleName = "RAMPS"
    LArRamps2Ntuple.RawRamp = False
    LArRamps2Ntuple.IsMC = True
    LArRamps2Ntuple.AddFEBTempInfo=False
    topSequence+=LArRamps2Ntuple
if (doObj("UA2MEV")):
    # Python-2 print statements (this is a py2 Athena job option).
    print 'DAC2uA check : ',getDBFolderAndTag("/LAR/ElecCalibMC/DAC2uA")
    print 'uA2MeV check : ',getDBFolderAndTag("/LAR/ElecCalibMC/uA2MeV")
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/DAC2uA"))
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/uA2MeV"))
    from LArCalibTools.LArCalibToolsConf import LAruA2MeV2Ntuple
    LAruA2MeV2Ntuple=LAruA2MeV2Ntuple("LAruA2MeV2Ntuple")
    LAruA2MeV2Ntuple.AddFEBTempInfo=False
    topSequence+=LAruA2MeV2Ntuple
if (doObj("MPHYSOVERMCAL")):
    conddb.addFolder("",getDBFolderAndTag("/LAR/ElecCalibMC/MphysOverMcal"))
    from LArCalibTools.LArCalibToolsConf import LArMphysOverMcal2Ntuple
    LArMphysOverMcal2Ntuple=LArMphysOverMcal2Ntuple("LArMphysOverMcal2Ntuple")
    LArMphysOverMcal2Ntuple.AddFEBTempInfo=False
    LArMphysOverMcal2Ntuple.IsMC=True
    topSequence+=LArMphysOverMcal2Ntuple
if loadCastorCat:
    # Dead branch: loadCastorCat is hard-coded False above.
    svcMgr.PoolSvc.ReadCatalog += ['xmlcatalog_file:'+'/afs/cern.ch/atlas/conditions/poolcond/catalogue/poolcond/PoolCat_comcond_castor.xml']
# Ntuple output configuration: everything goes into RootFile.
theApp.HistogramPersistency = "ROOT"
from GaudiSvc.GaudiSvcConf import NTupleSvc
svcMgr += NTupleSvc()
svcMgr.NTupleSvc.Output = [ "FILE1 DATAFILE='"+RootFile+"' OPT='NEW'" ]
svcMgr.MessageSvc.OutputLevel = DEBUG
svcMgr.IOVDbSvc.DBInstance="OFLP200"
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
b968c173c17ee7e0970eab6d6e3ee7ba4a7e8ab9
|
ca77e9e45d666771c7b0897e7e3093b3d3c12f65
|
/random_scripts/update_costs/update_wo_costs.py
|
20ad1cd9856990dbe5c11c2b9e38c6a9d2275cf5
|
[] |
no_license
|
2gDigitalPost/custom
|
46175d3a3fc4c3be21dc20203ff0a48fb93b5639
|
6a3a804ef4ef6178044b70ad1e4bc5c56ab42d8d
|
refs/heads/master
| 2020-04-04T07:40:17.962611
| 2016-12-28T18:35:28
| 2016-12-28T18:35:28
| 39,648,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,309
|
py
|
import os, sys, math, hashlib, getopt, tacticenv, time
def kill_mul_spaces(origstrg):
    """Collapse runs of whitespace in *origstrg* into single spaces.

    Note: when the input contains any word, the result keeps one leading
    space (callers strip it afterwards); an empty/blank input yields ''.
    """
    return ''.join(' ' + token for token in origstrg.split())
def make_data_dict(file_name):
    """Parse a tab-separated export into {code: {field: value}}.

    The first line is the header; in every following row the first column
    is the record code and keys the remaining columns by header name.
    All cells have internal whitespace collapsed and ends trimmed.

    Args:
        file_name (str): path to the tab-separated file.

    Returns:
        dict: mapping code -> {field_name: value}.
    """
    fields = []
    data_dict = {}
    # Context manager guarantees the handle is closed; the original
    # leaked it if anything raised mid-parse.
    with open(file_name, 'r') as the_file:
        for count, line in enumerate(the_file):
            data = line.rstrip('\r\n').split('\t')
            if count == 0:
                # Header row: normalized field names.  ' '.join(x.split())
                # is exactly the original kill_mul_spaces(x).strip(' ').
                fields = [' '.join(cell.split()) for cell in data]
            else:
                this_code = ''
                for data_count, val in enumerate(data):
                    val = ' '.join(val.split())
                    if data_count == 0:
                        # First column is the record code.
                        data_dict[val] = {}
                        this_code = val
                    else:
                        data_dict[this_code][fields[data_count]] = val
    # Parenthesized print works identically on py2 and py3.
    print("FIELDS = %s" % fields)
    return data_dict
# Command-line contract: five positional file arguments.
# NOTE(review): getopt.getopt expects an argument *list*; passing a single
# string works here only because the file names never start with '-', so
# getopt returns the string untouched -- confirm before changing.
opts, work_order_file = getopt.getopt(sys.argv[1], '-m')
print "work_order_file = %s" % work_order_file
opts, task_file = getopt.getopt(sys.argv[2], '-m')
print "task_file = %s" % task_file
opts, group_file = getopt.getopt(sys.argv[3], '-m')
print "group_file = %s" % group_file
opts, login_in_group_file = getopt.getopt(sys.argv[4], '-m')
print "login_in_group_file = %s" % login_in_group_file
opts, work_hour_file = getopt.getopt(sys.argv[5], '-m')
print "work_hour_file = %s" % work_hour_file
lookup_codes = {}
# Load the five tab-separated exports into {code: {field: value}} dicts.
work_orders = make_data_dict(work_order_file)
#print "WORK ORDERS = %s" % work_orders
tasks = make_data_dict(task_file)
#print "TASKS = %s" % tasks
groups = make_data_dict(group_file)
#print "GROUPS = %s" % groups
login_in_groups = make_data_dict(login_in_group_file)
#print "LOGIN IN GROUPS = %s" % login_in_groups
work_hours = make_data_dict(work_hour_file)
#print "WORK HOURS = %s" % work_hours
work_order_codes = work_orders.keys()
task_codes = tasks.keys()
work_hour_codes = work_hours.keys()
out_lines = []      # SQL update statements to emit
problem_lines = []  # human-readable data problems
for woc in work_order_codes:
    #Expected first
    s_status = work_orders[woc]['s_status']
    if s_status not in ['retired','r']:
        # expected_cost = estimated hours * the work group's hourly rate.
        work_group = work_orders[woc]['work_group']
        estimated_work_hours = work_orders[woc]['estimated_work_hours']
        if work_group not in [None,''] and estimated_work_hours not in [None,'',0,'0']:
            estimated_work_hours = float(estimated_work_hours)
            group_rate = groups[work_group]['hourly_rate']
            if group_rate not in [None,'']:
                group_rate = float(group_rate)
                new_expected_cost = float(estimated_work_hours * group_rate)
                out_lines.append("update work_order set expected_cost = '%s' where code = '%s';" % (new_expected_cost, woc))
        else:
            problem_lines.append("Work Order %s is incomplete. Work Group = %s, Est_WH = %s" % (woc, work_group, estimated_work_hours))
        # actual_cost = sum over this task's work-hour entries of
        # (best matching login-group rate * straight time).
        task_code = work_orders[woc]['task_code']
        if task_code not in [None,'']:
            summed_actual_cost = 0
            if task_code in task_codes:
                if tasks[task_code]['s_status'] not in ['retired','r']:
                    for whc in work_hour_codes:
                        if work_hours[whc]['task_code'] == task_code:
                            user = work_hours[whc]['login']
                            straight_time = work_hours[whc]['straight_time']
                            if straight_time not in [None,'',0,'0']:
                                straight_time = float(straight_time)
                                group_chosen = ''
                                group_rate = 0
                                # Pick the user's highest-rated login group.
                                for lg in login_in_groups.keys():
                                    if login_in_groups[lg]['login'] == user:
                                        if group_chosen == '':
                                            # First membership found for this user.
                                            group_chosen = login_in_groups[lg]['login_group']
                                            if group_chosen in groups.keys():
                                                group_rate = groups[group_chosen]['hourly_rate']
                                                if group_rate not in [None,'',0,'0.0']:
                                                    group_rate = float(group_rate)
                                                else:
                                                    group_rate = 0
                                        else:
                                            # Later membership: keep it only if its rate is higher.
                                            this_group = login_in_groups[lg]['login_group']
                                            if this_group in groups.keys():
                                                this_rate = groups[this_group]['hourly_rate']
                                                if this_rate not in [None,'',0,'0.0']:
                                                    this_rate = float(this_rate)
                                                else:
                                                    this_rate = 0
                                                if this_rate > group_rate:
                                                    group_rate = this_rate
                                                    group_chosen = this_group
                                if group_rate not in [None,'']:
                                    if group_rate == 0:
                                        problem_lines.append("GROUP RATE WAS 0 for %s, user %s, group %s" % (whc, user, group_chosen))
                                    else:
                                        summed_actual_cost = summed_actual_cost + float(group_rate * straight_time)
            if summed_actual_cost not in [None,'']:
                out_lines.append("update work_order set actual_cost = '%s' where code = '%s';" % (summed_actual_cost, woc))
# Emit the SQL statements and the problem report as plain text files in
# the working directory.
out_file = open('work_order_cost_fix','w')
for ol in out_lines:
    out_file.write('%s\n' % ol)
out_file.close()
problem_file = open('work_order_cost_problems', 'w')
for pl in problem_lines:
    problem_file.write('%s\n' % pl)
problem_file.close()
|
[
"topher.hughes@2gdigital.com"
] |
topher.hughes@2gdigital.com
|
335a917f993a2444982d969d5168c22b7ae98d6d
|
6d5fd2e7b9a66e17593a490a80e96e95d36436a3
|
/src/profiles/signals.py
|
81924a9d88eb4f0810a55822910c9384d971ed0a
|
[] |
no_license
|
Husain-Jinia/Django-Report-Generator
|
f38276b47c263824b2f6794d793ff63872ba31fc
|
277a06584e7d333d9380f213abc217e29ecafd17
|
refs/heads/master
| 2023-04-20T06:05:13.294836
| 2021-05-15T08:01:11
| 2021-05-15T08:01:11
| 365,412,503
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
from .models import Profile
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender=User)
def post_save_create_profile(sender, instance, created, **kwargs):
    """Create a Profile for every newly inserted User.

    Connected to User's post_save signal; runs after each save and only
    acts when the save actually created a new row.
    """
    # Debug trace of the signal payload.
    for payload in (sender, instance, created):
        print(payload)
    if created:
        Profile.objects.create(user=instance)
|
[
"husainjinia0201@gmail.com"
] |
husainjinia0201@gmail.com
|
66a4c5f13e1dc79c7ef110ee7f36ab90411658d1
|
3a6cf9261ca5e58468622f49cfa109d65f7b4eda
|
/src/python/spikes/stereo.py
|
3bfe579ce6e4881dbb41e9f01fcc2026dd9dddab
|
[] |
no_license
|
romilly/cluster-hat
|
a6784f85da5287466a73ef61a0111063bcd171b1
|
a872da5bfa6ab2cb666095ab6845bafa5d4badca
|
refs/heads/master
| 2021-05-09T13:30:34.743067
| 2018-01-30T15:11:35
| 2018-01-30T15:11:35
| 119,036,856
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Load the stereo pair as single-channel (grayscale) images.
left_img = cv2.imread('images/p1-image1.jpg', 0)
right_img = cv2.imread('images/p2-image1.jpg', 0)

# Block-matching stereo correspondence; compute() yields a disparity map.
matcher = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity_map = matcher.compute(left_img, right_img)

# Show the disparity map in grayscale.
plt.imshow(disparity_map, 'gray')
plt.show()
|
[
"romilly.cocking@gmail.com"
] |
romilly.cocking@gmail.com
|
a64456fc0046b8a42ad60ddaa19ba450e3c4bfac
|
4a7804ee05485c345b4e3c39a0c96ed4012542ac
|
/editor/emacs/emacs-python/actions.py
|
b0523c81855ca5f5c430977b55c02381e42c60ee
|
[] |
no_license
|
Erick-Pardus/Pardus
|
1fef143c117c62a40e3779c3d09f5fd49b5a6f5c
|
2693e89d53304a216a8822978e13f646dce9b1d3
|
refs/heads/master
| 2020-12-31T02:49:33.189799
| 2013-03-17T06:29:33
| 2013-03-17T06:29:33
| 17,247,989
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "python-mode.el-6.0.11"
def install():
    # Install the Emacs Lisp files into the site-lisp python directory.
    # NOTE(review): the exact semantics of the three-argument insinto
    # (destination, glob, installed name) come from pisi.actionsapi --
    # confirm against the pisitools documentation.
    pisitools.insinto("/usr/share/emacs/site-lisp/python", "*.el", "python-mode.el")
|
[
"namso-01@hotmail.it"
] |
namso-01@hotmail.it
|
0653972e0dd62e235f1b6c73af6da5b96e246c6f
|
1a812d520fa0788864cab3c6bbd4e2ba0e8872c2
|
/employeedataandprintthatdata.py
|
d97719e66d1ee36ecddc97ae0f16f35d728b4462
|
[] |
no_license
|
manutdmohit/pythonprogramexamples
|
b6f6906a6169ad2ecd9b16d95495474d570b065e
|
06ac4af8ce13872bbe843175a61d7ad77e0f92b6
|
refs/heads/main
| 2023-01-14T13:14:57.468947
| 2020-11-25T05:39:01
| 2020-11-25T05:39:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# Collect employee details interactively and echo them back for
# confirmation.
eno = int(input('Enter employee number:'))
ename = input('Enter employee name:')
esal = float(input('Enter employee salary:'))
eaddr = input('Enter employee address:')
# BUG FIX: bool(input(...)) is True for ANY non-empty answer -- including
# the literal text "False" -- so the original could never record False.
# Compare the text instead: only an explicit "True" (any case) counts.
married = input('Employee married?[True/False]:').strip().lower() == 'true'
print('Please confirm your provided information')
print('Employee Number:', eno)
print('Employee Name:', ename)
print('Employee Salary:', esal)
print('Employee Address:', eaddr)
print('Employee Married?:', married)
|
[
"noreply@github.com"
] |
manutdmohit.noreply@github.com
|
961781e9a4421f843daec46bf7d27a5b190cffc6
|
989b3499948137f57f14be8b2c77d0610d5975e6
|
/python-package/daily_study/python/question_python(resolved)/chapter4_conditional_and_loops(완결)/i_is_member.py
|
fb8ea88f0fd87a269fb0ec00839eb849b2386979
|
[] |
no_license
|
namkiseung/python_BasicProject
|
76b4c070934ad4cb9d16ce844efa05f64fb09ac0
|
460d05248b2d1431624aba960e28bece888643e4
|
refs/heads/master
| 2022-12-13T21:12:06.865241
| 2020-04-23T01:30:08
| 2020-04-23T01:30:08
| 142,980,920
| 1
| 1
| null | 2022-12-08T02:27:40
| 2018-07-31T07:49:17
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
def is_member(member_list, mem):
    """Report membership: True when *mem* is contained in *member_list*.

    Examples:
        is_member([1, 5, 8, 3], 3)  -> True
        is_member([5, 8, 3], -1)    -> False
    """
    found = mem in member_list
    return found
if __name__ == "__main__":
    # Smoke test (Python 2 print statements; expected output in comments).
    print is_member([1, 5, 8, 3], 3)# -> True
    print is_member([5, 8, 3], -1) #-> False
    pass
|
[
"rlzld100@gmail.com"
] |
rlzld100@gmail.com
|
4f2d7e9a93ccb1c73bfa12146ad9add11e573b27
|
d07a26e443538c5fc6b0711aff6e233daef79611
|
/LearnPythonGuessGame.py
|
e3a41526a4b12716d27871e2464f08f1855a7ba6
|
[] |
no_license
|
Zahidsqldba07/Python-learn
|
bd602d490ee53f8e5331e70f92919ca315944ff9
|
ffc1608695ed6c7c3d2b6789913e34235dcf468e
|
refs/heads/master
| 2023-03-16T02:18:19.155281
| 2020-09-19T09:12:48
| 2020-09-19T09:12:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
# Guess-the-word game: the player has guess_limit attempts; after each
# wrong guess the next letter of the word is revealed as a hint.
secret_word = "respect"
guess = ''
guess_count = 0
# guess_limit equals len(secret_word) (7), so the hint index below never
# goes out of range.
guess_limit = 7
out_of_guesses = False
while guess != secret_word and not (out_of_guesses):
    if guess_count < guess_limit:
        guess = input("What's the secret word?: ")
        guess_count += 1
        if guess != secret_word:
            # Reveal letter number guess_count (1-based) as a hint.
            print("Hint: " + secret_word[int(guess_count)-1])
    else:
        out_of_guesses = True
if out_of_guesses:
    print("All out of guesses, better luck next time!")
    exit()
else:
    print("Nice work!")
    exit()
|
[
"noreply@github.com"
] |
Zahidsqldba07.noreply@github.com
|
1817dddcfb6a350fe4323472755486725543c750
|
d70db722710bccf7a834e8e4acdb376b151b20a1
|
/apps/finances/models.py
|
0f4b847dc96b1d4ee9872b62f624905c17cde98f
|
[] |
no_license
|
intentaware/Vader
|
b0d433f640b244d592126b2713506d214dc1d287
|
54d5d799beab1fc5cef99fb90d4e50e00720bfe0
|
refs/heads/master
| 2021-01-20T07:07:11.393929
| 2017-12-06T19:16:53
| 2017-12-06T19:16:53
| 30,995,526
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,356
|
py
|
import shortuuid
from django.db import models
from django.utils.text import slugify, Truncator
from django.contrib.postgres.fields import JSONField
from django_extensions.db.fields import ShortUUIDField
from apps.common.models import *
from apps.common.utils.money import convert_to_cents
from .mixins import Stripe, CURRENCY_CHOICES
class BasePaymentModel(Stripe, TimeStamped):
    """Basic Payment Model, inherits Stripe model, will be used for multiple

    Attributes:
        amount (Decimal): total amount charged to customer
        attempted_on (Time): time on which the charge was attempted
        attempts (Int): Number of times we tried to charge
        charged_on (Time): If charge was succesful, populate the field with current time
        gateway_response (Json): Response from the server
        is_paid (Bool): if charge was succesful
        service_charges (Decimal): Service charges if any, amount is inclusive of service_charges
        taxes (Decimal): Taxes if any, Note: amount is inclusive of taxes
    """
    amount = models.DecimalField(default=0.00, max_digits=20, decimal_places=4)
    currency = models.CharField(
        max_length=4,
        choices=CURRENCY_CHOICES,
        default='USD'
    )
    attempts = models.IntegerField(default=0)
    # service charges (amount above is inclusive of these)
    service_charges = models.DecimalField(
        default=0.00,
        max_digits=20,
        decimal_places=4
    )
    taxes = models.DecimalField(default=0.0, max_digits=20, decimal_places=4)
    # extra timestamps
    attempted_on = models.DateTimeField(blank=True, null=True)
    charged_on = models.DateTimeField(blank=True, null=True)
    # BUG FIX: JSONField(default={}) shares ONE dict instance across every
    # model instance, so mutating one response could leak into others.
    # Django documents that mutable defaults must be callables.
    gateway_response = JSONField(default=dict)
    is_paid = models.BooleanField(default=False)

    class Meta:
        abstract = True

    @property
    def line_items_total(self):
        # Amount net of service charges and taxes.
        return self.amount - self.service_charges - self.taxes
class Invoice(BasePaymentModel):
    """A concrete charge against a company, mirrored to Stripe.

    Inherits all payment bookkeeping fields from BasePaymentModel.
    """
    # Remote invoice id; blank until mirrored to Stripe.
    stripe_id = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        help_text='id obtained from stripe'
    )
    company = models.ForeignKey('companies.Company', related_name='invoices')
class Module(TimeStamped):
    """A product feature module, grouped into one of three segments."""
    # Enum-ish constants: CORE=0, DMP=1, REPORTING=2.
    [CORE, DMP, REPORTING] = range(3)
    SEGMENT_CHOICES = [
        (CORE, 'Core'),
        (DMP, 'Data Management Platform'),
        (REPORTING, 'Reporting'),
    ]
    name = models.CharField(max_length=128, help_text='The name of the module')
    segment = models.IntegerField(
        choices=SEGMENT_CHOICES,
        default=CORE,
        help_text='The segment it is part of'
    )

    def __unicode__(self):
        # Python-2 string representation (py2 codebase).
        return self.name
class Plan(TimeStamped, Stripe):
    """A subscription plan, mirrored to Stripe when it recurs."""
    # Billing interval constants: 0 means a one-off (until expiry) plan.
    [UNTIL_EXPIRY, DAY, WEEK, MONTH, YEAR] = range(5)
    INTERVAL_CHOICES = [
        (UNTIL_EXPIRY, 'untill expiry'),
        (DAY, 'day'),
        (WEEK, 'week'),
        (MONTH, 'month'),
        (YEAR, 'year'),
    ]
    amount = models.DecimalField(default=0.00, max_digits=20, decimal_places=2)
    currency = models.CharField(
        max_length=4,
        choices=CURRENCY_CHOICES,
        default='USD'
    )
    name = models.CharField(max_length=128)
    interval = models.IntegerField(
        choices=INTERVAL_CHOICES,
        default=UNTIL_EXPIRY
    )
    modules = models.ManyToManyField(Module, through='finances.PlanModule')
    limit_campaigns = models.IntegerField(
        default=0,
        help_text='0 means unlimited'
    )
    limit_impressions = models.IntegerField(
        default=0,
        help_text='0 means unlimited'
    )
    stripe_id = ShortUUIDField(blank=True, null=True)

    def __unicode__(self):
        # Python-2 string representation (py2 codebase).
        return self.name

    def save(self, *args, **kwargs):
        """Override the default save to hook the plans with Stripe.

        If the Stripe copy of this plan disagrees on amount or currency, a
        brand-new local row (self.id = None) and Stripe plan are created,
        since Stripe plans are immutable.

        Args:
            *args: arguments, normally plain arguments
            **kwargs: Keyword arguments
        Returns:
            name (obj): Django Plan model object
        """
        plan = None
        sd = self.stripe_dictionary
        # Only recurring plans (interval != 0) have a stripe_dictionary.
        if sd and self.stripe_id:
            try:
                plan = self.stripe_plan
                # Stripe stores amounts in cents; compare normalized values.
                if int(plan.amount) != convert_to_cents(
                    self.amount
                ) or self.currency.lower() != plan.currency:
                    print 'not equal, creating new account'
                    self.stripe_id = shortuuid.uuid()
                    # id=None forces an INSERT of a fresh row on save.
                    self.id = None
                    self.create_stripe_plan()
            except self._stripe.error.InvalidRequestError:
                # Plan unknown to Stripe yet: create it remotely.
                self.create_stripe_plan()
        return super(Plan, self).save(*args, **kwargs)

    class Meta:
        ordering = ['amount']

    def create_stripe_plan(self, *args, **kwargs):
        # Push this plan's definition to Stripe.
        return self._stripe.Plan.create(**self.stripe_dictionary)

    @property
    def stripe_plan(self):
        # Fetch the remote Stripe plan; raises InvalidRequestError if absent.
        return self._stripe.Plan.retrieve(self.stripe_id)

    def features(self):
        """Return all modules grouped by segment name, each flagged with
        is_included according to this plan's module set."""
        from itertools import groupby
        modules = Module.objects.all().values('id', 'name', 'segment')
        plan_modules = self.modules.all().values('id', 'name', 'segment')
        for m in modules:
            if m in plan_modules:
                m['is_included'] = True
            else:
                m['is_included'] = False
        doc = dict()
        # NOTE(review): groupby requires input sorted by the same key;
        # this relies on Module.objects.all() ordering -- confirm.
        for k, v in groupby(modules, lambda x: x['segment']):
            doc[Module.SEGMENT_CHOICES[k][1]] = list(v)
        return doc

    @property
    def stripe_dictionary(self):
        """Stripe Plan.create payload, or None for one-off plans."""
        doc = None
        if not self.interval == 0:
            doc = {
                'id': self.stripe_id,
                'name': '{name} ({currency})'.format(
                    name=self.name,
                    currency=self.currency
                ),
                'amount': convert_to_cents(self.amount),
                'currency': self.currency,
                'interval': self.INTERVAL_CHOICES[self.interval][1],
                # Stripe limits statement descriptors to 22 characters.
                'statement_descriptor': Truncator(
                    'IA: {name}'.format(
                        name=self.name
                    )
                ).chars(22)
            }
        return doc
class PlanModule(TimeStamped):
    """Through table linking a Plan to the Modules it includes."""
    plan = models.ForeignKey(Plan)
    module = models.ForeignKey(Module)

    class Meta:
        # Each module may appear at most once per plan.
        unique_together = ['plan', 'module']
|
[
"yousuf.jawwad@gmail.com"
] |
yousuf.jawwad@gmail.com
|
19261cb62700033a9cef08d8687bae4821b6f92d
|
21569b68b510b55bdc2acb1ff5ae521b31d44a79
|
/bin/pyrsa-encrypt-bigfile
|
9afaf7317207ef369910d93588778e7aefc825d6
|
[] |
no_license
|
howarder3/Rpi3_study
|
a99faef434ae4f751d4d9f339aca918186f7cb3e
|
533ba60ae4d11b5e3cebc12283e067ccee5a5cfd
|
refs/heads/master
| 2020-03-18T18:11:01.030936
| 2018-05-27T20:46:40
| 2018-05-27T20:46:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
#!/home/pi/myenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import encrypt_bigfile
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix that setuptools adds to console
    # scripts on Windows so argv[0] looks like the plain command name, then
    # hand control to the rsa CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(encrypt_bigfile())
|
[
"howarder3@gmail.com"
] |
howarder3@gmail.com
|
|
667907153fb3690183536d53d10538fd0e5ee2f8
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories_2to3/197978/Grasp-and-lift-EEG-challenge-master/genInfos.py
|
3fe287f7ae615d7d863ba13934411a5cad7ad2b9
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,710
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 21:35:28 2015.
@author: fornax
"""
import numpy as np
import pandas as pd
from glob import glob
from mne import concatenate_raws
from preprocessing.aux import creat_mne_raw_object
# #### define lists #####
subjects = list(range(1, 13))  # subjects 1..12
lbls_tot = []            # event labels for the validation series (7 & 8)
subjects_val_tot = []    # subject id per validation sample
series_val_tot = []      # series id per validation sample
ids_tot = []             # row ids for the test series (9 & 10)
subjects_test_tot = []   # subject id per test sample
series_test_tot = []     # series id per test sample
# #### generate predictions #####
for subject in subjects:
    print('Loading data for subject %d...' % subject)
    # ############### READ DATA ###############################################
    fnames = glob('data/train/subj%d_series*_data.csv' % (subject))
    fnames.sort()
    # The last two training series are held out for validation.
    fnames_val = fnames[-2:]
    fnames_test = glob('data/test/subj%d_series*_data.csv' % (subject))
    fnames_test.sort()
    raw_val = concatenate_raws([creat_mne_raw_object(fname, read_events=True)
                                for fname in fnames_val])
    raw_test = concatenate_raws([creat_mne_raw_object(fname, read_events=False)
                                 for fname in fnames_test])
    # extract labels for series 7&8
    # NOTE(review): assumes the first 32 rows of _data are EEG channels and
    # the rest are event channels -- confirm against creat_mne_raw_object.
    labels = raw_val._data[32:]
    lbls_tot.append(labels.transpose())
    # aggregate infos for validation (series 7&8)
    raw_series7 = creat_mne_raw_object(fnames_val[0])
    raw_series8 = creat_mne_raw_object(fnames_val[1])
    series = np.array([7] * raw_series7.n_times +
                      [8] * raw_series8.n_times)
    series_val_tot.append(series)
    subjs = np.array([subject]*labels.shape[1])
    subjects_val_tot.append(subjs)
    # aggregate infos for test (series 9&10)
    ids = np.concatenate([np.array(pd.read_csv(fname)['id'])
                          for fname in fnames_test])
    ids_tot.append(ids)
    # Lexicographic sort puts "series10" before "series9", so index 0 is
    # series 10 and index 1 is series 9; the [10]*... + [9]*... ordering
    # below matches the file (and hence id) order.
    raw_series9 = creat_mne_raw_object(fnames_test[1], read_events=False)
    raw_series10 = creat_mne_raw_object(fnames_test[0], read_events=False)
    series = np.array([10] * raw_series10.n_times +
                      [9] * raw_series9.n_times)
    series_test_tot.append(series)
    subjs = np.array([subject]*raw_test.n_times)
    subjects_test_tot.append(subjs)
# save validation infos
subjects_val_tot = np.concatenate(subjects_val_tot)
series_val_tot = np.concatenate(series_val_tot)
lbls_tot = np.concatenate(lbls_tot)
toSave = np.c_[lbls_tot, subjects_val_tot, series_val_tot]
np.save('infos_val.npy', toSave)
# save test infos
subjects_test_tot = np.concatenate(subjects_test_tot)
series_test_tot = np.concatenate(series_test_tot)
ids_tot = np.concatenate(ids_tot)
toSave = np.c_[ids_tot, subjects_test_tot, series_test_tot]
np.save('infos_test.npy', toSave)
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
7d61b22340803854812ce2fb50445f429aebeeb0
|
df44affab179c2546fb3e0d1dc29eebcfdf51c1c
|
/toughradius/common/smsapi.py
|
06534cfab472a0cec1e3f4ef54e6c8980f14269e
|
[] |
no_license
|
sailorhdx/taurusradius
|
121c508e7faffaddcd5326d2b6d3710eaf0ed08e
|
92d30820611a0c9102ae41713ea3c35437a3c6ee
|
refs/heads/master
| 2021-01-22T02:28:31.543338
| 2017-06-17T02:15:33
| 2017-06-17T02:15:33
| 92,362,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
#!/usr/bin/env python
# coding=utf-8
import time
import json
import base64
from urllib import urlencode
from toughradius.toughlib import apiutils
from toughradius.toughlib import logger
from toughradius.toughlib import utils
from toughradius.toughlib.smsutils import smscn
from toughradius.toughlib.smsutils import qcloud
from toughradius.toughlib.smsutils import sendcloud
from toughradius.toughlib.smsutils import toughcloud
from toughradius.toughlib.btforms import rules
from cyclone import httpclient
from twisted.internet import defer
class SmsApi(object):
    """Dispatcher that routes SMS sends to one of the supported gateways."""

    def __init__(self):
        # Gateway identifiers accepted by send_sms().
        self.gateways = ['toughcloud',
         'smscn',
         'qcloud',
         'sendcloud']
        # Lazily-created backend clients, keyed by gateway name.
        self.smscalls = {}

    def get_instance(self, gateway, apikey, apisecret):
        """Return (creating and caching on first use) the backend client
        for *gateway*; returns None for an unknown gateway."""
        if gateway in self.smscalls:
            return self.smscalls[gateway]
        if gateway == 'smscn':
            self.smscalls[gateway] = smscn.SmsApi(apikey, apisecret)
        elif gateway == 'qcloud':
            self.smscalls[gateway] = qcloud.SmsApi(apikey, apisecret)
        elif gateway == 'sendcloud':
            self.smscalls[gateway] = sendcloud.SmsApi(apikey, apisecret)
        elif gateway == 'toughcloud':
            self.smscalls[gateway] = toughcloud.SmsApi(apikey, apisecret)
        # .get() yields None when no branch above matched.
        return self.smscalls.get(gateway)

    @defer.inlineCallbacks
    def send_sms(self, gateway, apikey, apisecret, sendphone, tplid, args = [], kwargs = {}):
        """Send a templated SMS through *gateway* (Twisted coroutine).

        Validation problems raise ValueError; send failures are logged and
        resolve the Deferred with False; otherwise the backend response.
        NOTE(review): the mutable defaults args=[]/kwargs={} are shared
        across calls; they appear to be only read here -- confirm before
        mutating them downstream.
        """
        if gateway not in self.gateways:
            raise ValueError(u'gateway [%s] not support' % gateway)
        if not rules.is_mobile.valid(sendphone):
            raise ValueError(u'sendsms: %s mobile format error' % sendphone)
        try:
            api = self.get_instance(gateway, apikey, apisecret)
            resp = yield api.send_sms(sendphone, tplid, args=args, kwargs=kwargs)
            defer.returnValue(resp)
        except Exception as err:
            logger.exception(err)
            defer.returnValue(False)

# Module-level singleton and convenience alias used by importers.
_smsapi = SmsApi()
send_sms = _smsapi.send_sms
|
[
"sailorhdx@hotmail.com"
] |
sailorhdx@hotmail.com
|
b8a0233512848689eab5dea8d359062c641e2a1d
|
6a2bda031f53b057e7aac3aeebd070151f5923f1
|
/zmqpy/zmqpy.py
|
d012bf64ed8ad12196b425161f1ed1cec45fec26
|
[
"BSD-2-Clause"
] |
permissive
|
pfw/zmqpy
|
ab34b9f9f7e662e5d056a5a35078c27f4c9b5d9b
|
185758349176709da43327e1f9b7c7c04d4ca850
|
refs/heads/master
| 2020-12-24T20:14:41.117019
| 2012-12-10T18:43:17
| 2012-12-10T18:43:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,024
|
py
|
# coding: utf-8
from ._cffi import C, ffi, zmq_version, new_uint64_pointer, \
new_int64_pointer, \
new_int_pointer, \
new_binary_data, \
value_uint64_pointer, \
value_int64_pointer, \
value_int_pointer, \
value_binary_data
from .constants import *
from .error import *
from .utils import jsonapi
class Context(object):
    """Wrapper around a 0MQ context.

    NOTE: ``__init__`` rebinds ``self.__dict__`` to the class-level
    ``_state`` dict (Borg pattern), so every Context instance shares the
    same attribute state.
    """
    # Shared attribute store for all instances (Borg pattern).
    _state = {}
    def __init__(self, iothreads=1):
        # zmq_init requires a positive I/O thread count.
        if not iothreads > 0:
            raise ZMQError(EINVAL)
        self.__dict__ = self._state
        self.zmq_ctx = C.zmq_init(iothreads)
        self.iothreads = iothreads
        self._closed = False
        self.n_sockets = 0
        self.max_sockets = 32
        self._sockets = {}
        # Default socket options applied to every socket created via
        # socket(); LINGER=1 keeps close() from blocking indefinitely.
        self.sockopts = {LINGER: 1}
        self.linger = 1
    def term(self):
        """Close all tracked sockets, then terminate the 0MQ context."""
        if self.closed:
            return
        # NOTE(review): deleting keys while iterating items() is safe on
        # Python 2 (items() returns a list); on Python 3 this would need
        # list(self._sockets.items()).
        for k, s in self._sockets.items():
            if not s.closed:
                s.close()
            del self._sockets[k]
        C.zmq_term(self.zmq_ctx)
        self.zmq_ctx = None
        self._closed = True
        self.n_sockets = 0
    @property
    def closed(self):
        # True once term() has run.
        return self._closed
    def _add_socket(self, socket):
        """Track *socket*; returns the post-increment socket counter.

        NOTE(review): the returned value is one greater than the dict key
        just used, so passing it to _rm_socket() would miss — nothing in
        this module does so, but verify before relying on it.
        """
        self._sockets[self.n_sockets] = socket
        self.n_sockets += 1
        return self.n_sockets
    def _rm_socket(self, n):
        # Forget the socket registered under key *n*.
        del self._sockets[n]
    def socket(self, sock_type):
        """Create a Socket of *sock_type* with the context's default opts."""
        if self._closed:
            raise ZMQError(ENOTSUP)
        socket = Socket(self, sock_type)
        for option, option_value in self.sockopts.items():
            socket.setsockopt(option, option_value)
        return socket
    def set_linger(self, value):
        """Change the LINGER default applied to sockets created later."""
        self.sockopts[LINGER] = value
        self.linger = value
def new_pointer_from_opt(option, length=0):
    """Allocate a fresh C-level out-parameter suited to *option*'s type.

    *length* is only meaningful for binary options, where it sizes the
    buffer. Raises ValueError for an option in none of the known sets.
    """
    if option in uint64_opts:
        return new_uint64_pointer()
    if option in int64_opts:
        return new_int64_pointer()
    if option in int_opts:
        return new_int_pointer()
    if option in binary_opts:
        return new_binary_data(length)
    raise ValueError('Invalid option')
def value_from_opt_pointer(option, opt_pointer, length=0):
    """Convert a filled C out-parameter back into a Python value.

    Integer-typed options (uint64/int64/int) all decay to a Python int;
    binary options are read as a C string. Raises ValueError otherwise.
    """
    if option in uint64_opts or option in int64_opts or option in int_opts:
        # All three integer families are read the same way.
        return int(opt_pointer[0])
    if option in binary_opts:
        return ffi.string(opt_pointer)
    raise ValueError('Invalid option')
def initialize_opt_pointer(option, value, length=0):
    """Build a C-level (pointer, size) pair initialized from *value*,
    typed according to which option family *option* belongs to.

    Raises ValueError for an unrecognized option.
    """
    if option in uint64_opts:
        return value_uint64_pointer(value)
    if option in int64_opts:
        return value_int64_pointer(value)
    if option in int_opts:
        return value_int_pointer(value)
    if option in binary_opts:
        return value_binary_data(value, length)
    raise ValueError('Invalid option')
class Socket(object):
    """Thin wrapper around a raw 0MQ socket handle.

    Created via Context.socket(); registers itself with the owning context.
    On C-level failures, getsockopt() records errno in ``last_errno`` and
    returns -1, while recv() raises ZMQError.
    """

    def __init__(self, context, sock_type):
        self.context = context
        self.sock_type = sock_type
        self.zmq_socket = C.zmq_socket(context.zmq_ctx, sock_type)
        if not self.zmq_socket:
            raise ZMQError()
        self._closed = False
        self._attrs = {}
        # Id handed back by the context's socket registry.
        self.n = self.context._add_socket(self)
        self.last_errno = None

    @property
    def closed(self):
        # True once close() has run.
        return self._closed

    def close(self):
        """Close the underlying 0MQ socket (idempotent)."""
        if not self._closed:
            C.zmq_close(self.zmq_socket)
            self._closed = True

    def bind(self, address):
        """Bind to *address*; returns the raw zmq_bind return code."""
        ret = C.zmq_bind(self.zmq_socket, address)
        return ret

    def connect(self, address):
        """Connect to *address*; returns the raw zmq_connect return code."""
        ret = C.zmq_connect(self.zmq_socket, address)
        return ret

    def setsockopt(self, option, value):
        """Set socket *option* to *value*; returns the raw return code."""
        length = None
        if isinstance(value, str):
            length = len(value)
        low_level_data = initialize_opt_pointer(option, value, length)
        low_level_value_pointer = low_level_data[0]
        low_level_sizet = low_level_data[1]
        ret = C.zmq_setsockopt(self.zmq_socket,
                               option,
                               ffi.cast('void*', low_level_value_pointer),
                               low_level_sizet)
        return ret

    def getsockopt(self, option, length=0):
        """Read socket *option*; returns the value, or -1 on error (errno
        saved in ``last_errno``)."""
        low_level_data = new_pointer_from_opt(option, length=length)
        low_level_value_pointer = low_level_data[0]
        low_level_sizet_pointer = low_level_data[1]
        ret = C.zmq_getsockopt(self.zmq_socket,
                               option,
                               low_level_value_pointer,
                               low_level_sizet_pointer)
        if ret < 0:
            self.last_errno = C.zmq_errno()
            return -1
        return value_from_opt_pointer(option, low_level_value_pointer)

    def send(self, message, flags=0, copy=False):
        """Send *message* (bytes/str); returns the raw send return code
        (negative on error, with errno saved in ``last_errno``)."""
        zmq_msg = ffi.new('zmq_msg_t*')
        c_message = ffi.new('char[]', message)
        C.zmq_msg_init_size(zmq_msg, len(message))
        C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
        if zmq_version == 2:
            ret = C.zmq_send(self.zmq_socket, zmq_msg, flags)
        else:
            # 0MQ 3+ renamed the message-based call to zmq_sendmsg.
            ret = C.zmq_sendmsg(self.zmq_socket, zmq_msg, flags)
        C.zmq_msg_close(zmq_msg)
        if ret < 0:
            self.last_errno = C.zmq_errno()
        return ret

    def recv(self, flags=0):
        """Receive one message; returns its payload as a byte string.

        Raises ZMQError on failure.
        """
        zmq_msg = ffi.new('zmq_msg_t*')
        C.zmq_msg_init(zmq_msg)
        if zmq_version == 2:
            ret = C.zmq_recv(self.zmq_socket, zmq_msg, flags)
        else:
            ret = C.zmq_recvmsg(self.zmq_socket, zmq_msg, flags)
        if ret < 0:
            C.zmq_msg_close(zmq_msg)
            # BUG FIX: the original raised ``zmqpy.ZMQError`` — but the name
            # ``zmqpy`` is never imported in this module, so a failed recv
            # raised NameError instead of the intended ZMQError (which is
            # already in scope via ``from .error import *``).
            raise ZMQError(_errno=C.zmq_errno())
        value = ffi.buffer(C.zmq_msg_data(zmq_msg), int(C.zmq_msg_size(zmq_msg)))[:]
        C.zmq_msg_close(zmq_msg)
        return value
def make_zmq_pollitem(socket, flags):
    """Build a C-level zmq_pollitem_t describing *socket* and *flags*."""
    item = ffi.new('zmq_pollitem_t*')
    item.socket = socket.zmq_socket
    item.fd = 0
    item.events = flags
    item.revents = 0
    # Dereference: callers want the struct value, not the pointer.
    return item[0]
def _poll(zmq_pollitem_list, poller, timeout=-1):
    """Low-level zmq_poll wrapper.

    Returns a list of (Socket, revents) pairs for every item that reported
    at least one pending event; *poller* supplies the raw-handle -> Socket
    mapping used to translate results back to Python objects.
    """
    if zmq_version == 2:
        # 0MQ 2.x takes the timeout in a finer unit (hence the x1000);
        # 3.x+ takes milliseconds directly.
        timeout = timeout * 1000
    items = ffi.new('zmq_pollitem_t[]', zmq_pollitem_list)
    list_length = ffi.cast('int', len(zmq_pollitem_list))
    c_timeout = ffi.cast('long', timeout)
    C.zmq_poll(items, list_length, c_timeout)
    result = []
    for index in range(len(items)):
        # revents > 0 means at least one requested event fired.
        if items[index].revents > 0:
            result.append((poller._sockets[items[index].socket],
                           items[index].revents))
    return result
# Code From PyZMQ
class Poller(object):
    """Register sockets and poll them for I/O readiness (PyZMQ-style API)."""
    def __init__(self):
        # Socket wrapper -> registered event flags.
        self.sockets = {}
        # Raw zmq_socket handle -> Socket wrapper; consumed by _poll() to
        # map C-level results back to Python objects.
        self._sockets = {}
        # Socket wrapper -> C-level zmq_pollitem_t struct.
        self.c_sockets = {}
    def register(self, socket, flags=POLLIN|POLLOUT):
        """Register *socket* for *flags*; flags == 0 unregisters it."""
        if flags:
            self.sockets[socket] = flags
            self._sockets[socket.zmq_socket] = socket
            self.c_sockets[socket] = make_zmq_pollitem(socket, flags)
        elif socket in self.sockets:
            # unregister sockets registered with no events
            self.unregister(socket)
        else:
            # ignore new sockets with no events
            pass
    def modify(self, socket, flags=POLLIN|POLLOUT):
        """Re-register *socket* with new *flags*."""
        self.register(socket, flags)
    def unregister(self, socket):
        """Drop *socket* from all three tracking maps."""
        del self.sockets[socket]
        del self._sockets[socket.zmq_socket]
        del self.c_sockets[socket]
    def poll(self, timeout=None):
        """Poll all registered sockets.

        *timeout* is in milliseconds; None or any negative value blocks
        indefinitely. Returns the (Socket, revents) list from _poll().
        """
        if timeout is None:
            timeout = -1
        timeout = int(timeout)
        if timeout < 0:
            timeout = -1
        items = _poll(self.c_sockets.values(),
                      self,
                      timeout=timeout)
        return items
|
[
"felipecruz@loogica.net"
] |
felipecruz@loogica.net
|
0e10c4dc821d92d19a19781c29184f2c21a3a2f8
|
a2e0e03e31f892454e537df32e3e1e1d94764fa0
|
/virtual/bin/gunicorn_paster
|
a5acb6c5f33e8c0f85c1fc0f5f42198bd48c6b30
|
[
"MIT"
] |
permissive
|
MichelAtieno/Instagram-Clone
|
557272585a3fff6f7a7c552b08cc5ef5e2c129da
|
7250579e4f91084ad9bf8bd688df3f556dfef64a
|
refs/heads/master
| 2020-03-30T16:23:19.351522
| 2018-10-09T09:42:05
| 2018-10-09T09:42:05
| 151,406,356
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
#!/home/michel/Desktop/Python-Django/Instagram/virtual/bin/python3.6
# -*- coding: utf-8 -*-
# Console-script shim for gunicorn's paster entry point (generated by the
# installer; it simply normalizes argv[0] and delegates to gunicorn).
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" suffix from argv[0] (the
    # Windows launcher convention) so the program name is clean.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"mishqamish@gmail.com"
] |
mishqamish@gmail.com
|
|
3a4c7f9d712049cf02648c56d53ff66b940cd9fb
|
05d692469305dd1adb9ebc46080525bb4515b424
|
/Exception handling/tryfinally5.py
|
7a605945006ab61d29eedb42aaf62afea001654d
|
[] |
no_license
|
rajdharmkar/pythoncode
|
979805bc0e672f123ca1460644a4bd71d7854fd5
|
15b758d373f27da5680a711bf12c07e86758c447
|
refs/heads/master
| 2020-08-07T18:30:55.575632
| 2019-10-14T12:46:09
| 2019-10-14T12:46:09
| 213,551,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# Demonstrates try/finally: the finally block runs whether or not the
# write raises.
#
# BUG FIX 1: the original used Python-2-only print statements, which fail
# to parse on Python 3; the parenthesized call form works on both.
# BUG FIX 2: open() was inside the try block, so if opening failed the
# finally clause raised NameError on the undefined ``fob``; the file is
# now opened before entering the try.
fob = open('test.txt', 'w')
try:
    fob.write("It's my test file to verify try-finally in exception handling!!")
    print('try block executed')
finally:
    # Runs on both the success and the exception path.
    fob.close()
    print('finally block executed')
|
[
"rajdharmkar@gmail.com"
] |
rajdharmkar@gmail.com
|
6893b1b04629476fddf2845af7cfe5908b9cb720
|
72e11a80587342b3f278d4df18406cd4ce7531e8
|
/hgdemandimport/demandimportpy3.py
|
e2ea27fa0f1166fc55324efb1bbdaf6c4a5029c6
|
[] |
no_license
|
EnjoyLifeFund/Debian_py36_packages
|
740666f290cef73a4f634558ccf3fd4926addeda
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
refs/heads/master
| 2021-08-24T02:17:24.349195
| 2017-12-06T06:18:35
| 2017-12-06T06:18:35
| 113,167,612
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,498
|
py
|
# demandimportpy3 - global demand-loading of modules for Mercurial
#
# Copyright 2017 Facebook Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Lazy loading for Python 3.6 and above.
This uses the new importlib finder/loader functionality available in Python 3.5
and up. The code reuses most of the mechanics implemented inside importlib.util,
but with a few additions:
* Allow excluding certain modules from lazy imports.
* Expose an interface that's substantially the same as demandimport for
Python 2.
This also has some limitations compared to the Python 2 implementation:
* Much of the logic is per-package, not per-module, so any packages loaded
before demandimport is enabled will not be lazily imported in the future. In
practice, we only expect builtins to be loaded before demandimport is
enabled.
"""
# This line is unnecessary, but it satisfies test-check-py3-compat.t.
from __future__ import absolute_import
import contextlib
import importlib.abc
import importlib.machinery
import importlib.util
import sys
_deactivated = False
class _lazyloaderex(importlib.util.LazyLoader):
    """LazyLoader variant that honours the module-global ``_deactivated``
    switch and the ``ignore`` list of always-eager module names.
    """

    def exec_module(self, module):
        """Execute *module* eagerly when lazy importing is suspended or the
        module is on the ignore list; otherwise defer to LazyLoader."""
        eager = _deactivated or module.__name__ in ignore
        if eager:
            self.loader.exec_module(module)
        else:
            super().exec_module(module)
# This is 3.6+ because with Python 3.5 it isn't possible to lazily load
# extensions. See the discussion in https://python.org/sf/26186 for more.
# Lazy-loader factories, one per loader type that core Python installs.
_extensions_loader = _lazyloaderex.factory(
    importlib.machinery.ExtensionFileLoader)
_bytecode_loader = _lazyloaderex.factory(
    importlib.machinery.SourcelessFileLoader)
_source_loader = _lazyloaderex.factory(importlib.machinery.SourceFileLoader)
def _makefinder(path):
    """Build a FileFinder for *path* whose loaders are all lazy variants;
    installed into sys.path_hooks by enable()."""
    return importlib.machinery.FileFinder(
        path,
        # This is the order in which loaders are passed in in core Python.
        (_extensions_loader, importlib.machinery.EXTENSION_SUFFIXES),
        (_source_loader, importlib.machinery.SOURCE_SUFFIXES),
        (_bytecode_loader, importlib.machinery.BYTECODE_SUFFIXES),
    )
# Module names that must always be imported eagerly; set via init().
ignore = []
def init(ignorelist):
    """Install *ignorelist* as the collection of never-lazy module names."""
    global ignore
    ignore = ignorelist
def isenabled():
    """Return True when the lazy finder is installed and not suspended."""
    return _makefinder in sys.path_hooks and not _deactivated
def disable():
    """Uninstall every occurrence of the lazy finder from sys.path_hooks."""
    # Membership-tested loop; equivalent to removing until ValueError.
    while _makefinder in sys.path_hooks:
        sys.path_hooks.remove(_makefinder)
def enable():
    """Install the lazy finder at highest priority in sys.path_hooks."""
    sys.path_hooks.insert(0, _makefinder)
@contextlib.contextmanager
def deactivated():
    """Context manager that temporarily suspends lazy importing, restoring
    the previous state on exit (no-op when lazy importing is off)."""
    # This implementation is a bit different from Python 2's. Python 3
    # maintains a per-package finder cache in sys.path_importer_cache (see
    # PEP 302). This means that we can't just call disable + enable.
    # If we do that, in situations like:
    #
    #   demandimport.enable()
    #   ...
    #   from foo.bar import mod1
    #   with demandimport.deactivated():
    #       from foo.bar import mod2
    #
    # mod2 will be imported lazily. (The converse also holds -- whatever finder
    # first gets cached will be used.)
    #
    # Instead, have a global flag the LazyLoader can use.
    global _deactivated
    demandenabled = isenabled()
    if demandenabled:
        _deactivated = True
    try:
        yield
    finally:
        # Only restore if we flipped the flag ourselves.
        if demandenabled:
            _deactivated = False
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
68b377124eb26ae187dc04f00fc3c6cc81fed129
|
a21d2fb3f111f30b842a4c3a5c6940d1a003b94d
|
/Python3/Foundation/Day 8/进程 join.py
|
883a86cf1f1208853b0f0f2f71b0dd2e70e1d4ae
|
[] |
no_license
|
hygnic/MyPython
|
438f16206770a006a3b7bcf2ada9150c71ce8af9
|
26aaa57728ad545af5920ff2015eae258712d077
|
refs/heads/master
| 2021-07-01T02:10:15.810495
| 2021-02-26T08:07:54
| 2021-02-26T08:07:54
| 220,612,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
# User: hygnic
# Date: 2018/9/8
# User: hygnic
# Date: 2018/9/8
import os
import time
from multiprocessing import Process
# help(os)
def func1(args):
    # Child-process worker: echo the argument, idle briefly to simulate
    # work, then report the child's PID.
    print(args)
    time.sleep(2)
    print('son process: ', os.getpid())
def func2(filename, content):
    """Write *content* to *filename*, truncating any existing file."""
    with open(filename, 'w') as out_file:
        out_file.write(content)
if __name__ == '__main__':
    # Register the child processes.
    j_list = []
    for i in range(10): # start several child processes
        f1 = Process(target=func1, args=('*' * i,)) # trailing comma makes a 1-tuple for a single argument
        # p2 = Process(target=func, args=('arg1', 'arg2')) -- how additional child processes would be started
        f1.start() # start one child process; internally this invokes run()
        j_list.append(f1) # the list holds every Process object
    f2 = Process(target=func2, args=('info', 'func2 content'))
    f2.start()
    # print(j_list)
    # Block the parent until each joined child finishes, turning the
    # asynchronous starts into a synchronous wait.
    [f1.join() for f1 in j_list] # list comprehension (used for side effects)
    print('Done! father process: ', os.getpid())
|
[
"hygnic@outlook.com"
] |
hygnic@outlook.com
|
d070ea5b57e7c9f251743e491b019532adcef562
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_indispositions.py
|
fabc644b17f9f752e045cebb4233bf3276caa5da
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#calss header
class _INDISPOSITIONS():
def __init__(self,):
self.name = "INDISPOSITIONS"
self.definitions = indisposition
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['indisposition']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
11b3528b2e69e8e20b3ffec5e3cabb26665f60f8
|
7653ddbbc2256fae9cc62251f0241d0e9696df7d
|
/pyshtools/spectralanalysis/cross_spectrum.py
|
5b48e4b63cf25c38d0ad3ff3a882735c27d890b2
|
[
"BSD-3-Clause"
] |
permissive
|
SHTOOLS/SHTOOLS
|
c3415b38da290805ecdfd59699587e5ac5233cc8
|
93e77dcc6b36b2363f07d79d07ec47d86e6cba65
|
refs/heads/master
| 2023-08-31T01:35:49.211882
| 2023-08-28T10:50:08
| 2023-08-28T10:50:08
| 24,725,612
| 315
| 117
|
BSD-3-Clause
| 2023-08-28T10:50:10
| 2014-10-02T15:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 6,773
|
py
|
import numpy as _np
from scipy.special import factorial as _factorial
def cross_spectrum(clm1, clm2, normalization='4pi', degrees=None, lmax=None,
                   convention='power', unit='per_l', base=10.):
    """
    Return the cross-spectrum of the spherical harmonic coefficients as a
    function of spherical harmonic degree.

    Usage
    -----
    array = cross_spectrum(clm1, clm2, [normalization, degrees, lmax,
                                        convention, unit, base])

    Returns
    -------
    array : ndarray, shape (len(degrees))
        1-D ndarray of the spectrum.

    Parameters
    ----------
    clm1 : ndarray, shape (2, lmax + 1, lmax + 1)
        ndarray containing the first set of spherical harmonic coefficients.
    clm2 : ndarray, shape (2, lmax + 1, lmax + 1)
        ndarray containing the second set of spherical harmonic coefficients.
    normalization : str, optional, default = '4pi'
        '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
        orthonormalized, Schmidt semi-normalized, or unnormalized coefficients,
        respectively.
    lmax : int, optional, default = len(clm[0,:,0]) - 1.
        Maximum spherical harmonic degree to output.
    degrees : ndarray, optional, default = numpy.arange(lmax+1)
        Array containing the spherical harmonic degrees where the spectrum
        is computed.
    convention : str, optional, default = 'power'
        The type of spectrum to return: 'power' for power spectrum, 'energy'
        for energy spectrum, and 'l2norm' for the l2-norm spectrum.
    unit : str, optional, default = 'per_l'
        If 'per_l', return the total contribution to the spectrum for each
        spherical harmonic degree l. If 'per_lm', return the average
        contribution to the spectrum for each coefficient at spherical
        harmonic degree l. If 'per_dlogl', return the spectrum per log
        interval dlog_a(l).
    base : float, optional, default = 10.
        The logarithm base when calculating the 'per_dlogl' spectrum.

    Raises
    ------
    ValueError
        For an unrecognized normalization/convention/unit, for mixed
        real/complex inputs, or for 'l2norm' combined with 'unnorm'.

    Notes
    -----
    This function returns either the cross-power spectrum, cross-energy
    spectrum, or l2-cross-norm spectrum. Total cross-power is defined as the
    integral of the clm1 times the conjugate of clm2 over all space, divided
    by the area the functions span. If the mean of the functions is zero,
    this is equivalent to the covariance of the two functions. The total
    cross-energy is the integral of clm1 times the conjugate of clm2 over all
    space and is 4pi times the total power. The l2-cross-norm is the
    sum of clm1 times the conjugate of clm2 over all angular orders as a
    function of spherical harmonic degree.

    The output spectrum can be expresed using one of three units. 'per_l'
    returns the contribution to the total spectrum from all angular orders
    at degree l. 'per_lm' returns the average contribution to the total
    spectrum from a single coefficient at degree l, and is equal to the
    'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the contribution to
    the total spectrum from all angular orders over an infinitessimal
    logarithmic degree band. The contrubution in the band dlog_a(l) is
    spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base, and where
    spectrum(l, 'per_dlogl) is equal to spectrum(l, 'per_l')*l*log(a).
    """
    if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
        raise ValueError("The normalization must be '4pi', 'ortho', " +
                         "'schmidt', or 'unnorm'. Input value was {:s}."
                         .format(repr(normalization)))

    if convention.lower() not in ('power', 'energy', 'l2norm'):
        raise ValueError("convention must be 'power', 'energy', or " +
                         "'l2norm'. Input value was {:s}"
                         .format(repr(convention)))

    if unit.lower() not in ('per_l', 'per_lm', 'per_dlogl'):
        raise ValueError("unit must be 'per_l', 'per_lm', or 'per_dlogl'." +
                         "Input value was {:s}".format(repr(unit)))

    if _np.iscomplexobj(clm1) is not _np.iscomplexobj(clm2):
        raise ValueError('clm1 and clm2 must both be either real or ' +
                         'complex. \nclm1 is complex : {:s}\n'
                         .format(repr(_np.iscomplexobj(clm1))) +
                         'clm2 is complex : {:s}'
                         .format(repr(_np.iscomplexobj(clm2))))

    if lmax is None:
        lmax = len(clm1[0, :, 0]) - 1

    if degrees is None:
        degrees = _np.arange(lmax+1)

    if _np.iscomplexobj(clm1):
        array = _np.empty(len(degrees), dtype=_np.complex128)
    else:
        array = _np.empty(len(degrees))

    if normalization.lower() == 'unnorm':
        if convention.lower() == 'l2norm':
            raise ValueError("convention can not be set to 'l2norm' when " +
                             "using unnormalized harmonics.")

        for i, l in enumerate(degrees):
            ms = _np.arange(l+1)
            # Degree/order-dependent conversion factor for unnormalized
            # harmonics: (l+m)! / (2l+1) / (l-m)!.
            conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)

            if _np.iscomplexobj(clm1):
                array[i] = (conv[0:l + 1] * clm1[0, l, 0:l + 1] *
                            clm2[0, l, 0:l + 1].conjugate()).real.sum() + \
                           (conv[1:l + 1] * clm1[1, l, 1:l + 1] *
                            clm2[1, l, 1:l + 1].conjugate()).real.sum()
            else:
                # The m > 0 terms carry a factor 1/2 for real harmonics.
                conv[1:l + 1] = conv[1:l + 1] / 2.
                # BUG FIX: the original squared clm1 (cosine terms) and clm2
                # (sine terms) separately, which computes a power spectrum,
                # not a cross-spectrum. The cross-spectrum multiplies the
                # two coefficient sets, as in the normalized branch below.
                array[i] = (conv[0:l + 1] * clm1[0, l, 0:l+1] *
                            clm2[0, l, 0:l+1]).sum() + \
                           (conv[1:l + 1] * clm1[1, l, 1:l+1] *
                            clm2[1, l, 1:l+1]).sum()
    else:
        for i, l in enumerate(degrees):
            if _np.iscomplexobj(clm1):
                array[i] = (clm1[0, l, 0:l + 1] *
                            clm2[0, l, 0:l + 1].conjugate()).sum() + \
                           (clm1[1, l, 1:l + 1] *
                            clm2[1, l, 1:l + 1].conjugate()).sum()
            else:
                array[i] = (clm1[0, l, 0:l + 1] * clm2[0, l, 0:l + 1]).sum() \
                    + (clm1[1, l, 1:l + 1] * clm2[1, l, 1:l + 1]).sum()

    if convention.lower() == 'l2norm':
        return array
    else:
        # Rescale from the input normalization to 4pi-equivalent power.
        if normalization.lower() == '4pi':
            pass
        elif normalization.lower() == 'schmidt':
            array /= (2. * degrees + 1.)
        elif normalization.lower() == 'ortho':
            array /= (4. * _np.pi)

    if convention.lower() == 'energy':
        array *= 4. * _np.pi

    if unit.lower() == 'per_l':
        pass
    elif unit.lower() == 'per_lm':
        array /= (2. * degrees + 1.)
    elif unit.lower() == 'per_dlogl':
        array *= degrees * _np.log(base)

    return array
|
[
"mark.a.wieczorek@gmail.com"
] |
mark.a.wieczorek@gmail.com
|
b9c464b3bef52750e1eb2aeee448f5b6b3831cf5
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/acl/acl.py
|
3d4dcafee709a227b8b3d2212c124493b9b78f7a
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,907
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ACL(Mo):
    """Abstract base for access-control-list managed objects (generated
    model code — see the "do not modify" header at the top of this file).

    Concrete subclasses: cobra.model.ipv4acl.ACL and cobra.model.ipv6acl.ACL.
    """
    meta = ClassMeta("cobra.model.acl.ACL")

    meta.isAbstract = True
    meta.moClassName = "aclACL"
    # NOTE(review): the assignment above is duplicated in the generated
    # source; both lines are kept verbatim (harmless — same value).
    meta.moClassName = "aclACL"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Access control list"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    meta.childClasses.add("cobra.model.fault.Delegate")

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    meta.superClasses.add("cobra.model.pol.Instr")

    meta.superClasses.add("cobra.model.naming.NamedObject")

    meta.superClasses.add("cobra.model.pol.Obj")

    meta.concreteSubClasses.add("cobra.model.ipv6acl.ACL")

    meta.concreteSubClasses.add("cobra.model.ipv4acl.ACL")

    meta.rnPrefixes = [
    ]

    # Implicit "childAction" property with its allowed action constants.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    # Free-text description, up to 128 characters.
    prop = PropMeta("str", "descr", "descr", 28441, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    # Distinguished name: implicit, set once at creation.
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    # Relative name: implicit, set once at creation.
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    # Lifecycle status flags (created/deleted/modified).
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # No naming properties: this abstract class has an empty rnFormat.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
97538fec829806a6dc0663c869c8d080db247647
|
f62aa26461e381435c86019ca2f502d10ff75b88
|
/catalog/migrations/0006_auto_20170121_1008.py
|
aa8cfe9522994670ffaf090ae699983f1dd31edd
|
[] |
no_license
|
viktortat/CAP
|
edb2aef09169d9bcf04b541682d8dcb067edf1c6
|
60221d8fa1d1ccb209e40001554cb004480dd2d5
|
refs/heads/master
| 2021-06-12T13:03:33.917809
| 2017-04-30T19:58:38
| 2017-04-30T19:58:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-21 07:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add ``pub_date`` (defaulting to now) to CatalogSite and give its
    ``price`` field a default of 0."""

    dependencies = [
        ('catalog', '0005_auto_20170113_0259'),
    ]

    operations = [
        migrations.AddField(
            model_name='catalogsite',
            name='pub_date',
            # verbose_name is user-facing (Russian for "Date"); left as-is.
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата'),
        ),
        migrations.AlterField(
            model_name='catalogsite',
            name='price',
            field=models.FloatField(default=0),
        ),
    ]
|
[
"l2maximum@mail.ru"
] |
l2maximum@mail.ru
|
d6f5168e7ed6ddd0d588ee89ae179faafdae37c6
|
d78dfc5089717fc242bbd7097f507d811abb4260
|
/USA/script.icechannel.Thevideome.settings/default.py
|
b7d904cce475928faec807af89e23a0002229f7a
|
[] |
no_license
|
tustxk/AddOnRepo
|
995b980a9ec737e2c25bed423fc83f710c697e40
|
6b86a06cb37e6e10b4119584dd7311ebc2318e54
|
refs/heads/master
| 2022-10-08T21:34:34.632346
| 2016-10-28T09:48:01
| 2016-10-28T09:48:01
| 70,684,775
| 1
| 1
| null | 2022-10-01T16:27:13
| 2016-10-12T09:31:16
|
Python
|
UTF-8
|
Python
| false
| false
| 169
|
py
|
# Kodi add-on bootstrap: identifies this settings add-on by id and opens
# its settings dialog as soon as the script runs.
addon_id="script.icechannel.Thevideome.settings"
addon_name="iStream - Thevideome - Settings"
import xbmcaddon
# Look up our own add-on object and show the settings UI.
addon = xbmcaddon.Addon(id=addon_id)
addon.openSettings()
|
[
"ke.xiao@netxeon.com"
] |
ke.xiao@netxeon.com
|
57adbfd2865b7cf8540897ff6ca3685bbaf4dfb0
|
164457b943d0b426e9a5e2eb57779e4e37f2d1bb
|
/the_tale/accounts/workers/accounts_manager.py
|
84d4482f67e8a0b8ffab01b81c7cb415dffd6c34
|
[
"BSD-2-Clause-Views"
] |
permissive
|
lshestov/the-tale
|
64334fd99a442ad736d9e8a38e8f0fb52d0ebab6
|
6229edfec6420307975269be9926c68ecdefb930
|
refs/heads/master
| 2021-01-18T08:38:44.147294
| 2015-10-27T18:43:10
| 2015-10-27T18:43:10
| 50,228,827
| 0
| 0
| null | 2016-01-23T07:38:54
| 2016-01-23T07:38:54
| null |
UTF-8
|
Python
| false
| false
| 3,166
|
py
|
# coding: utf-8
import time
import datetime
from dext.settings import settings
from the_tale.common.utils.workers import BaseWorker
from the_tale.common import postponed_tasks
from the_tale.accounts.prototypes import AccountPrototype, RandomPremiumRequestPrototype
from the_tale.accounts.conf import accounts_settings
class Worker(BaseWorker):
    """Accounts-manager worker: runs postponed tasks, daily premium-expiry
    notifications, random-premium-request processing, and per-account
    method invocations dispatched via the command queue."""

    # Seconds to block waiting for a command; when none arrives,
    # process_no_cmd() is invoked (behavior inherited from BaseWorker).
    GET_CMD_TIMEOUT = 60

    def clean_queues(self):
        """Clean inherited queues, then purge this worker's stop queue."""
        super(Worker, self).clean_queues()
        self.stop_queue.queue.purge()

    def initialize(self):
        # Reset any postponed tasks left over from a previous run.
        self.initialized = True
        postponed_tasks.PostponedTaskPrototype.reset_all()
        self.logger.info('ACCOUNT_MANAGER INITIALIZED')

    def process_no_cmd(self):
        """Idle-time work: at most once per ~23.5h, and only within the
        configured hour window, send premium-expired notifications;
        otherwise drain pending random premium requests."""
        # is send premium expired notifications needed
        if (time.time() - float(settings.get(accounts_settings.SETTINGS_PREV_PREIMIUM_EXPIRED_NOTIFICATION_RUN_TIME_KEY, 0)) > 23.5*60*60 and
            accounts_settings.PREMIUM_EXPIRED_NOTIFICATION_RUN_TIME <= datetime.datetime.now().hour <= accounts_settings.PREMIUM_EXPIRED_NOTIFICATION_RUN_TIME+1):
            # Record the run time first so a crash mid-send does not cause
            # an immediate retry storm on the next idle tick.
            settings[accounts_settings.SETTINGS_PREV_PREIMIUM_EXPIRED_NOTIFICATION_RUN_TIME_KEY] = str(time.time())
            self.run_send_premium_expired_notifications()
            return

        self.run_random_premium_requests_processing()

    def run_send_premium_expired_notifications(self):
        """Delegate the daily notification sweep to AccountPrototype."""
        AccountPrototype.send_premium_expired_notifications()

    def run_random_premium_requests_processing(self):
        """Process unprocessed random premium requests until none remain
        or one fails to process."""
        while True:
            request = RandomPremiumRequestPrototype.get_unprocessed()
            if request is None:
                return

            self.logger.info('process random premium request %d' % request.id)

            if not request.process():
                self.logger.info('request %d not processed' % request.id)
                return
            else:
                self.logger.info('request %d processed' % request.id)

    def cmd_task(self, task_id):
        """Enqueue a 'task' command for the given postponed task id."""
        return self.send_cmd('task', {'task_id': task_id})

    def process_task(self, task_id):
        """Run one postponed task and its post-save actions."""
        task = postponed_tasks.PostponedTaskPrototype.get_by_id(task_id)
        task.process(self.logger)
        task.do_postsave_actions()

    def cmd_run_account_method(self, account_id, method_name, data):
        """Enqueue an account-method invocation command."""
        return self.send_cmd('run_account_method', {'account_id': account_id,
                                                    'method_name': method_name,
                                                    'data': data})

    def process_run_account_method(self, account_id, method_name, data):
        """Invoke *method_name* with *data* on the account and save it."""
        if account_id is not None:
            account = AccountPrototype.get_by_id(account_id)
            getattr(account, method_name)(**data)
            account.save()
        else:
            # here we can process classmethods, if they appear in future
            pass

    def cmd_stop(self):
        """Enqueue a 'stop' command for this worker."""
        return self.send_cmd('stop')

    def process_stop(self):
        # Flip the flags first, then acknowledge on the stop queue.
        self.initialized = False
        self.stop_required = True
        self.stop_queue.put({'code': 'stopped', 'worker': 'accounts_manager'}, serializer='json', compression=None)
        self.logger.info('ACCOUNTS MANAGER STOPPED')
|
[
"a.eletsky@gmail.com"
] |
a.eletsky@gmail.com
|
7cf3515e7f6034a2c7c8f4d75546e29fa79cc092
|
1e58c8aaff5bb1273caaa73c49c07fd61ebd4439
|
/wavencoder/__init__.py
|
ff0dd47ac2d71605c97213e27e6d38be784f8314
|
[
"MIT"
] |
permissive
|
samsudinng/wavencoder
|
9870d6dd86cb126b170c9a6af93acee4acbbd633
|
a64e16444ed25b5491fd2ba0c9f1409671e12e5e
|
refs/heads/master
| 2023-03-01T22:42:42.477643
| 2021-02-08T11:23:00
| 2021-02-08T11:23:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
__version__ = '0.0.6'
from wavencoder.models.wav2vec import Wav2Vec
from wavencoder.models.wav2vec2 import Wav2Vec2Model
from wavencoder.models.sincnet import SincNet, SincConvLayer
from wavencoder.models.lstm_classifier import LSTM_Classifier
from wavencoder.models.lstm_classifier import LSTM_Attn_Classifier
from wavencoder.models.baseline import CNN1d
from wavencoder.models.attention import DotAttention, SoftAttention
from wavencoder.models.rawnet import RawNet2Model
from wavencoder.trainer.classification_trainer import train
from wavencoder.trainer.classification_trainer import test_predict_classifier
from wavencoder.trainer.classification_trainer import test_evaluate_classifier
from wavencoder.transforms.noise import AdditiveNoise
from wavencoder.transforms.speed import SpeedChange
from wavencoder.transforms.clip import Clipping
from wavencoder.transforms.pad_crop import Pad, Crop, PadCrop
from wavencoder.transforms.reverberation import Reverberation
from wavencoder.transforms.compose import Compose
|
[
"shangethrajaa@gmail.com"
] |
shangethrajaa@gmail.com
|
70456061b62a6c44867abca2486de5c1e3cbbd30
|
2316ce8a21d44a5d09284968ef42530633dc10d2
|
/sample_code/ep264/rev04/t.py
|
bb11896bd80b2ae0441665c1f237b272afbbc397
|
[] |
no_license
|
AlexanderWinkelmeier/explains
|
160de2c41fc5fc0156b482b41f89644dc585c4f3
|
d47ec53e384e4303a2d8e71fab9073a1a8d2d6bc
|
refs/heads/master
| 2023-07-30T04:55:31.234482
| 2021-09-15T02:59:42
| 2021-09-15T02:59:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
import os.path
import tempfile
SOME_FILE = 'foo.txt'
# Atomic-write pattern: write the new contents to a temp file created in
# the target's directory, then rename it over the target in one step so
# readers never observe a partially written file.
handle, tmp_name = tempfile.mkstemp(dir=os.path.dirname(SOME_FILE))
try:
    out = open(handle, 'w')
    try:
        out.write('these are the new contents\n')
    finally:
        out.close()
    os.replace(tmp_name, SOME_FILE)
except BaseException:
    # Clean up the orphaned temp file on any failure, then re-raise.
    os.remove(tmp_name)
    raise
|
[
"int3l@users.noreply.github.com"
] |
int3l@users.noreply.github.com
|
ac47410c081854dcc9bc0251f7925ae5e152c61f
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fv/poddhcpserverinfo.py
|
578744b8cd584e1c2bc24ce6e7cb39c73bd5bc04
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 7,213
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class PodDhcpServerInfo(Mo):
    """DHCP server info of the current POD (read-only managed object).

    Auto-generated ACI model class ("written by zen warriors, do not
    modify!"): the body only declares class/property metadata consumed by
    the cobra MIT framework; there is no hand-written logic here.
    Mo doc not defined in techpub!!!
    """
    # --- class-level metadata: identity, RN format, access flags ---
    meta = ClassMeta("cobra.model.fv.PodDhcpServerInfo")
    meta.moClassName = "fvPodDhcpServerInfo"
    meta.rnFormat = "podDhcpServerInfo-%(nodeId)s"
    meta.category = MoCategory.REGULAR
    meta.label = "Dhcp Server info of the current POD"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    # --- containment: allowed child/parent/super classes in the MIT ---
    meta.childClasses.add("cobra.model.fault.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.fv.PodConnPDef")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.rnPrefixes = [
        ('podDhcpServerInfo-', True),
    ]
    # --- property metadata: one PropMeta per MO attribute ---
    prop = PropMeta("str", "PodDhcpServerDn", "PodDhcpServerDn", 47391, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("PodDhcpServerDn", prop)
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    prop = PropMeta("str", "dhcpIssues", "dhcpIssues", 47392, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("incorrect-pod-dhcp-server-configuration", "nodeid-of-fabricpoddhcpserver-configured-is-not-a-vtor", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("dhcpIssues", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    # nodeId is the naming property: it forms the RN (see rnFormat above).
    prop = PropMeta("str", "nodeId", "nodeId", 44472, PropCategory.REGULAR)
    prop.label = "node id of Dhcp server"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 16000)]
    prop.defaultValue = 1
    prop.defaultValueStr = "1"
    meta.props.add("nodeId", prop)
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "serverType", "serverType", 44473, PropCategory.REGULAR)
    prop.label = "Dhcp server Type Primary/Secondary"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("primary", "primary", 1)
    prop._addConstant("secondary", "secondary", 2)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("serverType", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    meta.namingProps.append(getattr(meta.props, "nodeId"))
    def __init__(self, parentMoOrDn, nodeId, markDirty=True, **creationProps):
        # nodeId is the sole naming value; Mo.__init__ builds the DN/RN
        # from it using the rnFormat declared on the class meta.
        namingVals = [nodeId]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
0457cef64ea7b68406f6e46a7669f6fc1dce58d8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2518/49823/278350.py
|
2cb366ad69dc313778213eda8c71db7c66cfe53f
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
def al(a, b):
    """Print the largest nearest-neighbour distance from values of ``a``
    to values of ``b``.

    Both inputs are sorted; a single forward pointer scans ``b`` so each
    element of ``a`` is matched against the closest candidate on either
    side of the pointer.
    """
    a = sorted(a)
    b = sorted(b)
    ptr = 0
    best = 0
    for value in a:
        # Advance ptr to the first b[ptr] >= value (or stop at the end).
        while b[ptr] < value and ptr < len(b) - 1:
            ptr += 1
        if ptr == 0:
            nearest = abs(b[0] - value)
        else:
            nearest = min(abs(value - b[ptr - 1]), abs(b[ptr] - value))
        best = max(best, nearest)
    print(best)


if __name__ == '__main__':
    al([int(i) for i in input().split(',')], [int(i) for i in input().split(',')])
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
87d5ac6dbfe5558297a98172e06f7a77e461a57f
|
cb56e1554f43ef93b470019e5a36ddc26680d837
|
/DjangoAyushh/first_site/first_site/wsgi.py
|
2ae3b1b5a0a4a9042a3af49b19677cd72ff23d53
|
[] |
no_license
|
Ayush900/initiating-django
|
6790ed4fde82a18af661922a7e3f7165a6d10b98
|
ea7a2c3f3467dc92f229468fb3de274e1143a3c8
|
refs/heads/master
| 2020-07-05T20:52:05.169025
| 2019-08-16T18:14:49
| 2019-08-16T18:14:49
| 202,770,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for first_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module; setdefault keeps any
# value already present in the process environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first_site.settings')
# Module-level WSGI callable that application servers (gunicorn, uWSGI,
# mod_wsgi, ...) import and invoke.
application = get_wsgi_application()
|
[
"ayush.mehrotra900@gmail.com"
] |
ayush.mehrotra900@gmail.com
|
3b368f4bde0884863f26ff8302e96f97e0254648
|
5897a1b176f9c404fe421c61342f20820f685c07
|
/RIS/OSL/zbrush/normalMap.py
|
0d231757d9fd9b6a6e7b2b9114f427cc59a996c7
|
[] |
no_license
|
NCCA/Renderman
|
d3269e0c7b5e58a69d8744051100013339516ad0
|
ebf17298e0ee56899a3288a3ff8eb4c4a0f806e8
|
refs/heads/master
| 2023-06-09T07:51:29.251270
| 2023-05-25T11:13:37
| 2023-05-25T11:13:37
| 51,373,926
| 17
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,215
|
py
|
#!/usr/bin/python
"""RenderMan (prman) scene script: renders three normal-mapped troll
meshes on a white floor, lit by an image-based dome light, into
``zbrush.exr`` displayed via "it".  The RI calls below are order
dependent (Begin/WorldBegin/WorldEnd/End bracketing), so the statement
sequence must be preserved.
"""
import prman
# import the python functions
import sys
sys.path.append("../../common")
from functions import drawTeapot
import Obj
from Camera import Camera
from Vec4 import Vec4
from Transformation import *
import random
# NOTE(review): drawTeapot, random and the ``filename`` variable below are
# not used anywhere in this script — confirm before removing.
ri = prman.Ri() # create an instance of the RenderMan interface
filename = "__render"
# this is the begining of the rib archive generation we can only
# make RI calls after this function else we get a core dump
ri.Begin("__render")
# now we add the display element using the usual elements
# FILENAME DISPLAY Type Output format
ri.Display("zbrush.exr", "it", "rgba")
ri.Format(1024, 720, 1)
# setup the raytrace / integrators
ri.Hider("raytrace", {"int incremental": [1]})
ri.PixelVariance(0.01)
ri.ShadingRate(0.1)
ri.Integrator("PxrPathTracer", "integrator")
# now set the projection to perspective
ri.Projection(ri.PERSPECTIVE, {ri.FOV: 30})
# Simple translate for our camera
cam = Camera(Vec4(0, 0.9, -3.9), Vec4(0, 0.2, 0), Vec4(0, 1, 0))
cam.place(ri)
# now we start our world
ri.WorldBegin()
# Lighting We need geo to emit light
ri.TransformBegin()
ri.AttributeBegin()
ri.Declare("domeLight", "string")
lightTx = Transformation()
lightTx.setPosition(0, 1, 0)
lightTx.setRotation(90, 0, 0)
lightTx.setScale(1, 1, 1)
ri.ConcatTransform(lightTx.getMatrix())
ri.Light("PxrDomeLight", "domeLight", {"float exposure": [1.0], "string lightColorMap": ["../../disney/studio2.tx"]})
ri.AttributeEnd()
ri.TransformEnd()
# load mesh
troll = Obj.Obj("../../meshes/troll.obj")
tx = Transformation()
# Texture patterns feeding the Disney BxDF: colour, specular and a
# normal map (TrollNMap is declared but the BxDF references TrollBump).
ri.Pattern("PxrTexture", "TrollColour", {"string filename": "../../meshes/TrollColour.tx"})
ri.Pattern("PxrTexture", "TrollSpecular", {"string filename": "../../meshes/TrollSpec.tx"})
ri.Pattern("PxrTexture", "TrollNMap", {"string filename": "../../meshes/TrollNormal.tx"})
ri.Pattern("PxrNormalMap", "TrollBump", {"string filename": "../../meshes/TrollNormal.tx", "float bumpScale": [2]})
ri.Bxdf(
    "PxrDisney",
    "bxdf",
    {
        "reference color baseColor": ["TrollColour:resultRGB"],
        "reference color subsurfaceColor": ["TrollSpecular:resultRGB"],
        "float subsurface": [0.4],
        "reference normal bumpNormal": ["TrollBump:resultN"],
        "float metallic": [0.1],
        "float specular": [0.1],
        "float roughness": [0.3],
    },
)
# Three copies of the troll mesh at different positions/rotations.
ypos = 0.55
ri.TransformBegin()
tx.setPosition(-1, ypos, 0)
tx.setRotation(0, -45, 0)
ri.ConcatTransform(tx.getMatrix())
troll.Polygon(ri)
ri.TransformEnd()
ri.TransformBegin()
tx.setPosition(0, ypos, 0)
tx.setRotation(0, 45, 0)
ri.ConcatTransform(tx.getMatrix())
troll.Polygon(ri)
ri.TransformEnd()
ri.TransformBegin()
tx.setPosition(1, ypos, 0)
tx.setRotation(0, 200, 0)
ri.ConcatTransform(tx.getMatrix())
troll.Polygon(ri)
ri.TransformEnd()
# floor
ri.TransformBegin()
ri.Bxdf(
    "PxrDisney",
    "bxdf",
    {
        "color baseColor": [1, 1, 1],
        "float roughness": [0.2],
    },
)
# ri.Bxdf( "PxrDiffuse","bxdf", { "reference color diffuseColor" : ["colourChecker:resultRGB"] })
s = 12.0
face = [-s, 0, -s, s, 0, -s, -s, 0, s, s, 0, s]
ri.Patch("bilinear", {"P": face})
ri.TransformEnd()
# end our world
ri.WorldEnd()
# and finally end the rib file
ri.End()
|
[
"jmacey@bournemouth.ac.uk"
] |
jmacey@bournemouth.ac.uk
|
53e96ad958d483b7b85fb9c3119b9e98031ef73c
|
922b6d67ca8dcc1573bddd0aa7193107f42b6207
|
/dms/web/base.py
|
4d5a1c7e76049df5a818300081908190a9e6437b
|
[
"MIT"
] |
permissive
|
zhmsg/dms
|
40c91ea3945fd8dfcd0b056f4bcf324774c4e88c
|
a1ae1430893d9dde8f45bba0e50818f0224fcd8a
|
refs/heads/master
| 2023-09-03T23:00:44.615748
| 2023-08-25T00:13:59
| 2023-08-25T00:13:59
| 42,572,830
| 0
| 2
|
MIT
| 2022-07-06T19:54:32
| 2015-09-16T07:50:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
# !/usr/bin/env python
# coding: utf-8
from flask import Blueprint, g, Response, jsonify, redirect
from flask_login import login_required
import functools
from flask_helper.view import View as OView
from dms.utils.log import getLogger
from dms.utils.manager import Explorer
__author__ = 'zhouhenglc'
class RegisterData(object):
    """Process-wide key/value registry (singleton).

    Every ``RegisterData()`` call returns the same instance, and the
    backing dict is created exactly once, so data stored through one
    reference is visible through all others.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # BUG FIX: object.__new__ accepts no extra arguments in
            # Python 3 — the original forwarded *args, which raises
            # TypeError when arguments are supplied.
            cls._instance = object.__new__(cls)
            # Initialise state here (once) rather than in __init__.
            cls._instance._dict = {}
        return cls._instance

    def __init__(self):
        # BUG FIX: __init__ runs on *every* RegisterData() call even
        # though __new__ returns the cached instance; re-creating
        # self._dict here (as the original did) wiped the shared
        # registry each time.  State now lives in __new__.
        pass

    def get(self, key, default=None):
        """Return the value stored for *key*, or *default* if absent."""
        return self._dict.get(key, default)

    def set(self, key, value):
        """Store *value* under *key*, overwriting any previous value."""
        self._dict[key] = value

    def set_default(self, key, default):
        """Store *default* under *key* only when *key* is not present."""
        if key not in self._dict:
            self._dict[key] = default

    def append(self, key, value):
        """Append *value* to the list stored at *key*, creating it if needed."""
        _values = self.get(key)
        if not _values:
            _values = []
        _values.append(value)
        self.set(key, _values)

    def update(self, key, **kwargs):
        """Merge keyword arguments into the dict stored at *key*, creating it if needed."""
        _values = self.get(key)
        if not _values:
            _values = {}
        _values.update(**kwargs)
        self.set(key, _values)
# Module-level singletons: the shared registry and the config explorer.
REGISTER_DATA = RegisterData()
explorer = Explorer.get_instance()
class View(OView):
    """Flask view base class with login enforcement and response coercion.

    ``auth_required`` (default True) installs a ``before_request`` hook
    that requires login and redirects to ``/config`` when any resource in
    ``required_resource`` is missing configuration.  ``add_url_rule``
    wraps view functions so plain Python return values (bool, dict, list,
    objects exposing ``to_json``/``to_dict``) become proper responses.
    """

    def __init__(self, name, import_name, *args, **kwargs):
        # Pop our extra options before delegating so OView never sees them.
        self.auth_required = kwargs.pop('auth_required', True)
        self.required_resource = kwargs.pop('required_resource', [])
        super().__init__(name, import_name, *args, **kwargs)
        if self.auth_required:
            @self.before_request
            @login_required
            def before_request():
                # Redirect to the config page, listing the missing keys of
                # the first required resource that is not fully configured.
                for rr in self.required_resource:
                    if rr in explorer.missing_config:
                        redirect_url = "/config?keys=%s" % \
                                       ",".join(explorer.missing_config[rr])
                        return redirect(redirect_url)

    def get_global_endpoint(self, endpoint=None, view_func=None):
        """Return the blueprint-qualified endpoint "<name>.<endpoint>".

        Uses *endpoint* when given, otherwise the view function's name;
        returns None when neither is supplied.
        """
        if endpoint:
            sub_endpoint = endpoint
        elif view_func:
            # BUG FIX: ``func_name`` is Python 2 only; Python 3 functions
            # expose ``__name__`` (this file is Python 3 — see super()).
            sub_endpoint = view_func.__name__
        else:
            return None
        g_endpoint = "%s.%s" % (self.name, sub_endpoint)
        return g_endpoint

    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Register *view_func*, wrapping it to coerce its return value."""
        if view_func:
            @functools.wraps(view_func)
            def inner(*args, **kwargs):
                r = view_func(*args, **kwargs)
                if isinstance(r, Response):
                    return r
                elif isinstance(r, bool):
                    # Flask cannot serialise bool; return its text form.
                    return 'True' if r else 'False'
                elif isinstance(r, dict):
                    return jsonify(r)
                elif isinstance(r, list):
                    # Serialise each element, honouring to_dict when present.
                    rs = []
                    for item in r:
                        if hasattr(item, 'to_dict'):
                            rs.append(item.to_dict())
                        else:
                            rs.append(item)
                    return jsonify(rs)
                elif hasattr(r, 'to_json'):
                    return r.to_json()
                elif hasattr(r, 'to_dict'):
                    return jsonify(r.to_dict())
                return r
            OView.add_url_rule(self, rule, endpoint, inner, **options)
        else:
            OView.add_url_rule(self, rule, endpoint, view_func, **options)
|
[
"zhouhenglc@inspur.com"
] |
zhouhenglc@inspur.com
|
6dd5e90c13cbc8921188a2a55e954bfeb8c45d71
|
21b5ad37b812ed78799d4efc1649579cc83d32fb
|
/pro/migrations/0007_auto_20200222_1157.py
|
4b4329dc3c21c4faddc276aeb4688a4472386e24
|
[] |
no_license
|
SaifulAbir/django-js-api
|
b6f18c319f8109884e71095ad49e08e50485bb25
|
fbf174b9cde2e7d25b4898f511df9c6f96d406cf
|
refs/heads/master
| 2023-02-12T16:09:21.508702
| 2021-01-14T09:05:15
| 2021-01-14T09:05:15
| 329,713,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Generated by Django 3.0.3 on 2020-02-22 11:57
from django.db import migrations, models
import p7.validators
class Migration(migrations.Migration):
    # Auto-generated Django migration (3.0.3): renames the professional
    # model's ``user_id`` field to ``user`` and re-declares the password
    # field with the project's validity check plus a minimum length of 8.
    dependencies = [
        ('pro', '0006_merge_20200222_1117'),
    ]
    operations = [
        migrations.RenameField(
            model_name='professional',
            old_name='user_id',
            new_name='user',
        ),
        migrations.AlterField(
            model_name='professional',
            name='password',
            field=models.CharField(max_length=255, validators=[p7.validators.check_valid_password, p7.validators.MinLengthValidator(8)]),
        ),
    ]
|
[
"rashed@ishraak.com"
] |
rashed@ishraak.com
|
0663ca2468470dd94deb42af8ca3dab1a2cc3333
|
8e97cb7c8668a9061683ea3ba893dab32029fac9
|
/pytorch_toolkit/person_reidentification/data/datamanager.py
|
75b80c905990d162e028c8e00d6e2abce522f5de
|
[
"Apache-2.0"
] |
permissive
|
DmitriySidnev/openvino_training_extensions
|
e01703bea292f11ffc20d50a1a06f0565059d5c7
|
c553a56088f0055baba838b68c9299e19683227e
|
refs/heads/develop
| 2021-06-14T06:32:12.373813
| 2020-05-13T13:25:15
| 2020-05-13T13:25:15
| 180,546,423
| 0
| 1
|
Apache-2.0
| 2019-04-15T13:39:48
| 2019-04-10T09:17:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,895
|
py
|
"""
MIT License
Copyright (c) 2018 Kaiyang Zhou
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from torchreid.data.datamanager import DataManager
from torchreid.data.datasets import __image_datasets
from .datasets.globalme import GlobalMe
from .transforms import build_transforms
from .sampler import build_train_sampler
# Register the custom GlobalMe dataset in torchreid's image-dataset registry
# so init_image_dataset can resolve it by the name 'globalme'.
__image_datasets['globalme'] = GlobalMe
def init_image_dataset(name, **kwargs):
    """Instantiate the registered image dataset called *name*.

    Extra keyword arguments are forwarded to the dataset constructor.
    Raises ValueError when *name* is not in the registry.
    """
    known = list(__image_datasets.keys())
    if name in known:
        return __image_datasets[name](**kwargs)
    raise ValueError('Invalid dataset name. Received "{}", '
                     'but expected to be one of {}'.format(name, known))
class ImageDataManagerWithTransforms(DataManager):
    """Image-ReID data manager that builds its own train/test transforms.

    Constructs a combined training loader over all ``sources`` datasets
    and per-target query/gallery test loaders, using the transforms
    produced by the local ``build_transforms`` (which also supports
    applying masks to test images).
    """
    data_type = 'image'
    def __init__(self, root='', sources=None, targets=None, height=256, width=128, transforms='random_flip',
                 norm_mean=None, norm_std=None, use_gpu=True, split_id=0, combineall=False,
                 batch_size_train=32, batch_size_test=32, workers=4, num_instances=4, train_sampler='',
                 cuhk03_labeled=False, cuhk03_classic_split=False, market1501_500k=False, apply_masks_to_test=False):
        # Base class handles source/target normalisation and device flags;
        # transforms=None because we build our own pair just below.
        super(ImageDataManagerWithTransforms, self).__init__(
            sources=sources, targets=targets, height=height, width=width,
            transforms=None, norm_mean=norm_mean, norm_std=norm_std, use_gpu=use_gpu
        )
        self.transform_tr, self.transform_te = build_transforms(
            self.height, self.width, transforms=transforms,
            norm_mean=norm_mean, norm_std=norm_std,
            apply_masks_to_test=apply_masks_to_test
        )
        print('=> Loading train (source) dataset')
        # Concatenate every source dataset into one training set
        # (dataset __add__ is used via sum()).
        trainset = []
        for name in self.sources:
            trainset_ = init_image_dataset(
                name,
                transform=self.transform_tr,
                mode='train',
                combineall=combineall,
                root=root,
                split_id=split_id,
                cuhk03_labeled=cuhk03_labeled,
                cuhk03_classic_split=cuhk03_classic_split,
                market1501_500k=market1501_500k
            )
            trainset.append(trainset_)
        trainset = sum(trainset)
        self._num_train_pids = trainset.num_train_pids
        self._num_train_cams = trainset.num_train_cams
        # Sampler controls identity balancing (num_instances per pid);
        # shuffle=False because ordering is delegated to the sampler.
        train_sampler = build_train_sampler(
            trainset.train, train_sampler,
            batch_size=batch_size_train,
            num_instances=num_instances
        )
        self.trainloader = torch.utils.data.DataLoader(
            trainset,
            sampler=train_sampler,
            batch_size=batch_size_train,
            shuffle=False,
            num_workers=workers,
            pin_memory=self.use_gpu,
            drop_last=True
        )
        print('=> Loading test (target) dataset')
        self.testloader = {name: {'query': None, 'gallery': None} for name in self.targets}
        self.testdataset = {name: {'query': None, 'gallery': None} for name in self.targets}
        for name in self.targets:
            # build query loader
            queryset = init_image_dataset(
                name,
                transform=self.transform_te,
                mode='query',
                combineall=combineall,
                root=root,
                split_id=split_id,
                cuhk03_labeled=cuhk03_labeled,
                cuhk03_classic_split=cuhk03_classic_split,
                market1501_500k=market1501_500k
            )
            self.testloader[name]['query'] = torch.utils.data.DataLoader(
                queryset,
                batch_size=batch_size_test,
                shuffle=False,
                num_workers=workers,
                pin_memory=self.use_gpu,
                drop_last=False
            )
            # build gallery loader
            galleryset = init_image_dataset(
                name,
                transform=self.transform_te,
                mode='gallery',
                combineall=combineall,
                verbose=False,
                root=root,
                split_id=split_id,
                cuhk03_labeled=cuhk03_labeled,
                cuhk03_classic_split=cuhk03_classic_split,
                market1501_500k=market1501_500k
            )
            self.testloader[name]['gallery'] = torch.utils.data.DataLoader(
                galleryset,
                batch_size=batch_size_test,
                shuffle=False,
                num_workers=workers,
                pin_memory=self.use_gpu,
                drop_last=False
            )
            self.testdataset[name]['query'] = queryset.query
            self.testdataset[name]['gallery'] = galleryset.gallery
        print('\n')
        print('  **************** Summary ****************')
        print('  train            : {}'.format(self.sources))
        print('  # train datasets : {}'.format(len(self.sources)))
        print('  # train ids      : {}'.format(self.num_train_pids))
        print('  # train images   : {}'.format(len(trainset)))
        print('  # train cameras  : {}'.format(self.num_train_cams))
        print('  test             : {}'.format(self.targets))
        print('  *****************************************')
        print('\n')
|
[
"48012821+AlexanderDokuchaev@users.noreply.github.com"
] |
48012821+AlexanderDokuchaev@users.noreply.github.com
|
6383995e35ee51c384da1285d358de91724811e2
|
2432996ac1615cd36d61f0feeff8a359d2b438d8
|
/env/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-rdflib.py
|
1ef29499af98b492b37a7bc902fb9532e1abc901
|
[
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
Parveshdhull/AutoTyper
|
dd65d53ece7c13fbc1ead7ce372947483e05e2e3
|
7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c
|
refs/heads/main
| 2023-05-08T14:10:35.404160
| 2023-05-07T20:43:15
| 2023-05-07T20:43:15
| 315,415,751
| 26
| 18
|
Apache-2.0
| 2023-05-07T20:43:16
| 2020-11-23T19:13:05
|
Python
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_submodules
# rdflib loads its parser/serializer plugins dynamically, so PyInstaller
# must be told explicitly to bundle every rdflib.plugins submodule.
hiddenimports = collect_submodules('rdflib.plugins')
|
[
"parvesh.dhullmonu@gmail.com"
] |
parvesh.dhullmonu@gmail.com
|
cf1e3075185cefc817f86f6636ba6ca84b9a73ae
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/eve/devtools/script/behaviortools/clientdebugadaptors.py
|
2a22a85a0875ed2b83664cddb9e4a59eb4130b2b
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284
| 2015-02-17T21:49:33
| 2015-02-17T21:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,637
|
py
|
#Embedded file name: eve/devtools/script/behaviortools\clientdebugadaptors.py
import logging
from brennivin.messenger import Messenger
import eve.common.script.net.eveMoniker as moniker
from eve.devtools.script.behaviortools.debugwindow import BehaviorDebugWindow
import uthread2
logger = logging.getLogger(__name__)
EVENT_BEHAVIOR_DEBUG_UPDATE = 'OnBehaviorDebugUpdate'
EVENT_BEHAVIOR_DEBUG_CONNECT_REQUEST = 'OnBehaviorDebugConnectRequest'
EVENT_BEHAVIOR_DEBUG_DISCONNECT_REQUEST = 'OnBehaviorDebugDisconnectRequest'
class UpdateListener(object):
    """Routes behavior-debug notify events to per-entity debuggers.

    Subscribes to the debug connect/disconnect/update events on
    construction and fans OnBehaviorDebugUpdate messages out to the
    handler registered for each itemID via a Messenger.
    """

    def __init__(self):
        self.messenger = Messenger()
        self.behaviorDebuggersByItemId = {}
        sm.RegisterForNotifyEvent(self, EVENT_BEHAVIOR_DEBUG_UPDATE)
        sm.RegisterForNotifyEvent(self, EVENT_BEHAVIOR_DEBUG_CONNECT_REQUEST)
        sm.RegisterForNotifyEvent(self, EVENT_BEHAVIOR_DEBUG_DISCONNECT_REQUEST)

    def AddObserverForItemId(self, itemId, handler):
        # Drop any stale subscriptions for this item before registering
        # the new handler, so updates never reach dead observers.
        if itemId in self.messenger.signalsByMessageName:
            self.messenger.signalsByMessageName[itemId].clear()
        self.messenger.SubscribeToMessage(itemId, handler)

    def RemoveObserverForItemId(self, itemId, handler):
        try:
            self.messenger.UnsubscribeFromMessage(itemId, handler)
        # BUG FIX: narrowed from bare ``except:`` so KeyboardInterrupt /
        # SystemExit are no longer swallowed (same fix in the methods
        # below).
        except Exception:
            logger.error('Failed to remove observer itemID=%s handler=%s', itemId, handler)

    def OnBehaviorDebugUpdate(self, itemID, *args, **kwargs):
        # Fan the update out to whichever debugger subscribed to itemID.
        self.messenger.SendMessage(itemID, *args, **kwargs)

    def TryConnectDebugger(self, itemID):
        try:
            debugger = ClientBehaviorDebugger(itemID)
            debugger.Connect()
            self.behaviorDebuggersByItemId[itemID] = debugger
        except Exception:
            logger.exception('failed to connect to debugger for itemID=%s', itemID)

    def OnBehaviorDebugConnectRequest(self, itemIDs):
        itemIDs = sorted(itemIDs)
        for itemID in itemIDs:
            self.TryConnectDebugger(itemID)

    def TryDisconnectDebugger(self, itemID):
        try:
            debugger = self.behaviorDebuggersByItemId.pop(itemID)
            debugger.Disconnect()
        except Exception:
            logger.exception('failed to disconnect to debugger for itemID=%s', itemID)

    def OnBehaviorDebugDisconnectRequest(self, itemIDs):
        for itemID in itemIDs:
            self.TryDisconnectDebugger(itemID)

    def HasDebugger(self, itemID):
        """Return True when a debugger is already connected for itemID."""
        return itemID in self.behaviorDebuggersByItemId
# Module-level singleton; registers for the debug notify events at import time.
updateListener = UpdateListener()
class ClientBehaviorDebugger(object):
    """Client-side debugger for one entity's behavior tree.

    Connect() enables server-side debugging for the entity, subscribes to
    its update stream via ``updateListener`` and opens a debug window;
    Disconnect() tears all of that down again.
    """

    def __init__(self, itemID):
        self.itemID = itemID
        self.tree = []
        self.treeMap = {}
        self.events = []
        self.debugWindow = None
        self.isConnected = False

    def Connect(self):
        """Enable server-side debugging and open the debug window (async)."""
        logger.debug('Debugger connecting to behavior of entity %s', self.itemID)
        updateListener.AddObserverForItemId(self.itemID, self.OnBehaviorDebugUpdate)
        entityLocation = moniker.GetEntityLocation()
        treeData = entityLocation.EnableBehaviorDebugging(self.itemID)
        self.isConnected = True
        # Window construction happens in a tasklet so Connect returns fast.
        uthread2.StartTasklet(self.SetupDebugTree, treeData)

    def Disconnect(self):
        """Disable server-side debugging and close the debug window."""
        logger.debug('Debugger disconnecting from behavior of entity %s', self.itemID)
        try:
            updateListener.RemoveObserverForItemId(self.itemID, self.OnBehaviorDebugUpdate)
            entityLocation = moniker.GetEntityLocation()
            entityLocation.DisableBehaviorDebugging(self.itemID)
            self.isConnected = False
            if self.debugWindow is not None:
                self.debugWindow.Close()
            sm.UnregisterForNotifyEvent(self, 'OnSessionChanged')
        # BUG FIX: narrowed from bare ``except:`` so KeyboardInterrupt /
        # SystemExit are no longer swallowed.
        except Exception:
            logger.exception('Failed while disconnecting :(')

    def OnBehaviorDebugUpdate(self, events, taskStatuses, tasksSeen, blackboards, *args, **kwargs):
        # Ignore updates that arrive before the window is built (SetupDebugTree
        # runs in a tasklet) or after it is closed.
        if self.debugWindow is None:
            return
        self.debugWindow.LoadEvents(events)
        self.debugWindow.UpdateStatuses(taskStatuses)
        self.debugWindow.UpdateTasksSeen(tasksSeen)
        self.debugWindow.LoadBlackboard(blackboards)

    def SetupDebugTree(self, treeData):
        """Build the per-entity debug window and load the behavior tree."""
        self.debugWindow = BehaviorDebugWindow.Open(windowID='BehaviorDebugWindow_%d' % self.itemID)
        self.debugWindow.SetController(self)
        self.debugWindow.LoadBehaviorTree(treeData)
        sm.RegisterForNotifyEvent(self, 'OnSessionChanged')

    def IsConnected(self):
        return self.isConnected

    def OnSessionChanged(self, isRemote, sess, change):
        # A solar-system jump invalidates the entity; close the window.
        if 'solarsystemid2' in change:
            if self.debugWindow is not None:
                self.debugWindow.Close()
|
[
"billchang.e@gmail.com"
] |
billchang.e@gmail.com
|
f410057cae7ae8e1339c8dac17c74dc88a9d8708
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/decoding/GAD/fairseq/data/audio/raw_audio_dataset.py
|
1d92e4966bddce95d492eae411952a4a9ca2c9bd
|
[
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,267
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
from .. import FairseqDataset
logger = logging.getLogger(__name__)
class RawAudioDataset(FairseqDataset):
    """Base fairseq dataset over raw waveforms.

    Subclasses provide ``__getitem__`` returning ``{"id": ..., "source":
    1-D float tensor}``; this class handles normalisation, cropping /
    padding to a common length, and batch collation.
    """
    def __init__(
        self,
        sample_rate,
        max_sample_size=None,
        min_sample_size=0,
        shuffle=True,
        pad=False,
        normalize=False,
    ):
        super().__init__()
        self.sample_rate = sample_rate
        # Per-example lengths, filled in by subclasses.
        self.sizes = []
        self.max_sample_size = (
            max_sample_size if max_sample_size is not None else sys.maxsize
        )
        self.min_sample_size = min_sample_size
        self.pad = pad
        self.shuffle = shuffle
        self.normalize = normalize
    def __getitem__(self, index):
        raise NotImplementedError()
    def __len__(self):
        return len(self.sizes)
    def postprocess(self, feats, curr_sample_rate):
        """Downmix to mono, check the sample rate and optionally layer-norm.

        Returns a 1-D tensor (enforced by the assert below).
        """
        if feats.dim() == 2:
            # Average the channel dimension (last axis) to get mono.
            feats = feats.mean(-1)
        if curr_sample_rate != self.sample_rate:
            raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
        assert feats.dim() == 1, feats.dim()
        if self.normalize:
            with torch.no_grad():
                feats = F.layer_norm(feats, feats.shape)
        return feats
    def crop_to_max_size(self, wav, target_size):
        """Return a random contiguous crop of length target_size (or wav unchanged if already short enough)."""
        size = len(wav)
        diff = size - target_size
        if diff <= 0:
            return wav
        start = np.random.randint(0, diff + 1)
        end = size - diff + start
        return wav[start:end]
    def collater(self, samples):
        """Collate samples into a batch, cropping or zero-padding sources to a
        common target size; emits a padding mask when self.pad is set."""
        samples = [s for s in samples if s["source"] is not None]
        if len(samples) == 0:
            return {}
        sources = [s["source"] for s in samples]
        sizes = [len(s) for s in sources]
        if self.pad:
            # Pad up to the longest sample (capped at max_sample_size).
            target_size = min(max(sizes), self.max_sample_size)
        else:
            # Crop down to the shortest sample (capped at max_sample_size).
            target_size = min(min(sizes), self.max_sample_size)
        collated_sources = sources[0].new_zeros(len(sources), target_size)
        padding_mask = (
            torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
        )
        for i, (source, size) in enumerate(zip(sources, sizes)):
            diff = size - target_size
            if diff == 0:
                collated_sources[i] = source
            elif diff < 0:
                assert self.pad
                collated_sources[i] = torch.cat(
                    [source, source.new_full((-diff,), 0.0)]
                )
                # Mark the zero-padded tail in the mask.
                padding_mask[i, diff:] = True
            else:
                collated_sources[i] = self.crop_to_max_size(source, target_size)
        input = {"source": collated_sources}
        if self.pad:
            input["padding_mask"] = padding_mask
        return {"id": torch.LongTensor([s["id"] for s in samples]), "net_input": input}
    def num_tokens(self, index):
        return self.size(index)
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        if self.pad:
            return self.sizes[index]
        return min(self.sizes[index], self.max_sample_size)
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # Secondary sort key: example size (descending after the [::-1]).
        order.append(self.sizes)
        return np.lexsort(order)[::-1]
class FileAudioDataset(RawAudioDataset):
    """RawAudioDataset backed by a manifest file.

    The manifest's first line is the audio root directory; each further
    line is "<relative-path>\t<num-samples>".  Files shorter than
    min_sample_size are skipped.
    """
    def __init__(
        self,
        manifest_path,
        sample_rate,
        max_sample_size=None,
        min_sample_size=0,
        shuffle=True,
        pad=False,
        normalize=False,
    ):
        super().__init__(
            sample_rate=sample_rate,
            max_sample_size=max_sample_size,
            min_sample_size=min_sample_size,
            shuffle=shuffle,
            pad=pad,
            normalize=normalize,
        )
        self.fnames = []
        # Manifest line numbers of the kept entries.
        self.line_inds = set()
        skipped = 0
        with open(manifest_path, "r") as f:
            self.root_dir = f.readline().strip()
            for i, line in enumerate(f):
                items = line.strip().split("\t")
                assert len(items) == 2, line
                sz = int(items[1])
                # Skip clips shorter than the configured minimum.
                if min_sample_size is not None and sz < min_sample_size:
                    skipped += 1
                    continue
                self.fnames.append(items[0])
                self.line_inds.add(i)
                self.sizes.append(sz)
        logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples")
    def __getitem__(self, index):
        # Imported lazily so the module loads without soundfile installed.
        import soundfile as sf
        fname = os.path.join(self.root_dir, self.fnames[index])
        wav, curr_sample_rate = sf.read(fname)
        feats = torch.from_numpy(wav).float()
        feats = self.postprocess(feats, curr_sample_rate)
        return {"id": index, "source": feats}
|
[
"tage@microsoft.com"
] |
tage@microsoft.com
|
5bd1c4c635fe32c3791141e9bc42704f35a43e4b
|
06ae8168b7067c8f77f06a48a22d158af1657651
|
/models.py
|
98bc2e3f49a110d63721b232a0145760a06b1461
|
[] |
no_license
|
Jiangjao/teaProject
|
61e3cab41fab4b1aa8d2b1cfd6c6337c01196497
|
9f14d59d974bf82158a43d19c42b977b393857d2
|
refs/heads/master
| 2023-08-12T11:38:56.561815
| 2021-10-11T06:30:17
| 2021-10-11T06:30:17
| 347,795,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
from django.db import models
class Chemistry(models.Model):
    """Chemical compound record mapped onto the pre-existing `chemistry` table.

    NOTE(review): looks like `manage.py inspectdb` output (unmanaged model,
    db_column overrides) — confirm before hand-editing fields.
    """
    cid = models.IntegerField(blank=True, null=True)
    structure = models.CharField(max_length=255, blank=True, null=True)
    # The molecular formula doubles as the primary key of the legacy table.
    molecularformula = models.CharField(primary_key=True, max_length=255)
    molecularweight = models.CharField(max_length=255, blank=True, null=True)
    extra_word = models.TextField(blank=True, null=True)
    cas = models.CharField(db_column='CAS', max_length=255, blank=True, null=True) # Field name made lowercase.
    pubchem = models.CharField(db_column='PubChem', max_length=255, blank=True, null=True) # Field name made lowercase.
    einecs = models.CharField(db_column='EINECS', max_length=255, blank=True, null=True) # Field name made lowercase.
    class Meta:
        managed = False  # Django must not create/alter this table.
        db_table = 'chemistry'
class CodeImages(models.Model):
    """Compound image/code record mapped onto the pre-existing `code_images` table.

    NOTE(review): looks like `manage.py inspectdb` output — the renamed and
    lowercased fields mirror the legacy schema, not Django conventions.
    """
    code_images = models.CharField(primary_key=True, max_length=255)
    entryname = models.CharField(max_length=255, blank=True, null=True)
    compoundformula = models.CharField(max_length=255, blank=True, null=True)
    einecs = models.CharField(db_column='EINECS', max_length=255, blank=True, null=True) # Field name made lowercase.
    cid = models.IntegerField(blank=True, null=True)
    extraword = models.TextField(db_column='extraWord', blank=True, null=True) # Field name made lowercase.
    chinese_name = models.CharField(db_column='Chinese_name', max_length=255, blank=True, null=True) # Field name made lowercase.
    mocular_weight = models.FloatField(db_column='mocular weight', blank=True, null=True) # Field renamed to remove unsuitable characters.
    cas = models.CharField(db_column='CAS', max_length=255, blank=True, null=True) # Field name made lowercase.
    cid_id = models.IntegerField(blank=True, null=True)
    class Meta:
        managed = False  # Django must not create/alter this table.
        db_table = 'code_images'
class Test(models.Model):
    """Minimal unmanaged model over the legacy `test` table (single name column)."""
    name = models.CharField(primary_key=True, max_length=255)
    class Meta:
        managed = False  # Django must not create/alter this table.
        db_table = 'test'
|
[
"918718278@qq.com"
] |
918718278@qq.com
|
b246744037954e4d56a3d62e35f360a31c14f200
|
787022de03a2dd6998c1518673830395b389e3df
|
/migration/migrator/migrations/system/20190708143708_add_submitty_admin_json.py
|
bbd3ab8fcc84b2fbfb1be63377cd4a2f875ea629
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Submitty/Submitty
|
e6b8731656291a025aa77f928eb067bc9a307540
|
b223d9e952bcdb8664721a55593bc75e0e3c8c4f
|
refs/heads/main
| 2023-08-31T23:56:11.291752
| 2023-08-31T19:12:18
| 2023-08-31T19:12:18
| 16,236,118
| 592
| 727
|
BSD-3-Clause
| 2023-09-13T05:36:08
| 2014-01-25T17:43:57
|
PHP
|
UTF-8
|
Python
| false
| false
| 715
|
py
|
"""
Migration for the Submitty system.
adds submitty admin json
"""
from pathlib import Path
import shutil
import json
import os
def up(config):
    """Create config/submitty_admin.json with empty admin credentials.

    Idempotent: if the file already exists it is left untouched.  A freshly
    created file is owned root:submitty_daemon with mode 0440.
    """
    admin_file = str(Path(config.submitty['submitty_install_dir'],
                          'config', 'submitty_admin.json'))
    if os.path.isfile(admin_file):
        return
    placeholder = {
        'submitty_admin_username': '',
        'submitty_admin_password': ''
    }
    with open(admin_file, 'w') as open_file:
        json.dump(placeholder, open_file, indent=2)
    # Restrict the credentials file to the daemon group, read-only.
    shutil.chown(admin_file, 'root', 'submitty_daemon')
    os.chmod(admin_file, 0o440)
def down(config):
    """Rollback hook — does nothing; the admin JSON created by up() is left in place."""
    pass
|
[
"bmcutler@users.noreply.github.com"
] |
bmcutler@users.noreply.github.com
|
e11c58e1dcc7848596649d5524206b7ba632f80d
|
a732c1380c8dc829df5ba57b67456a9b603b0cf4
|
/model.py
|
b9386587de7cded1f73f81a5ce43913598a42395
|
[] |
no_license
|
ilkaynazli/job-search-planner
|
1266433ce6bb8c249c65dfcdb1d01e4a8d97095d
|
6ac5f3c82de1c33d564eea627468e54c99daf968
|
refs/heads/master
| 2020-04-15T07:59:26.225728
| 2019-01-19T05:16:40
| 2019-01-19T05:16:40
| 164,510,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,025
|
py
|
"""
the database model of job search planner web app
"""
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
"""Import SQLAlchemy object from flask_sqlalchemy library and make the
connection to PostgreSQL"""
db = SQLAlchemy() #create an instance of SQLAlchemy object
class User(db.Model):
    """Registered user of the website (one-to-many with Application)."""
    __tablename__ = 'users'
    user_id = db.Column(db.Integer,
                        primary_key=True,
                        autoincrement=True,
                        )
    username = db.Column(db.String(25), nullable=False, unique=True)
    password = db.Column(db.String(150), nullable=False)
    email = db.Column(db.String(50), nullable=False)
    applications = db.relationship('Application')
    def __repr__(self):
        """Human readable data"""
        # NOTE(review): the backslash continuations keep the source indentation
        # inside the resulting string, so the repr contains long runs of spaces.
        return f"<User id: {self.user_id}, \
                username: {self.username},\
                password: {self.password},\
                email: {self.email}>"
class Company(db.Model):
    """Company a job application was sent to (one-to-many with Application)."""
    __tablename__ = 'companies'
    company_id = db.Column(db.Integer,
                           primary_key=True,
                           autoincrement=True,
                           )
    company_name = db.Column(db.String(50), nullable=False)
    company_website = db.Column(db.String(150), nullable=True)
    applications = db.relationship('Application')
    def __repr__(self):
        """Human readable data"""
        # NOTE(review): backslash continuations embed source indentation in the string.
        return f"<Company id: {self.company_id}, \
                name: {self.company_name},\
                website: {self.company_website}>"
class Application(db.Model):
    """One job application: links a user, a company and a referer."""
    __tablename__ = 'applications'
    application_id = db.Column(db.Integer,
                               primary_key=True,
                               autoincrement=True,
                               )
    user_id = db.Column(db.Integer,
                        db.ForeignKey('users.user_id'),
                        nullable=False
                        )
    company_id = db.Column(db.Integer,
                           db.ForeignKey('companies.company_id'),
                           nullable=False
                           )
    # default is the callable datetime.utcnow, so it is evaluated per insert —
    # TODO confirm this is the intended behavior (original note: "check this one later").
    date_applied = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    position = db.Column(db.String(50), nullable=False)
    resume = db.Column(db.String(50), nullable=True) #the location of the file
    cover_letter = db.Column(db.String(50), nullable=True) #the location of the file
    summary = db.Column(db.Text, nullable=True)
    referer_id = db.Column(db.Integer,
                           db.ForeignKey('referers.referer_id'),
                           nullable=False
                           )
    user = db.relationship('User')
    company = db.relationship('Company')
    referer = db.relationship('Referer')
    def __repr__(self):
        """Human readable data"""
        # NOTE(review): backslash continuations embed source indentation in the string.
        return f"<Application id: {self.application_id}, \
                company id: {self.company_id},\
                user id: {self.user_id}, \
                date applied: {self.date_applied},\
                position: {self.position},\
                resume: {self.resume},\
                cover letter: {self.cover_letter},\
                summary: {self.summary},\
                referer id: {self.referer_id}>"
class Referer(db.Model):
    """Contact person at the company who referred the application."""
    __tablename__ = 'referers'
    referer_id = db.Column(db.Integer,
                           primary_key=True,
                           autoincrement=True,
                           )
    referer_name = db.Column(db.String(75), nullable=False)
    application = db.relationship('Application')
    def __repr__(self):
        """Human readable data"""
        # NOTE(review): backslash continuations embed source indentation in the string.
        return f"<Referer id: {self.referer_id}, \
                referer name: {self.referer_name}>"
class Interview(db.Model):
    """Join row: a topic that came up in the interview for one application."""
    __tablename__ = 'interviews'
    interview_id = db.Column(db.Integer,
                             primary_key=True,
                             autoincrement=True,
                             )
    topic_id = db.Column(db.Integer,
                         db.ForeignKey('topics.topic_id'),
                         nullable=False
                         )
    application_id = db.Column(db.Integer,
                               db.ForeignKey('applications.application_id'),
                               nullable=False
                               )
    # Flag marking whether this topic needs improvement — TODO confirm semantics.
    improvement = db.Column(db.Boolean, nullable=False)
    application = db.relationship('Application')
    topic = db.relationship('Topic')
    def __repr__(self):
        """Human readable data"""
        # NOTE(review): backslash continuations embed source indentation in the string.
        return f"<interview id: {self.interview_id},\
                topic id: {self.topic_id},\
                application id: {self.application_id},\
                improvement: {self.improvement}>"
class Topic(db.Model):
    """Interview topic that could be asked (referenced by Interview rows)."""
    __tablename__ = 'topics'
    topic_id = db.Column(db.Integer,
                         primary_key=True,
                         autoincrement=True,
                         )
    topic = db.Column(db.String(150), nullable=False)
    def __repr__(self):
        """Human readable data"""
        # NOTE(review): backslash continuations embed source indentation in the string.
        return f"<Topic id: {self.topic_id},\
                topic: {self.topic}>"
def connect_to_db(app, db_name):
    """Bind the module-level SQLAlchemy instance to *app* for database *db_name*."""
    uri = 'postgresql:///' + db_name
    app.config['SQLALCHEMY_DATABASE_URI'] = uri
    # For debugging purposes keep this True (echoes SQL statements).
    app.config['SQLALCHEMY_ECHO'] = True
    db.app = app
    db.init_app(app)
# Default database name used when this module is run directly.
db_name = 'jobs'
if __name__ == '__main__':
    """For running this interactively"""
    # Import here to avoid a circular import at module load time — TODO confirm.
    from server import app
    connect_to_db(app, db_name)
    db.create_all()
    # example_data()
    print('Connected to database.')
|
[
"ilkayncelik@gmail.com"
] |
ilkayncelik@gmail.com
|
ba9793454b72cf6087c048cea652467469da0dc2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_vagrant.py
|
25425a414477258cae648761022543eca0a49624
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
#calss header
class _VAGRANT():
def __init__(self,):
self.name = "VAGRANT"
self.definitions = [u'a person who is poor and does not have a home or job: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9bc4cb5d38e560f98cf8e7fd5812eddb7adfb613
|
b6c09a1b87074d6e58884211ce24df8ec354da5c
|
/1637. 两点之间不包含任何点的最宽垂直面积.py
|
ca285a4325e0ec01f5b84e4b3523ea887d7501c8
|
[] |
no_license
|
fengxiaolong886/leetcode
|
a0ee12d67c4a10fb12d6ca4369762ab5b090cab1
|
4c0897bc06a297fa9225a0c46d8ec9217d876db8
|
refs/heads/master
| 2023-03-18T22:16:29.212016
| 2021-03-07T03:48:16
| 2021-03-07T03:48:16
| 339,604,263
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
"""
给你 n 个二维平面上的点 points ,其中 points[i] = [xi, yi] ,请你返回两点之间内部不包含任何点的 最宽垂直面积 的宽度。
垂直面积 的定义是固定宽度,而 y 轴上无限延伸的一块区域(也就是高度为无穷大)。 最宽垂直面积 为宽度最大的一个垂直面积。
请注意,垂直区域 边上 的点 不在 区域内。
"""
def maxWidthOfVerticalArea(points):
    """Widest vertical strip (x-gap) containing no point strictly inside it."""
    # Sort in place (same observable side effect as before); after a
    # lexicographic sort the x-coordinates are non-decreasing, so the answer
    # is the largest gap between neighbouring x values.
    points.sort()
    gaps = (nxt[0] - cur[0] for cur, nxt in zip(points, points[1:]))
    return max(gaps, default=0)
print(maxWidthOfVerticalArea(points = [[8,7],[9,9],[7,4],[9,7]]))
print(maxWidthOfVerticalArea(points = [[3,1],[9,0],[1,0],[1,4],[5,3],[8,8]]))
|
[
"xlfeng886@163.com"
] |
xlfeng886@163.com
|
ade449d196c2f24f481058c91a30d29132d82299
|
14bca3c05f5d8de455c16ec19ac7782653da97b2
|
/lib/kubernetes/client/models/v1_service_reference.py
|
819f741043d5bbd68d7c07a9e9013f4996efcf56
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hovu96/splunk_as_a_service_app
|
167f50012c8993879afbeb88a1f2ba962cdf12ea
|
9da46cd4f45603c5c4f63ddce5b607fa25ca89de
|
refs/heads/master
| 2020-06-19T08:35:21.103208
| 2020-06-16T19:07:00
| 2020-06-16T19:07:00
| 196,641,210
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,993
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ServiceReference(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'namespace': 'str'
    }
    attribute_map = {
        'name': 'name',
        'namespace': 'namespace'
    }
    def __init__(self, name=None, namespace=None):
        """
        V1ServiceReference - a model defined in Swagger
        """
        self._name = None
        self._namespace = None
        # No polymorphic discriminator for this model.
        self.discriminator = None
        if name is not None:
          self.name = name
        if namespace is not None:
          self.namespace = namespace
    @property
    def name(self):
        """
        Gets the name of this V1ServiceReference.
        Name is the name of the service

        :return: The name of this V1ServiceReference.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this V1ServiceReference.
        Name is the name of the service

        :param name: The name of this V1ServiceReference.
        :type: str
        """
        self._name = name
    @property
    def namespace(self):
        """
        Gets the namespace of this V1ServiceReference.
        Namespace is the namespace of the service

        :return: The namespace of this V1ServiceReference.
        :rtype: str
        """
        return self._namespace
    @namespace.setter
    def namespace(self, namespace):
        """
        Sets the namespace of this V1ServiceReference.
        Namespace is the namespace of the service

        :param namespace: The namespace of this V1ServiceReference.
        :type: str
        """
        self._namespace = namespace
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Compares the full attribute dict; no __hash__ is defined alongside.
        if not isinstance(other, V1ServiceReference):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"robert.fujara@gmail.com"
] |
robert.fujara@gmail.com
|
c1fe45b3e7445a6563381aa858ccbee35fc7fb33
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007/programming/languages/perl/XML-SAX/actions.py
|
5573fe066768c8ff99b6aad789159fe54b90d0fb
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import pisitools
from pisi.actionsapi import perlmodules
def setup():
    """Run the pisi perl-modules configure step."""
    perlmodules.configure()
def build():
    """Run the pisi perl-modules build (make) step."""
    perlmodules.make()
def install():
    """Run the pisi perl-modules install step."""
    perlmodules.install()
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
9cb597f1e7850ac04cafc87043fdb422489ce144
|
335944885d937316236102a80f76a696b48b51e1
|
/scripts/segmentation_pipeline/lifted_features.py
|
d07d668c6b6cd5f4aa9b74a9dc94ef84ed3125cd
|
[
"MIT"
] |
permissive
|
chaubold/nifty
|
b33153c3ba2dd7907c1f365b76a31471f9313581
|
c351624a7f14278eb241fb730f44bdd275563dec
|
refs/heads/master
| 2021-01-11T17:43:23.443748
| 2017-01-22T12:48:21
| 2017-01-22T12:48:21
| 79,821,703
| 0
| 3
| null | 2017-12-22T10:47:30
| 2017-01-23T16:16:55
|
C++
|
UTF-8
|
Python
| false
| false
| 7,996
|
py
|
import vigra
import nifty
import nifty.graph
import nifty.graph.rag
import nifty.graph.agglo
import numpy
import h5py
import sys
nrag = nifty.graph.rag
nagglo = nifty.graph.agglo
from reraise import *
from tools import *
@reraise_with_stack
def multicutFromLocalProbs(raw, rag, localProbs, liftedEdges):
    """Solve a grid of multicut problems (beta x size-power sweep) and return,
    per lifted edge, a 0/1 "cut" feature for each solve plus their mean as the
    final column.  Python 2 file (print statements).
    """
    u = liftedEdges[:,0]
    v = liftedEdges[:,1]
    # accumulate length (todo, implement function to just accumulate length)
    eFeatures, nFeatures = nrag.accumulateMeanAndLength(rag, raw, [100,100],1)
    eSizes = eFeatures[:,1]
    # Clip probabilities away from {0,1} so the log-odds below stay finite.
    eps = 0.0001
    clipped = numpy.clip(localProbs, eps, 1.0-eps)
    features = []
    for beta in (0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7):
        for powers in (0.0, 0.005, 0.1, 0.15, 0.2):
            # the weight of the weight
            wWeight = eSizes**powers
            print "\n\nBETA ",beta
            # Log-odds edge weights with a size-weighted beta prior term.
            w = numpy.log((1.0-clipped)/(clipped)) + numpy.log((1.0-beta)/(beta)) * wWeight
            obj = nifty.graph.multicut.multicutObjective(rag, w)
            factory = obj.multicutIlpCplexFactory()
            solver = factory.create(obj)
            visitor = obj.multicutVerboseVisitor()
            #ret = solver.optimize(visitor=visitor)
            ret = solver.optimize()
            # A lifted edge is "cut" when its endpoints land in different nodes.
            res = ret[u] != ret[v]
            features.append(res[:,None])
    features = numpy.concatenate(features, axis=1)
    mean = numpy.mean(features, axis=1)[:,None]
    features = numpy.concatenate([features, mean], axis=1)
    return features
@reraise_with_stack
def ucmFromLocalProbs(raw, rag, localProbs, liftedEdges, liftedObj):
    """Lifted UCM features computed from the local edge probabilities."""
    u = liftedEdges[:,0]
    v = liftedEdges[:,1]
    # accumulate length (todo, implement function to just accumulate length)
    eFeatures, nFeatures = nrag.accumulateMeanAndLength(rag, raw, [100,100],1)
    eSizes = eFeatures[:,1]
    # NOTE(review): nSizes is taken from eFeatures, not nFeatures — possibly a
    # typo (node sizes would come from nFeatures); confirm before changing.
    nSizes = eFeatures[:,1]
    feats = nifty.graph.lifted_multicut.liftedUcmFeatures(
        objective=liftedObj,
        edgeIndicators=localProbs,
        edgeSizes=eSizes,
        nodeSizes=nSizes,
        sizeRegularizers=[0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45,
                          0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,
                          0.9, 0.95]
    )
    return feats
@reraise_with_stack
def ucmFromHessian(raw, rag, liftedEdges, liftedObj):
    """Lifted UCM features from hessian-eigenvalue edge indicators at several scales."""
    u = liftedEdges[:,0]
    v = liftedEdges[:,1]
    feats = []
    for sigma in [1.0, 2.0, 3.0, 4.0, 5.0]:
        # Largest hessian eigenvalue as a boundary indicator at this scale.
        pf = vigra.filters.hessianOfGaussianEigenvalues(raw, sigma)[:,:,0]
        eFeatures, nFeatures = nrag.accumulateMeanAndLength(rag, pf, [100,100],1)
        edgeIndicator = eFeatures[:,0]
        eSizes = eFeatures[:,1]
        # NOTE(review): nSizes is taken from eFeatures, not nFeatures — possibly
        # a typo; confirm before changing.
        nSizes = eFeatures[:,1]
        featsB = nifty.graph.lifted_multicut.liftedUcmFeatures(
            objective=liftedObj,
            edgeIndicators=edgeIndicator,
            edgeSizes=eSizes,
            nodeSizes=nSizes,
            sizeRegularizers=[0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45,
                              0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,
                              0.9, 0.95]
        )
        feats.append(featsB)
    # different axis ordering as usual
    return numpy.concatenate(feats,axis=0)
@reraise_with_stack
def liftedFeaturesFromLocalProbs(raw, rag, localProbs, liftedEdges, liftedObj, featureFile):
    """Compute (or reload cached) multicut + UCM features and write the
    combined matrix to *featureFile*.  Intermediates are cached next to it
    as "<featureFile>mc.h5" / "<featureFile>ucm.h5".
    """
    mcFeatureFile = featureFile + "mc.h5"
    if not hasH5File(mcFeatureFile):
        mcFeatures = multicutFromLocalProbs(raw=raw, rag=rag, localProbs=localProbs,
                                            liftedEdges=liftedEdges)
        f5 = h5py.File(mcFeatureFile, 'w')
        f5['data'] = mcFeatures
        f5.close()
    else:
        mcFeatures = h5Read(mcFeatureFile)
    ucmFeatureFile = featureFile + "ucm.h5"
    if not hasH5File(ucmFeatureFile):
        ucmFeatures = ucmFromLocalProbs(raw=raw, rag=rag, localProbs=localProbs,
                                        liftedEdges=liftedEdges,
                                        liftedObj=liftedObj)
        f5 = h5py.File(ucmFeatureFile, 'w')
        f5['data'] = ucmFeatures
        f5.close()
    else:
        ucmFeatures = h5Read(ucmFeatureFile)
    # combine (UCM features arrive transposed — see ucmFromHessian's axis note)
    features = numpy.concatenate([mcFeatures, ucmFeatures.swapaxes(0,1)],axis=1)
    f5 = h5py.File(featureFile, 'w')
    f5['data'] = features
    f5.close()
@reraise_with_stack
def accumlatedLiftedFeatures(raw, pmap, rag, liftedEdges, liftedObj):
    """Per-lifted-edge features from accumulated node statistics.

    Combines geometric node features (sizes, center distance) with symmetric
    combinations (sum/product/absdiff/min/max) of per-node filter responses.
    """
    uv = liftedEdges
    u = uv[:,0]
    v = uv[:,1]
    # geometric edge features
    geometricFeaturs = nifty.graph.rag.accumulateGeometricNodeFeatures(rag,
        blockShape=[75, 75],
        numberOfThreads=1)
    nodeSize = geometricFeaturs[:,0]
    # NOTE(review): nodeCenter takes a single column (1:2) though a 2-D center
    # seems intended, and nodeAxisA is immediately overwritten by the next
    # assignment (the second was presumably meant to be nodeAxisB) — both
    # variables are unused below; confirm before fixing.
    nodeCenter = geometricFeaturs[:,1:2]
    nodeAxisA = geometricFeaturs[:,2:4]
    nodeAxisA = geometricFeaturs[:,4:6]
    # Squared distance between the node "centers" of each lifted pair.
    diff = (nodeCenter[u,:] - nodeCenter[v,:])**2
    diff = numpy.sum(diff,axis=1)
    allEdgeFeat = [
        # sizes
        (nodeSize[u] + nodeSize[v])[:,None],
        (numpy.abs(nodeSize[u] - nodeSize[v]))[:,None],
        (nodeSize[u] * nodeSize[v])[:,None],
        (numpy.minimum(nodeSize[u] , nodeSize[v]))[:,None],
        (numpy.maximum(nodeSize[u] , nodeSize[v]))[:,None],
        diff[:,None]
    ]
    # Per-pixel feature stack: raw, optional pmap, and filters at 3 scales.
    pixelFeats = [
        raw[:,:,None],
    ]
    if pmap is not None:
        pixelFeats.append(pmap[:,:,None])
    for sigma in (1.0, 2.0, 4.0):
        pf = [
            vigra.filters.hessianOfGaussianEigenvalues(raw, 1.0*sigma),
            vigra.filters.structureTensorEigenvalues(raw, 1.0*sigma, 2.0*sigma),
            vigra.filters.gaussianGradientMagnitude(raw, 1.0*sigma)[:,:,None],
            vigra.filters.gaussianSmoothing(raw, 1.0*sigma)[:,:,None]
        ]
        pixelFeats.extend(pf)
        if pmap is not None:
            pixelFeats.append(vigra.filters.gaussianSmoothing(pmap, 1.0*sigma)[:,:,None])
    pixelFeats = numpy.concatenate(pixelFeats, axis=2)
    # Accumulate node statistics per channel, then form symmetric pair features.
    for i in range(pixelFeats.shape[2]):
        pixelFeat = pixelFeats[:,:,i]
        nodeFeat = nifty.graph.rag.accumulateNodeStandartFeatures(
            rag=rag, data=pixelFeat.astype('float32'),
            minVal=pixelFeat.min(),
            maxVal=pixelFeat.max(),
            blockShape=[75, 75],
            numberOfThreads=10
        )
        uFeat = nodeFeat[u,:]
        vFeat = nodeFeat[v,:]
        fList =[
            uFeat + vFeat,
            uFeat * vFeat,
            numpy.abs(uFeat-vFeat),
            numpy.minimum(uFeat,vFeat),
            numpy.maximum(uFeat,vFeat),
        ]
        edgeFeat = numpy.concatenate(fList, axis=1)
        allEdgeFeat.append(edgeFeat)
    allEdgeFeat = numpy.concatenate(allEdgeFeat, axis=1)
    return allEdgeFeat
@reraise_with_stack
def liftedFeatures(raw, pmap, rag, liftedEdges, liftedObj, distances, featureFile):
    """Compute (or reload cached) UCM + accumulated features, append the
    lifted-edge distances, and write the combined matrix to *featureFile*.
    Intermediates are cached as "<featureFile>ucm.h5" / "<featureFile>acc.h5".
    """
    ucmFeatureFile = featureFile + "ucm.h5"
    if not hasH5File(ucmFeatureFile):
        ucmFeatures = ucmFromHessian(raw=raw, rag=rag,
                            liftedEdges=liftedEdges,
                            liftedObj=liftedObj)
        f5 = h5py.File(ucmFeatureFile, 'w')
        f5['data'] = ucmFeatures
        f5.close()
    else:
        ucmFeatures = h5Read(ucmFeatureFile)
    accFeatureFile = featureFile + "acc.h5"
    if not hasH5File(accFeatureFile):
        # (variable name keeps the original 'accFeatrues' spelling)
        accFeatrues = accumlatedLiftedFeatures(raw=raw, pmap=pmap,
                                rag=rag,
                                liftedEdges=liftedEdges,
                                liftedObj=liftedObj)
        f5 = h5py.File(accFeatureFile, 'w')
        f5['data'] = accFeatrues
        f5.close()
    else:
        accFeatrues = h5Read(accFeatureFile)
    # combine (UCM features arrive transposed, hence the swapaxes)
    features = numpy.concatenate([accFeatrues,distances[:,None], ucmFeatures.swapaxes(0,1)],axis=1)
    f5 = h5py.File(featureFile, 'w')
    f5['data'] = features
    f5.close()
|
[
"thorsten.beier@iwr.uni-heidelberg.de"
] |
thorsten.beier@iwr.uni-heidelberg.de
|
b5b422227dabc1bfe8fc6b9334c87ba02f816f48
|
5abdbe26ad89d50761e505d02c35ea184d79f712
|
/users/views.py
|
a97471d6b3ea8fef81e3388ad3b5f6e8a24fd6db
|
[] |
no_license
|
liyongjun-brayan/xuexi
|
5c00abaeadb46caa4a63fdcd316fabd2d1ebdb15
|
b5356a5115b34dc1d5f627215aef780d7d5a0693
|
refs/heads/master
| 2021-06-25T10:25:12.602434
| 2019-08-27T02:27:23
| 2019-08-27T02:27:23
| 204,632,981
| 1
| 0
| null | 2021-06-10T21:54:15
| 2019-08-27T06:16:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm
def logout_view(request):
    """Log the current user out, then redirect to the learning_logs index page."""
    logout(request)
    return HttpResponseRedirect(reverse('learning_logs:index'))
def register(request):
    """Register a new user; on success log them in and redirect to the index."""
    if request.method == 'POST':
        # Bound form: validate the submitted registration data.
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Authenticate with the fresh credentials and sign the user in,
            # then send them to the home page.
            authenticated_user = authenticate(username=new_user.username,
                                              password=request.POST['password1'])
            login(request, authenticated_user)
            return HttpResponseRedirect(reverse('learning_logs:index'))
    else:
        # GET: show a blank registration form.
        form = UserCreationForm()
    return render(request, 'users/register.html', {'form': form})
|
[
"johndoe@example.com"
] |
johndoe@example.com
|
823fbcd52596c818081a713b22fef9460caaa729
|
0abcbbac1efc95877f159c65d6f898e749b1bf09
|
/MyMusicApp/blog/migrations/0002_auto_20190913_2052.py
|
ccada31f79d7ce09bbd01e6324ea7b32fcd3506e
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"bzip2-1.0.6",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"Python-2.0",
"TCL",
"LicenseRef-scancode-python-cwi"
] |
permissive
|
kells4real/MusicApp
|
5055e465b46c39e0687c98b7a8adbb2203ac9156
|
4e4ba065c4f472243413551f63dc4e9eddf7f4a7
|
refs/heads/master
| 2022-10-07T15:49:38.406106
| 2019-10-24T19:55:52
| 2019-10-24T19:55:52
| 197,428,434
| 0
| 1
|
MIT
| 2022-10-02T02:23:26
| 2019-07-17T16:48:16
|
Python
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Generated by Django 2.2.2 on 2019-09-13 19:52
import blog.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional `image` and unique `slug` fields to blog.Post."""
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='image',
            # Upload path is computed by blog.models.image_upload.
            field=models.ImageField(blank=True, default='default.jpg', null=True, upload_to=blog.models.image_upload),
        ),
        migrations.AddField(
            model_name='post',
            name='slug',
            field=models.SlugField(blank=True, max_length=40, null=True, unique=True),
        ),
    ]
|
[
"kells4real@gmail.com"
] |
kells4real@gmail.com
|
ef65cb5846d5c0c185b517fe86785d6f5e79bb80
|
84290c584128de3e872e66dc99b5b407a7a4612f
|
/Writing Functions in Python/More on Decorators/Print the return type.py
|
5e5dcd69342c2f3687cdaafabdcedb9d3637648a
|
[] |
no_license
|
BautizarCodigo/DataAnalyticEssentials
|
91eddc56dd1b457e9e3e1e3db5fbbb2a85d3b789
|
7f5f3d8936dd4945ee0fd854ef17f04a04eb7b57
|
refs/heads/main
| 2023-04-11T04:42:17.977491
| 2021-03-21T19:05:17
| 2021-03-21T19:05:17
| 349,784,608
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
def print_return_type(func):
    """Decorator that prints the type of every value *func* returns.

    The return value is passed through unchanged; only a diagnostic line of
    the form ``name() returned type <class ...>`` is printed per call.
    """
    from functools import wraps

    # wraps preserves func's __name__/__doc__ on the wrapper (the original
    # decorator clobbered them).
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Call the function being decorated
        result = func(*args, **kwargs)
        print('{}() returned type {}'.format(
            func.__name__, type(result)
        ))
        return result
    # Return the decorated function
    return wrapper
@print_return_type
def foo(value):
    """Identity function used to demonstrate the decorator."""
    return value
# Each call first prints "foo() returned type ..." and then the value itself.
print(foo(42))
print(foo([1, 2, 3]))
print(foo({'a': 42}))
[
"78171986+BautizarCodigo@users.noreply.github.com"
] |
78171986+BautizarCodigo@users.noreply.github.com
|
672ab02fe434eb1a41749a43cc63853910b29c5f
|
a19275ff09caf880e135bce76dc7a0107ec0369e
|
/catkin_ws/src/robot_python/nodes/send_single_cmd_gaebo_node.py
|
0e95c32d3222631780ea123c1986f759e3d06a86
|
[] |
no_license
|
xtyzhen/Multi_arm_robot
|
e201c898a86406c1b1deb82326bb2157d5b28975
|
15daf1a80c781c1c929ba063d779c0928a24b117
|
refs/heads/master
| 2023-03-21T14:00:24.128957
| 2021-03-10T12:04:36
| 2021-03-10T12:04:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,747
|
py
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
#本文档用于发送关节角度
#程序员:陈永厅
#版权:哈尔滨工业大学
#日期:初稿:2019.11.6
import rospy
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import Float64
import os
import numpy as np
from robot_python import FileOpen
def talker():
    """Read joint commands from data/position.txt and publish them, one row
    per loop tick, to the seven armc effort-controller command topics.
    NOTE(review): Python 2 file (print statements)."""
    # Create one publisher per joint command topic.
    pub1 = rospy.Publisher("armc/joint1_effort_controller/command", Float64, queue_size=1)
    pub2 = rospy.Publisher("armc/joint2_effort_controller/command", Float64, queue_size=1)
    pub3 = rospy.Publisher("armc/joint3_effort_controller/command", Float64, queue_size=1)
    pub4 = rospy.Publisher("armc/joint4_effort_controller/command", Float64, queue_size=1)
    pub5 = rospy.Publisher("armc/joint5_effort_controller/command", Float64, queue_size=1)
    pub6 = rospy.Publisher("armc/joint6_effort_controller/command", Float64, queue_size=1)
    pub7 = rospy.Publisher("armc/joint7_effort_controller/command", Float64, queue_size=1)
    rospy.init_node("joint_position_command", anonymous=True)
    rate = rospy.Rate(100)  # 100 Hz loop rate (original comment said 10hz)
    # Read the command file (one row of joint values per command).
    file_path = os.path.abspath("..")
    file_name = 'data/position.txt'
    path = os.path.join(file_path,file_name)
    command_pos = np.array(FileOpen.read(path))
    #print command_pos.shape()
    # Copy the command table into a dense array.
    kk = len(command_pos[:, 0])
    n = len(command_pos[0, :])
    print "数据个数:%d" % kk
    print "数据长度:%d" % n
    command_data = np.zeros([kk,n])
    for i in range(kk):
        for j in range(n):
            command_data[i,j] = command_pos[i,j]
    k = 0
    while not rospy.is_shutdown():
        # Stop after the last command row has been sent.
        if k == kk:
            break
        tip_str = "第 %s 次命令:" % k
        rospy.loginfo(tip_str)
        joint1_data = Float64()
        joint2_data = Float64()
        joint3_data = Float64()
        joint4_data = Float64()
        joint5_data = Float64()
        joint6_data = Float64()
        joint7_data = Float64()
        joint1_data.data = command_data[k, 0]
        joint2_data.data = command_data[k, 1]
        joint3_data.data = command_data[k, 2]
        joint4_data.data = command_data[k, 3]
        joint5_data.data = command_data[k, 4]
        joint6_data.data = command_data[k, 5]
        joint7_data.data = command_data[k, 6]
        print "send data:%s" % command_data[k, :]
        pub1.publish(joint1_data)
        pub2.publish(joint2_data)
        pub3.publish(joint3_data)
        pub4.publish(joint4_data)
        pub5.publish(joint5_data)
        pub6.publish(joint6_data)
        pub7.publish(joint7_data)
        rate.sleep()
        k = k + 1
if __name__ == '__main__':
    # ROSInterruptException is raised on node shutdown (e.g. Ctrl-C); exit quietly.
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
|
[
"qyz146006@163.com"
] |
qyz146006@163.com
|
ff78462a33b2ecb3dd40c291fb4d19cfd65795d3
|
50639b8c539b9d69539f9b9016527f831cee213d
|
/LC/LC17-LetterCombinationsOfPhoneNumber.py
|
2031ec7bbc8fc07ceae9ae08c38a89b1b4edec00
|
[] |
no_license
|
yaelBrown/pythonSandbox
|
fe216b2c17d66b6dde22dd45fe2a91f1315f2db4
|
abac8cabeb7a2b4fbbe1fc8655f7f52a182eaabe
|
refs/heads/master
| 2023-08-10T09:42:26.249444
| 2023-08-03T21:54:37
| 2023-08-03T21:54:37
| 194,980,832
| 0
| 1
| null | 2023-05-02T18:01:41
| 2019-07-03T04:42:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 800
|
py
|
"""
Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent. Return the answer in any order.
A mapping of digits to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
"""
class Solution:
    def letterCombinations(self, digits: str) -> List[str]:
        """Return all letter strings *digits* can spell on a phone keypad.

        Each digit 2-9 maps to its telephone-button letters; 0 and 1 map to
        no letters (so any input containing them yields no combinations).
        An empty input yields an empty list.  The original body was left
        unfinished (no return); this completes it with a breadth-first
        expansion, and renames the dict so it no longer shadows builtin map.
        """
        if digits == "": return []
        letters = {
            "0": "",
            "1": "",
            "2": "abc",
            "3": "def",
            "4": "ghi",
            "5": "jkl",
            "6": "mno",
            "7": "pqrs",
            "8": "tuv",
            "9": "wxyz"
        }
        # Extend every partial combination with each letter of the next digit.
        combos = [""]
        for digit in digits:
            combos = [prefix + ch for prefix in combos for ch in letters[digit]]
        return combos
[
"yaelrbrown@gmail.com"
] |
yaelrbrown@gmail.com
|
60c2e8e9ff8c5b9bd21b22fa945733c893bb0522
|
3e0e674e07e757dfb23e18ba30dbb440c0966848
|
/树二.py
|
d983ceabbf739253db8b0bd723fcaccfa61ffb57
|
[] |
no_license
|
Jasonmes/Algorithm--Advanced
|
0bfaa844127ff146997d2dd19b4943be29467fad
|
3f29b07b6d55197c5d21f44a474f6e96021cd5b0
|
refs/heads/master
| 2020-03-27T12:34:49.537881
| 2018-08-29T06:26:38
| 2018-08-29T06:26:38
| 146,554,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Jason Mess
# A tree modeled as nested lists: each element is a branch, and branches
# may themselves contain sub-branches.
Tree = [['a', 'b'], ['c'], ['b', ['e', 'f']]]
first_branch = Tree[0]
print(first_branch[1])      # second leaf of the first branch -> 'b'
third_branch = Tree[2]
print(third_branch[1][0])   # first leaf of the nested sub-branch -> 'e'
|
[
"wow336@163.com"
] |
wow336@163.com
|
6ddae08c21df8c42e44f5e6d4404af25f79849a0
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/64_2.py
|
5b45ab1e95ee997b47876fb571f3e9db42c2eeed
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,023
|
py
|
Python program to print Calendar without calendar or datetime module
Given the month and year. The task is to show the calendar of that month and
in the given year without using any module or pre-defined functions.
**Examples:**
**Input :**
mm(1-12) :9
yy :2010
**Output :**
September 2010
Su Mo Tu We Th Fr Sa
01 02 03 04
05 06 07 08 09 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30
**Approach:**
In the program below, we first calculate the number of odd days to find the
weekday of the date 01-mm-yyyy. Then, we take the year (yy) and the month (mm)
as input and display the calendar for that month of the year.
Below is the implementation of the given approach.
__
__
__
__
__
__
__
# Python code to print Calendar
# Without use of Calendar module
mm = 2
yy = 2020
month ={1:'January', 2:'February', 3:'March',
4:'April', 5:'May', 6:'June', 7:'July',
8:'August', 9:'September', 10:'October',
11:'November', 12:'December'}
# code below for calculation of odd days
day =(yy-1)% 400
day = (day//100)*5 + ((day % 100) - (day %
100)//4) + ((day % 100)//4)*2
day = day % 7
nly =[31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31]
ly =[31, 29, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31]
s = 0
if yy % 4 == 0:
for i in range(mm-1):
s+= ly[i]
else:
for i in range(mm-1):
s+= nly[i]
day += s % 7
day = day % 7
# variable used for white space filling
# where date not present
space =''
space = space.rjust(2, ' ')
# code below is to print the calendar
print(month[mm], yy)
print('Su', 'Mo', 'Tu', 'We', 'Th', 'Fr',
'Sa')
if mm == 9 or mm == 4 or mm == 6 or mm == 11:
for i in range(31 + day):
if i<= day:
print(space, end =' ')
else:
print("{:02d}".format(i-day), end =' ')
if (i + 1)% 7 == 0:
print()
elif mm == 2:
if yy % 4 == 0:
p = 30
else:
p = 29
for i in range(p + day):
if i<= day:
print(space, end =' ')
else:
print("{:02d}".format(i-day), end =' ')
if (i + 1)% 7 == 0:
print()
else:
for i in range(32 + day):
if i<= day:
print(space, end =' ')
else:
print("{:02d}".format(i-day), end =' ')
if (i + 1)% 7 == 0:
print()
---
__
__
**Output:**
February 2020
Su Mo Tu We Th Fr Sa
01
02 03 04 05 06 07 08
09 10 11 12 13 14 15
16 17 18 19 20 21 22
23 24 25 26 27 28 29
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
243b00fb792df0d908725a77d369f7a886e958ca
|
7319bdc1aa1edd9e37424da47264882753dda919
|
/monitor_nomina.py
|
fde617e7fa6aa3fb079d6c0dc9c7e6ee000411ae
|
[] |
no_license
|
njmube/satconnect
|
4ff81ac132811d2784d82a872be34590f53021db
|
de421f546a6f7f4cc5f247d1b2ba91ac272bdcb9
|
refs/heads/master
| 2023-03-18T12:58:18.379008
| 2017-10-24T07:14:05
| 2017-10-24T07:14:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
# -*- coding: utf-8 -*-
from LibTools.filesystem import Carpeta
from slaves import SentinelNomina
import settings
if __name__ == '__main__':
    # Watch the payroll ("nomina") folder configured in settings and start
    # the monitoring loop (blocks until stopped).
    carpeta = Carpeta(settings.folder_nomina)
    sentinela = SentinelNomina(carpeta)
    sentinela.start_Monitoring()
|
[
"="
] |
=
|
7091475a03d37a18e9d953f65307c93e950ce3ad
|
fee71dd79c16f8e4aa4be46aa25863a3e8539a51
|
/ear/core/bs2051.py
|
058eefc981611aa995294b0783b491c5ba08e367
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
ebu/ebu_adm_renderer
|
d004ed857b3004c9de336426f402654779a0eaf8
|
ef2189021203101eab323e1eccdd2527b32a5024
|
refs/heads/master
| 2023-08-09T09:13:06.626698
| 2022-12-07T12:22:39
| 2022-12-07T12:22:39
| 123,921,945
| 61
| 13
|
BSD-3-Clause-Clear
| 2023-08-30T17:17:05
| 2018-03-05T13:15:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,791
|
py
|
import pkg_resources
from ..compatibility import load_yaml
from .geom import PolarPosition
from .layout import Channel, Layout
def _dict_to_channel(d):
    """Build a Channel from one parsed layout-file entry."""
    # The YAML position ("az"/"el") is used for both the real and the nominal
    # polar position; distance is fixed at 1.0.
    position = PolarPosition(azimuth=d["position"]["az"],
                             elevation=d["position"]["el"],
                             distance=1.0)
    return Channel(
        name=d["name"],
        is_lfe=d.get("is_lfe", False),
        polar_position=position,
        polar_nominal_position=position,
        # az_range/el_range default to a degenerate (point) range at the
        # nominal position when not given in the layout file.
        az_range=tuple(d.get("az_range", (position.azimuth, position.azimuth))),
        el_range=tuple(d.get("el_range", (position.elevation, position.elevation))),
    )
def _dict_to_layout(d):
    """Build a Layout (name plus channel list) from one layout-file entry."""
    return Layout(
        name=d["name"],
        channels=list(map(_dict_to_channel, d["channels"])),
    )
def _load_layouts():
    """Load the BS.2051 layouts bundled with the package.

    Returns:
        tuple: (layout_names, layouts_dict) -- the ordered list of layout
        names from the YAML file and a name -> Layout mapping.
    """
    fname = "data/2051_layouts.yaml"
    with pkg_resources.resource_stream(__name__, fname) as layouts_file:
        layouts_data = load_yaml(layouts_file)
    layouts = list(map(_dict_to_layout, layouts_data))
    # Sanity-check the bundled data: every layout's positions must validate.
    for layout in layouts:
        errors = []
        layout.check_positions(callback=errors.append)
        assert errors == []
    layout_names = [layout.name for layout in layouts]
    layouts_dict = {layout.name: layout for layout in layouts}
    return layout_names, layouts_dict
layout_names, layouts = _load_layouts()
def get_layout(name):
    """Look up one of the loudspeaker layouts specified in BS.2051.

    Parameters:
        name (str): Full layout name, e.g. "4+5+0"

    Returns:
        Layout: object representing the layout; real speaker positions are set
        to the nominal positions.

    Raises:
        KeyError: if *name* is not a known layout.
    """
    if name in layouts:
        return layouts[name]
    raise KeyError("Unknown layout name '{name}'.".format(name=name))
|
[
"tom@tomn.co.uk"
] |
tom@tomn.co.uk
|
bb98f35adc8e0f2ec79f4ea7a0b2314a9ec8bec0
|
0a85e9ecb51c89110794aeb399fc3ccc0bff8c43
|
/InterviewCake/Practice Problems/reverse_string_inPlace.py
|
482b60e1d1415f53519182dd35b2f0e7cd6af001
|
[] |
no_license
|
jordan-carson/Data_Structures_Algos
|
6d246cd187e3c3e36763f1eedc535ae1b95c0b18
|
452bb766607963e5ab9e39a354a24ebb26ebaaf5
|
refs/heads/master
| 2020-12-02T23:19:11.315890
| 2020-09-15T01:23:29
| 2020-09-15T01:23:29
| 231,147,094
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
STRING = ['a', 'b', 'c', 'd']
def reverse_string(string_list):
    """Reverse *string_list* in place and return the same list object.

    Uses a symmetric two-pointer swap, so no extra list is allocated.
    """
    length = len(string_list)
    # Swap element i with its mirror; half the length covers every pair.
    for front in range(length // 2):
        back = length - 1 - front
        string_list[front], string_list[back] = (
            string_list[back], string_list[front])
    return string_list
if __name__ == '__main__':
print(reverse_string(STRING))
|
[
"jordanlouiscarson@gmail.com"
] |
jordanlouiscarson@gmail.com
|
7b35ac2384529e8bb902194f56b1d0d824520edc
|
016109b9f052ffd037e9b21fa386b36089b05813
|
/hashP4.py
|
559f024f058f63f9e587e9c5a8b7a38c51b5ec47
|
[] |
no_license
|
nsshayan/DataStructuresAndAlgorithms
|
9194508c5227c5c8c60b9950917a4ea8da8bbab2
|
2f7ee1bc8f4b53c35d1cce62e898a9695d99540a
|
refs/heads/master
| 2022-09-29T21:15:33.803558
| 2022-09-08T17:14:59
| 2022-09-08T17:14:59
| 73,257,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
# Python 2 script: given A numbers and a target k, print "YES" iff two
# elements at distinct positions (or one value occurring twice) sum to k.
A,k = map(int,raw_input().rstrip().split(" "))
nos = map(int,raw_input().rstrip().split(" "))
# Value-indexed frequency table; input values are assumed to fit in
# [0, 1000001] -- TODO confirm against the problem constraints.
hashMap = [0 for y in range(1000002)]
for i in range(A):
    hashMap[nos[i]] += 1
# Two pointers scanning the occupied value range inward from both ends.
left = 0
right = 1000001
flag = 0
while left < right:
    # Skip values that do not occur in the input.
    if hashMap[left] == 0 or hashMap[right]==0:
        while hashMap[left]==0:
            left += 1
        while hashMap[right] == 0:
            right -= 1
    if (left + right ) == k and left != right:
        flag = 1
        break
    elif left+right > k:
        right -= 1
    elif left + right < k:
        left += 1
# Pointers met: a pair still exists only if the value k/2 occurs twice.
if left+right == k and left == right and hashMap[left] > 1:
    flag = 1
if flag == 1:
    print "YES"
else :
    print "NO"
|
[
"nsshayan89@gmail.com"
] |
nsshayan89@gmail.com
|
2d04eb4a6d7119cd114da0714ffeaa23551be0a1
|
ad5ad404d24f1ef195d069b2e9d36b1a22cfd25d
|
/kde/applications/kiten/kiten.py
|
68d4236f5c283e083b03af733ec7b7b92ed78a0d
|
[
"BSD-2-Clause"
] |
permissive
|
arruor/craft-blueprints-kde
|
6643941c87afd09f20dd54635022d8ceab95e317
|
e7e2bef76d8efbc9c4b84411aa1e1863ac8633c1
|
refs/heads/master
| 2020-03-22T17:54:38.445587
| 2018-07-10T11:47:21
| 2018-07-10T11:47:21
| 140,423,580
| 0
| 0
| null | 2018-07-10T11:43:08
| 2018-07-10T11:43:07
| null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues()
self.description = "Kiten"
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = "default"
self.buildDependencies["kde/frameworks/extra-cmake-modules"] = "default"
self.runtimeDependencies["libs/qt5/qtbase"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/karchive"] = "default"
self.runtimeDependencies["kde/frameworks/tier2/kcompletion"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = "default"
self.runtimeDependencies["kde/frameworks/tier2/kcrash"] = "default"
self.runtimeDependencies["kde/frameworks/tier2/kdoctools"] = "default"
self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = "default"
self.runtimeDependencies["kde/frameworks/tier3/khtml"] = "default"
self.runtimeDependencies["kde/frameworks/tier3/kxmlgui"] = "default"
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
|
[
"vonreth@kde.org"
] |
vonreth@kde.org
|
7c5e77e8e8708914b94c95c7da9fc3574ad25c8c
|
a14795a79fd8f39cede7fa5eb86f9717b5c289c2
|
/backend/course/api/v1/serializers.py
|
977b3866deffb183b0133225485e9b022f8b7e3e
|
[] |
no_license
|
crowdbotics-apps/dearfuturescientist-21123
|
fcdbe95a9cd9e8713198b6accbeeb56aa5b0b2d4
|
5b282411ebaf39580b938f6678afc8a36e34aba4
|
refs/heads/master
| 2022-12-30T20:23:25.888830
| 2020-10-05T19:00:56
| 2020-10-05T19:00:56
| 301,510,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
from rest_framework import serializers
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
class LessonSerializer(serializers.ModelSerializer):
class Meta:
model = Lesson
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
class Meta:
model = SubscriptionType
fields = "__all__"
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
class Meta:
model = Recording
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
class Meta:
model = Subscription
fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
class Meta:
model = Enrollment
fields = "__all__"
class ModuleSerializer(serializers.ModelSerializer):
class Meta:
model = Module
fields = "__all__"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
263ca80ed3ebdcc465692fef40cd71b494ac004c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03807/s835726532.py
|
c603899438bd501bb5871b424daa8724dfe35dfc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
# Read N and a list of N integers; print "YES" when the list contains an
# even number of odd values, otherwise "NO".
N = int(input())
a = list(map(int,input().split()))
# Count the odd elements.
odd = 0
for i in range(N):
    if a[i] % 2:
        odd += 1
if odd % 2:
    print('NO')
else:
    print('YES')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fcb2745a3b28acb9bdab55a49b61a805e5d2198f
|
55493112595d303d39b90ca9112e1d0a52f435e4
|
/WorkforceManagement/views/Computer_View.py
|
4fc447fa4d1e6adaa0a611f92c7069d1ab909d56
|
[] |
no_license
|
NSS-Spontaneous-Spoonbills/Sprint2
|
a06c2ea08dbe58289984591b5ef412242924f86f
|
7fd603ee531556b32b100c5a9f109b0e9207f369
|
refs/heads/master
| 2020-03-25T11:38:55.449223
| 2018-08-13T21:00:35
| 2018-08-13T21:00:35
| 143,741,505
| 0
| 1
| null | 2018-08-13T21:26:08
| 2018-08-06T14:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,944
|
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from WorkforceManagement.models import Computer
from WorkforceManagement.forms import *
def Computer_List_View(request):
"""Displays all computers in the database
Author: Erin Meaker
"""
computers = Computer.objects.all()
return render(request, 'WorkforceManagement/Computer_List.html', {'computers': computers})
def Computer_Detail_View(request, pk):
"""Displays details about a specific computer
Author: Erin Meaker
"""
computer = get_object_or_404(Computer, pk=pk)
return render(request, 'WorkforceManagement/Computer_Detail.html', {'computer': computer})
def Computer_New_View(request):
    """Displays form for adding new computer to the database
    Author: Erin Meaker
    """
    if request.method == "POST":
        form = Computer_New_Form(request.POST)
        # NOTE(review): form.is_valid() is never called before save(); invalid
        # POST data will raise instead of re-rendering the form with errors --
        # confirm whether validation should be added here.
        new_comp = form.save(commit=False)
        new_comp.save()
        return redirect('computer_detail', pk=new_comp.pk)
    else:
        # GET: render an empty form (template is shared with the update view).
        form = Computer_New_Form()
        return render(request, 'WorkforceManagement/Computer_Update.html', {'form': form})
def Computer_Update_View(request, pk):
"""Displays form for updating the computers
Author: Erin Meaker
"""
computer = get_object_or_404(Computer, pk=pk)
if request.method == "POST":
form = Computer_Update_Form(request.POST, instance=computer)
computer = form.save(commit=False)
computer.save()
return redirect('computer_detail', pk=computer.pk)
else:
form = Computer_Update_Form(instance=computer)
return render(request, 'WorkforceManagement/Computer_Update.html', {'form': form})
def Computer_Delete_View(request, pk):
"""Displays template for deleting a computer
Author: Erin Meaker"""
computer = get_object_or_404(Computer, pk=pk)
computer.delete()
return redirect('computer_list')
|
[
"erinmeaker@gmail.com"
] |
erinmeaker@gmail.com
|
0f20818aacacd277b492468e80b7128771cc7584
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_97/1704.py
|
2ef79a2cad74434c186149c67d373ceeab96e152
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
def areRecycled(number1, number2):
    """Return True if *number2* is a digit-rotation of *number1*.

    Both arguments are lists of digit characters of equal length.

    Fixes over the original: works on a copy of *number1* (the original
    aliased the argument and rotated the caller's list in place, leaving it
    rotated when a match was found early), and drops an unused local.
    """
    rotated = list(number1)  # copy so the caller's list is never mutated
    for _ in range(len(number2)):
        # Rotate right by one digit: move the last digit to the front.
        rotated.insert(0, rotated.pop())
        if rotated == number2:
            return True
    return False
archi = open("C-small-attempt2.in","r")
cant = open("output.dat","w")
cases = int(archi.readline().split()[0])
for i in range(cases):
cont = 0
label = "Case #" + str(i+1) + ": "
numeros = archi.readline().replace('\n','').split(" ")
limInferior = int(numeros[0])
limSuperior = int(numeros[1])
j=limInferior
while j < limSuperior:
k=j+1;
while k<= limSuperior:
if areRecycled(list(str(k)),list(str(j))):
cont = cont + 1
k = k + 1
j = j + 1
label = label + str(cont) + '\n'
cant.writelines(label)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
4d7ab7bfcefd8572eb06e3978ebf7097d6c4a4f4
|
232fc2c14942d3e7e28877b502841e6f88696c1a
|
/dizoo/multiagent_particle/config/cooperative_navigation_collaq_config.py
|
59f41109f0f514f61ca8866df2a01ca581003b23
|
[
"Apache-2.0"
] |
permissive
|
shengxuesun/DI-engine
|
ebf84221b115b38b4b3fdf3079c66fe81d42d0f7
|
eb483fa6e46602d58c8e7d2ca1e566adca28e703
|
refs/heads/main
| 2023-06-14T23:27:06.606334
| 2021-07-12T12:36:18
| 2021-07-12T12:36:18
| 385,454,483
| 1
| 0
|
Apache-2.0
| 2021-07-13T02:56:27
| 2021-07-13T02:56:27
| null |
UTF-8
|
Python
| false
| false
| 2,129
|
py
|
from easydict import EasyDict
n_agent = 5
num_landmarks = n_agent
collector_env_num = 4
evaluator_env_num = 2
cooperative_navigation_collaq_config = dict(
env=dict(
n_agent=n_agent,
num_landmarks=num_landmarks,
max_step=100,
collector_env_num=collector_env_num,
evaluator_env_num=evaluator_env_num,
manager=dict(shared_memory=False, ),
n_evaluator_episode=5,
stop_value=0,
),
policy=dict(
cuda=True,
on_policy=True,
model=dict(
agent_num=n_agent,
obs_shape=2 + 2 + (n_agent - 1) * 2 + num_landmarks * 2,
alone_obs_shape=2 + 2 + (num_landmarks) * 2,
global_obs_shape=n_agent * 2 + num_landmarks * 2 + n_agent * 2,
action_shape=5,
hidden_size_list=[128, 128, 64],
attention=True,
self_feature_range=[2, 4], # placeholder
ally_feature_range=[4, n_agent * 2 + 2], # placeholder
attention_size=32,
),
agent_num=n_agent,
learn=dict(
update_per_collect=100,
batch_size=32,
learning_rate=0.0001,
target_update_theta=0.001,
discount_factor=0.99,
),
collect=dict(
n_sample=600,
unroll_len=16,
env_num=collector_env_num,
),
eval=dict(env_num=evaluator_env_num, ),
other=dict(eps=dict(
type='exp',
start=1.0,
end=0.05,
decay=100000,
), ),
),
)
cooperative_navigation_collaq_config = EasyDict(cooperative_navigation_collaq_config)
main_config = cooperative_navigation_collaq_config
cooperative_navigation_collaq_create_config = dict(
env=dict(
import_names=['dizoo.multiagent_particle.envs.particle_env'],
type='cooperative_navigation',
),
env_manager=dict(type='subprocess'),
policy=dict(type='collaq'),
)
cooperative_navigation_collaq_create_config = EasyDict(cooperative_navigation_collaq_create_config)
create_config = cooperative_navigation_collaq_create_config
|
[
"niuyazhe@sensetime.com"
] |
niuyazhe@sensetime.com
|
984769b8bfd917b7f3a450664dda8ca833caabdc
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/components/safe_browsing/content/web_ui/DEPS
|
c4dfe28ac40a5b9fd60086f5f0bb2d45f1b6d99f
|
[
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034
| 2020-11-16T01:40:14
| 2020-11-16T01:40:14
| 209,262,645
| 18
| 21
|
BSD-3-Clause
| 2023-03-23T06:20:07
| 2019-09-18T08:52:07
| null |
UTF-8
|
Python
| false
| false
| 409
|
include_rules = [
"+components/enterprise/common/proto/connectors.pb.h",
"+components/grit/components_resources.h",
"+components/password_manager/core/browser/hash_password_manager.h",
"+components/user_prefs",
"+components/safe_browsing/core/proto/csd.pb.h",
"+components/strings/grit/components_strings.h",
"+components/grit/components_scaled_resources.h",
"+components/safe_browsing_db",
]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
699b7062a1c9a0e705a481a5c8cf42e5a18dc7f6
|
ef20884169d10ec9ac4d1d3b77ee35245d248294
|
/practice/first_step_with_tensorflow/kmean_create_data.py
|
b95cc97c8c9d36f85fbdcbe9af721f29fd09ec7d
|
[] |
no_license
|
heaven324/Deeplearning
|
64016671879cdf1742eff6f374cfb640cfc708ae
|
a7a8d590fa13f53348f83f8c808538affbc7b3e8
|
refs/heads/master
| 2023-05-05T08:54:27.888155
| 2021-05-22T08:25:47
| 2021-05-22T08:25:47
| 188,010,607
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
import numpy as np

# Sample data for a k-means demo: draw 2000 2-D points from a mixture of two
# Gaussian clusters -- roughly half around (0, 0) with sigma 0.9 and half
# around (3, 1) with sigma 0.5.
num_points = 2000
vectors_set = []
for i in range(num_points):
    # Fair coin chooses which cluster this point belongs to.
    if np.random.random() > 0.5:
        vectors_set.append([np.random.normal(0.0, 0.9), np.random.normal(0.0, 0.9)])
    else:
        vectors_set.append([np.random.normal(3.0, 0.5), np.random.normal(1.0, 0.5)])
# Check the generated random numbers.
#print(vectors_set)
|
[
"wjdtjdgh2005@gmail.com"
] |
wjdtjdgh2005@gmail.com
|
83c63b60c22628725f344b1bf4635e30bbf5aae9
|
577fd6f5ce00ba4b530937e84f3b426b30cd9d08
|
/Checkiolearn/Polygon/sun_angle.py
|
ecd226f204d9bf718eb6cd5d5451c14c7f50b0f1
|
[] |
no_license
|
YxiangJ/Python
|
33e2d0d4c26ce35ccd3504b73de15e45adb6946c
|
bcb1a0ace39fbcbe868a341652085c0ddf307c17
|
refs/heads/master
| 2018-09-24T08:24:13.692535
| 2018-06-07T01:11:00
| 2018-06-07T01:11:00
| 126,120,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
def sun_angle(time):
    """Return the sun's angle for a "HH:MM" time string.

    The sun rises at 06:00 (0 degrees) and sets at 18:00 (180 degrees),
    climbing 15 degrees per hour (0.25 degrees per minute).  Outside the
    06:00-18:00 window the string "I don't see the sun!" is returned.

    Fix over the original: times with minutes past 18:00 (e.g. "18:30")
    previously returned an angle above 180 because only the hour was checked;
    they are now correctly treated as after sunset.
    """
    hours, minutes = (int(part) for part in time.split(':'))
    total_minutes = hours * 60 + minutes
    # Daylight spans minute 360 (06:00) through minute 1080 (18:00) inclusive.
    if total_minutes < 6 * 60 or total_minutes > 18 * 60:
        return "I don't see the sun!"
    # 180 degrees over 720 minutes of daylight -> 0.25 degrees per minute.
    return (total_minutes - 6 * 60) / 4
if __name__ == '__main__':
print("Example:")
print(sun_angle("07:00"))
# These "asserts" using only for self-checking and not necessary for auto-testing
assert sun_angle("07:00") == 15
assert sun_angle("01:23") == "I don't see the sun!"
print("Coding complete? Click 'Check' to earn cool rewards!")
|
[
"284953505@qq.com"
] |
284953505@qq.com
|
c14d81b13ff0bfca027e09587f8f586914771894
|
8051c715e86095c1a0f2d6dcee78150417562d00
|
/app/api/response_api.py
|
8ea2f772957ae7aa5d8b6a8b84bed6bcac25e956
|
[
"BSD-3-Clause"
] |
permissive
|
minkione/Apfell
|
45bd47249afa59389ab8237558c52d3f083cae29
|
096b6524c44b0673f11d18bd2388193d074380d6
|
refs/heads/master
| 2020-03-28T12:22:37.741190
| 2018-09-10T02:42:06
| 2018-09-10T02:42:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,766
|
py
|
from app import apfell, db_objects
from sanic.response import json
from app.database_models.model import Task, Response
import base64
from sanic_jwt.decorators import protected, inject_user
from app.api.file_api import create_filemeta_in_database_func, download_file_to_database_func
import json as js
# This gets all responses in the database
@apfell.route(apfell.config['API_BASE'] + "/responses/", methods=['GET'])
@inject_user()
@protected()
async def get_all_responses(request, user):
try:
all_responses = await db_objects.execute(Response.select())
except Exception as e:
return json({'status': 'error',
'error': 'Cannot get responses'})
return json([c.to_json() for c in all_responses])
# Get a single response
@apfell.route(apfell.config['API_BASE'] + "/response/<rid:int>", methods=['GET'])
@inject_user()
@protected()
async def get_one_response(request, user, rid):
try:
resp = await db_objects.get(Response, id=rid)
except Exception as e:
return json({'status': 'error', 'error': 'Cannot get that response'})
return json(resp.to_json())
# implant calling back to update with base64 encoded response from executing a task
# We don't add @protected or @injected_user here because the callback needs to be able to post here for responses
@apfell.route(apfell.config['API_BASE'] + "/responses/<tid:int>", methods=['POST'])
async def update_task_for_callback(request, tid):
    """Store a callback's base64-encoded response for task *tid*.

    Fix over the original: base64.b64decode(data['response']) previously ran
    before the `'response' not in data` check, so a missing key raised an
    unhandled KeyError and the explicit check was dead code.  Decoding now
    happens after the check, inside the error-handling block.
    """
    data = request.json
    try:
        task = await db_objects.get(Task, id=tid)
    except Exception as e:
        return json({'status': 'error',
                     'error': 'Task does not exist'})
    try:
        if 'response' not in data:
            return json({'status': 'error', 'error': 'task response not in data'})
        # Decode only after we know the key exists; decode errors are caught
        # below and reported through the normal error path.
        decoded = base64.b64decode(data['response']).decode("utf-8")
        if task.command.cmd == "download":
            # Download responses are JSON control messages routed to the file API.
            try:
                download_response = js.loads(decoded)
                if 'total_chunks' in download_response:
                    return await create_filemeta_in_database_func(download_response)
                elif 'chunk_data' in download_response:
                    return await download_file_to_database_func(download_response)
            except Exception as e:
                # Not valid JSON: fall through and store it as a plain response.
                pass
        resp = await db_objects.create(Response, task=task, response=decoded)
        task.status = "processed"
        await db_objects.update(task)
        status = {'status': 'success'}
        resp_json = resp.to_json()
        return json({**status, **resp_json}, status=201)
    except Exception as e:
        print(e)
        return json({'status': 'error',
                     'error': 'Failed to update task',
                     'msg': str(e)})
|
[
"codybthomas@gmail.com"
] |
codybthomas@gmail.com
|
e3e25ce23370e068912110921559d559bca593e6
|
1a5c27bc6e2d39a258dd517d2dc3570c13e42a70
|
/flaskext/utils.py
|
ff2d1dcf02a01b52fcfe2121292f09a4dde4989a
|
[
"MIT"
] |
permissive
|
fumingshih/flask-peewee
|
0f8e169ca7ab2d7ab437a5620a2ff2f082d668dd
|
4f44ec5583abba5099880a2a2af76404223a594b
|
refs/heads/master
| 2021-01-18T11:00:19.120283
| 2011-11-09T14:36:02
| 2011-11-09T14:36:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
import math
import random
import re
import sys
from hashlib import sha1
from flask import abort, request, render_template
from peewee import Model, DoesNotExist, SelectQuery
def get_object_or_404(query_or_model, **query):
try:
return query_or_model.get(**query)
except DoesNotExist:
abort(404)
def object_list(template_name, qr, var_name='object_list', **kwargs):
pq = PaginatedQuery(qr, kwargs.pop('paginate_by', 20))
kwargs[var_name] = pq.get_list()
return render_template(template_name, pagination=pq, page=pq.get_page(), **kwargs)
class PaginatedQuery(object):
page_var = 'page'
def __init__(self, query_or_model, paginate_by):
self.paginate_by = paginate_by
if isinstance(query_or_model, SelectQuery):
self.query = query_or_model
self.model = self.query.model
else:
self.model = query_or_model
self.query = self.model.select()
def get_page(self):
return int(request.args.get(self.page_var) or 1)
def get_pages(self):
return math.ceil(float(self.query.count()) / self.paginate_by)
def get_list(self):
return self.query.paginate(self.get_page(), self.paginate_by)
def get_next():
if not request.query_string:
return request.path
return '%s?%s' % (request.path, request.query_string)
def slugify(s):
    """Lower-case *s* and collapse every run of characters outside
    [a-z0-9_-] into a single hyphen."""
    # Raw string fixes the invalid "\-" escape, which is a
    # DeprecationWarning/SyntaxWarning on modern Pythons.
    return re.sub(r'[^a-z0-9_\-]+', '-', s.lower())
def load_class(s):
    """Import and return the object named by dotted path *s*,
    e.g. "package.module.ClassName"."""
    path, klass = s.rsplit('.', 1)
    # importlib.import_module returns the named (sub)module directly, which is
    # the supported idiom; __import__ + sys.modules lookup did the same job
    # indirectly.
    import importlib
    mod = importlib.import_module(path)
    return getattr(mod, klass)
# borrowing these methods, slightly modified, from django.contrib.auth
def get_hexdigest(salt, raw_password):
    # SHA-1 of salt + password, as hex.  NOTE(review): Python 2 era -- sha1()
    # requires bytes on Python 3, so str inputs would need .encode() there;
    # SHA-1 is also considered weak for password hashing by modern standards.
    return sha1(salt + raw_password).hexdigest()
def make_password(raw_password):
    # Salt is 5 hex chars derived from two PRNG draws (random, not a CSPRNG).
    salt = get_hexdigest(str(random.random()), str(random.random()))[:5]
    hsh = get_hexdigest(salt, raw_password)
    # Stored format is "salt$hash".
    return '%s$%s' % (salt, hsh)
def check_password(raw_password, enc_password):
    # Split the stored "salt$hash" value, recompute the hash and compare.
    salt, hsh = enc_password.split('$', 1)
    return hsh == get_hexdigest(salt, raw_password)
|
[
"coleifer@gmail.com"
] |
coleifer@gmail.com
|
ba897465ddc7bea4ef33e45bb292ec6dcdea5381
|
392495a85f77e72e7c3562576aa362d7860c17ee
|
/backend/setup.py
|
244a73c8d40ebb0836da93cb7d08757fdc76199d
|
[] |
no_license
|
messa/aiohttp-nextjs-graphql-demo-forum
|
ef51c26720a6f67a36f08d5caeba4e2d9bef0332
|
38fb66d011faec881b184e132aa7347517ee99e6
|
refs/heads/master
| 2020-04-16T22:38:08.171305
| 2019-02-04T02:18:35
| 2019-02-04T02:18:35
| 165,976,811
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
#!/usr/bin/env python3
"""Setuptools packaging script for the forum-backend service."""
from setuptools import setup, find_packages
setup(
    name='forum-backend',
    version='0.0.1',
    packages=find_packages(exclude=['doc', 'tests*']),
    install_requires=[
        'aiohttp',
        'aiohttp-graphql',
        'pyyaml',
    ],
    entry_points={
        # Installs a `forum-backend` console command that runs forum_backend.main().
        'console_scripts': [
            'forum-backend=forum_backend:main',
        ],
    })
|
[
"petr.messner@gmail.com"
] |
petr.messner@gmail.com
|
410aa5e90d452ce0c150cc25c78df4ee555a14c6
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc094/arc095_a/7981214.py
|
7f6c3dcab7bd1fb3884adf64c039c5841bf608cf
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886
| 2021-03-01T12:52:26
| 2021-03-01T12:52:26
| 227,364,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
# For each element a[i], print the median of the other N-1 elements (N is
# even): removing a value from the lower half leaves the upper-middle value
# as median, and vice versa.
N=int(input())
a=list(map(int,input().split()))
b=sorted(a)
# ans[0] = upper-middle value, ans[1] = lower-middle value of the sorted list.
ans=[b[N//2],b[(N//2) -1]]
for i in range(N):
    if a[i] >= ans[0]:
        print(ans[1])
    else:
        print(ans[0])
|
[
"kouhei.k.0116@gmail.com"
] |
kouhei.k.0116@gmail.com
|
63bd83adcb7f9700378098678b26a5b39b3d7a86
|
719853613b5b96f02072be1fde736d883e799f02
|
/server/intrinsic/management/commands/intrinsic_import_ec2.py
|
a6bd9aeef70b6ccd8ad1fe6dbb896cfbc53d5e39
|
[
"MIT",
"CC-BY-2.0"
] |
permissive
|
anmolkabra/opensurfaces
|
5ba442123586533a93eb29890fa1694e3efdbfe8
|
a42420083a777d7e1906506cc218f681c5cd145b
|
refs/heads/master
| 2020-03-20T01:11:05.182880
| 2018-06-13T14:55:45
| 2018-06-13T14:55:45
| 137,068,945
| 0
| 0
|
MIT
| 2018-06-12T12:32:53
| 2018-06-12T12:32:52
| null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
import glob
import time
import timeit
from django.core.management.base import BaseCommand
from intrinsic.tasks import import_ec2_task
class Command(BaseCommand):
args = ''
help = 'Import image algorithms run on ec2'
def handle(self, *args, **options):
indir = '/vol/completed-tasks'
scheduled_fnames = {}
sleep_time = 2
total_count = None
start_time = None
first = True
while True:
files = glob.glob("%s/*.pickle" % indir)
c = 0
for fname in files:
if fname in scheduled_fnames:
scheduled_fnames[fname] -= sleep_time
else:
scheduled_fnames[fname] = 0
if scheduled_fnames[fname] <= 0:
import_ec2_task.delay(fname)
scheduled_fnames[fname] = 3600
c += 1
# ignore the first time
if first:
total_count = 0
start_time = timeit.default_timer()
rate = "N/A"
first = False
else:
total_count += c
time_elapsed = max(timeit.default_timer() - start_time, 1e-3)
rate = "%.3f" % (float(total_count) / time_elapsed)
if c > 0:
sleep_time = max(sleep_time // 2, 2)
else:
sleep_time = min(sleep_time * 2, 3600)
time.sleep(sleep_time)
print "%s new files (average %s files/s); sleep for %s seconds..." % (
c, rate, sleep_time)
|
[
"sbell@cs.cornell.edu"
] |
sbell@cs.cornell.edu
|
7c0088fc02afdb9058cbb4fdf743efb97e73fad2
|
f76f83dcdfdbfe254ab67e26b244475d2e810819
|
/conttudoweb/inventory/migrations/0016_auto_20200723_1607.py
|
3116c549689509a9211c9601d3096006c7d686c2
|
[] |
no_license
|
ConTTudOweb/ConTTudOwebProject
|
fda13ece406e1904d6efe4c3ceebd30e3d168eae
|
18c3b8da1f65714eb01a420a0dbfb5305b9461f3
|
refs/heads/master
| 2022-12-14T22:05:00.243429
| 2021-03-15T23:32:41
| 2021-03-15T23:32:41
| 138,349,067
| 1
| 3
| null | 2022-12-08T07:49:21
| 2018-06-22T21:19:03
|
Python
|
UTF-8
|
Python
| false
| false
| 436
|
py
|
# Generated by Django 3.0.8 on 2020-07-23 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Product.description unique (max 120 chars); the verbose_name
    # 'descrição' is Portuguese for "description" and is a runtime label.
    dependencies = [
        ('inventory', '0015_auto_20200723_1600'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.CharField(max_length=120, unique=True, verbose_name='descrição'),
        ),
    ]
|
[
"sandrofolk@hotmail.com"
] |
sandrofolk@hotmail.com
|
1b78135398abeca244e835d6de11727d963c8134
|
49ee49ee34fa518b0df934081f5ea44a0faa3451
|
/study-crow-framework/crow/examples/example_test.py
|
d252df0b805e995dadd5e2d37ab2bed1e000c5f6
|
[
"BSD-3-Clause",
"MIT",
"ISC"
] |
permissive
|
kingsamchen/Eureka
|
a9458fcc7d955910bf2cefad3a1561cec3559702
|
e38774cab5cf757ed858547780a8582951f117b4
|
refs/heads/master
| 2023-09-01T11:32:35.575951
| 2023-08-27T15:21:42
| 2023-08-27T15:22:31
| 42,903,588
| 28
| 16
|
MIT
| 2023-09-09T07:33:29
| 2015-09-22T01:27:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,401
|
py
|
import urllib
assert "Hello World!" == urllib.urlopen('http://localhost:18080').read()
assert "About Crow example." == urllib.urlopen('http://localhost:18080/about').read()
assert 404 == urllib.urlopen('http://localhost:18080/list').getcode()
assert "3 bottles of beer!" == urllib.urlopen('http://localhost:18080/hello/3').read()
assert "100 bottles of beer!" == urllib.urlopen('http://localhost:18080/hello/100').read()
assert 400 == urllib.urlopen('http://localhost:18080/hello/500').getcode()
assert "3" == urllib.urlopen('http://localhost:18080/add_json', data='{"a":1,"b":2}').read()
assert "3" == urllib.urlopen('http://localhost:18080/add/1/2').read()
# test persistent connection
import socket
import time
s = socket.socket()
s.connect(('localhost', 18080))
for i in xrange(10):
s.send('''GET / HTTP/1.1
Host: localhost\r\n\r\n''');
assert 'Hello World!' in s.recv(1024)
# test large
s = socket.socket()
s.connect(('localhost', 18080))
s.send('''GET /large HTTP/1.1
Host: localhost\r\nConnection: close\r\n\r\n''')
r = ''
while True:
d = s.recv(1024*1024)
if not d:
break;
r += d
print len(r), len(d)
print len(r), r[:100]
assert len(r) > 512*1024
# test timeout
s = socket.socket()
s.connect(('localhost', 18080))
# invalid request, connection will be closed after timeout
s.send('''GET / HTTP/1.1
hHhHHefhwjkefhklwejfklwejf
''')
print s.recv(1024)
|
[
"kingsamchen@gmail.com"
] |
kingsamchen@gmail.com
|
152e6de373d3950907e1041d754d5e444fc78569
|
c71e5115b895065d2abe4120799ffc28fa729086
|
/procon-archive/atcoder.jp/abc129/abc129_c/Main.py
|
7e58a42e6fbe088cfc45aa4987d551c677b95895
|
[] |
no_license
|
ken0105/competitive-programming
|
eb82f92a7b7ad0db601ea341c1441de6c6165064
|
f918f85a0ea6dfbe9cac3ef835f80503bb16a75d
|
refs/heads/master
| 2023-06-05T09:55:25.264731
| 2021-06-29T14:38:20
| 2021-06-29T14:38:20
| 328,328,825
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
from bisect import bisect, bisect_right, bisect_left
if __name__ == "__main__":
    # Staircase with broken steps: count the ways to reach step n using
    # steps of size 1 or 2 without landing on any of the m broken steps.
    n,m = map(int,input().split())
    a = set()
    for i in range(m):
        a.add(int(input()))
    # dp[i] = number of ways to reach step i (stays 0 for broken steps).
    dp = [0] * (n + 1)
    dp[0] = 1
    for i in range(1,n+1):
        if i not in a and i >= 2:
            dp[i] = (dp[i-1] + dp[i-2])
        elif i not in a and i == 1:
            dp[i] = dp[i-1]
    # Modulo 1e9+7 applied once at the end; Python ints do not overflow.
    print(dp[n] % 1000000007)
|
[
"iwata.kenaaa@gmail.com"
] |
iwata.kenaaa@gmail.com
|
8fb3f79b350977c88931c3266b2db486922dcec9
|
ffad717edc7ab2c25d5397d46e3fcd3975ec845f
|
/Python/pyesri/ANSWERS/countwords.py
|
3cb94d4482bdf35763fd40b40028fc5136cad2d1
|
[] |
no_license
|
shaunakv1/esri-developer-conference-2015-training
|
2f74caea97aa6333aa38fb29183e12a802bd8f90
|
68b0a19aac0f9755202ef4354ad629ebd8fde6ba
|
refs/heads/master
| 2021-01-01T20:35:48.543254
| 2015-03-09T22:13:14
| 2015-03-09T22:13:14
| 31,855,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
#!/usr/bin/python
# Python 2 script: for each FILE argument, count and report how many lines
# contain the literal PATTERN (plain substring match, not a regex).
import sys
if len(sys.argv) < 3:
    print "Syntax: countwords.py PATTERN FILE ..."
    sys.exit()
pattern = sys.argv[1]
for fname in sys.argv[2:]:
    count = 0
    with open(fname) as f:
        for line in f:
            if pattern in line:
                count += 1
    print '''"{0}" occurred on {1} lines in {2}'''.format(pattern,count,fname)
|
[
"shaunakv1@gmail.com"
] |
shaunakv1@gmail.com
|
2c14b342ece31335f536bac793332b879a2c8b94
|
7f54637e347e5773dfbfded7b46b58b50544cfe5
|
/8-1/chainxy/settings.py
|
0f222740778cd9f63c7bbb6304924cd66e17b44f
|
[] |
no_license
|
simba999/all-scrapy
|
5cc26fd92b1d03366b74d4fff58c4a0641c85609
|
d48aeb3c00fa2474153fbc8d131cf58402976e1d
|
refs/heads/master
| 2021-01-25T14:24:04.715550
| 2018-03-03T13:43:13
| 2018-03-03T13:43:13
| 123,695,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,587
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for chainxy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'chainxy'
SPIDER_MODULES = ['chainxy.spiders']
NEWSPIDER_MODULE = 'chainxy.spiders'
# Feed export
FEED_FORMAT = 'csv' # exports to csv
FEED_EXPORT_FIELDS = ['store_number', 'address'] # which fields should be exported
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'chainxy (+http://www.yourdomain.com)'
USER_AGENT = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
# NOTE(review): setting UserAgentMiddleware to None disables the middleware
# that applies the USER_AGENT setting above -- confirm this is intended.
DOWNLOADER_MIDDLEWARES = {'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,}
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'chainxy.middlewares.ChainxySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'chainxy.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'chainxy.pipelines.ChainxyPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"oliverking8985@yahoo.com"
] |
oliverking8985@yahoo.com
|
58e695680127bb42f2f78903fc84e26e9f79b012
|
7822e658e88f3f948732e6e3e588ca4b2eb5662a
|
/guias/2012-2/octubre-17/torneos.py
|
3cc64246b4e91ed046f843aea8d045bff0ea5db2
|
[] |
no_license
|
carlos2020Lp/progra-utfsm
|
632b910e96c17b9f9bb3d28329e70de8aff64570
|
a0231d62837c54d4eb8bbf00bb1b84484efc1af2
|
refs/heads/master
| 2021-05-28T06:00:35.711630
| 2015-02-05T02:19:18
| 2015-02-05T02:19:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
def contar_partidos(partidos):
    """Return the number of matches recorded in *partidos*."""
    cantidad = len(partidos)
    return cantidad
def obtener_equipos(partidos):
    """Return a sorted list of every team appearing in *partidos*.

    *partidos* is an iterable of (local, visita) pairs.
    """
    participantes = set()
    for pareja in partidos:
        # Each pair contributes both the home and the away team.
        participantes.update(pareja)
    return sorted(participantes)
def obtener_fechas(partidos):
    """Return the sorted list of distinct match dates.

    *partidos* maps (local, visita) -> (fecha, resultado).
    """
    distintas = {fecha for fecha, _ in partidos.values()}
    return sorted(distintas)
def calcular_puntos(partidos, equipo):
    """Return the league points earned by *equipo*.

    *partidos* maps (local, visita) -> (fecha, resultado), where resultado
    is None for an unplayed match or a (goles_local, goles_visita) tuple.
    Win = 3 points, draw = 1, loss = 0; unplayed matches award nothing.
    """
    puntos = 0
    for p in partidos:
        _, resultado = partidos[p]
        # `is None` (identity) is the idiomatic None test, not `== None`.
        if resultado is None:
            continue
        local, visita = p
        gl, gv = resultado
        if equipo == local:
            if gl > gv:
                puntos += 3
            elif gl == gv:
                puntos += 1
        elif equipo == visita:
            if gl < gv:
                puntos += 3
            elif gl == gv:
                puntos += 1
    return puntos
def calcular_diferencia(partidos, equipo):
    """Return the goal difference of *equipo* over all played matches.

    *partidos* maps (local, visita) -> (fecha, resultado); resultado is
    None for an unplayed match, else (goles_local, goles_visita).
    """
    diferencia = 0
    for p in partidos:
        _, resultado = partidos[p]
        # `is None` (identity) is the idiomatic None test, not `== None`.
        if resultado is None:
            continue
        gl, gv = resultado
        local, visita = p
        if equipo == local:
            diferencia += (gl - gv)
        elif equipo == visita:
            diferencia += (gv - gl)
    return diferencia
def ordenar_equipos(partidos):
    """Return every team sorted best-first.

    Ordering key: points, then goal difference, then team name, all
    descending (team names are unique, so the order is total).
    """
    def clave(equipo):
        return (calcular_puntos(partidos, equipo),
                calcular_diferencia(partidos, equipo),
                equipo)
    return sorted(obtener_equipos(partidos), key=clave, reverse=True)
|
[
"rbonvall@gmail.com"
] |
rbonvall@gmail.com
|
958bffbcef5c0c35574ec6229d4eb3360c9cde5e
|
9d9fcf401bb47ccaaa6c3fd3fe7a8be255762855
|
/libs/numpy/sort/argsort.py
|
2725c26fb628d43f78413d5fa7ac417f25fcd07d
|
[] |
no_license
|
hanhiver/PythonBasic
|
f05ef9fe713f69610860c63e5223317decee09ad
|
8e012855cce61fb53437758021416e5f6deb02ea
|
refs/heads/master
| 2022-10-11T22:57:47.931313
| 2020-12-30T12:32:44
| 2020-12-30T12:32:44
| 148,477,052
| 0
| 3
| null | 2022-10-01T05:35:03
| 2018-09-12T12:29:33
|
Python
|
UTF-8
|
Python
| false
| false
| 187
|
py
|
import numpy as np
# Demo: np.argsort along axis 0 of a random 4x5 integer matrix.
a = np.random.randint(0, 10, (4, 5))
print(a, '\n')
# index[i, j] is the row index of the i-th smallest value in column j.
index = np.argsort(a, axis=0)
print(index, '\n')
# The row order that would sort column 3 ascending.
index_3 = index[..., 3]
print(index_3, '\n')
# Whole matrix with rows reordered so that column 3 is sorted.
print(a[index_3])
|
[
"handongfr@163.com"
] |
handongfr@163.com
|
385dc29e8a96a82daa9709d0c22d2c368662202c
|
be0d83dde6b499b60f36c14c961a125581f36a57
|
/preprocess_files/mv_img.py
|
592114d7036909b48ede59be9b6dcca5df06b54f
|
[] |
no_license
|
zhengziqiang/gan_learning
|
4acaf18f452fed0e2eeb0ddb45d861e9d10af835
|
d9ffb1c18e592715b62df684e23a362f8d07ac41
|
refs/heads/master
| 2021-01-01T13:35:28.696378
| 2017-10-29T13:42:51
| 2017-10-29T13:42:51
| 97,583,619
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import os
import glob

# Count how many images exist per person in the realFaces directory.
# File names look like "<name><number>.jpg"; the alphabetic prefix of the
# stem is the person's name.
#
# Fixes over the original: `dict.has_key` and using `filter(str.isalpha, s)`
# as a dict key are Python-2-only (under Python 3 every `filter` call is a
# distinct iterator object, so the counts would never aggregate).
dest = '/home/zzq/research/windows_file/IIIT-CFW1.0/dest/'
name = {}
for files in glob.glob('/home/zzq/research/windows_file/IIIT-CFW1.0/realFaces/*.jpg'):
    filepath, filename = os.path.split(files)
    stem = filename.split('.')[0]
    # Keep only the alphabetic characters (drops the trailing index digits).
    my_name = ''.join(ch for ch in stem if ch.isalpha())
    name[my_name] = name.get(my_name, 0) + 1
|
[
"1174986943@qq.com"
] |
1174986943@qq.com
|
9813d2f1469dc08e215edac52165f3615023264d
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/AlibabaOpendspAdgroupsAddRequest.py
|
ecc347df1177f0300f8f99e6b18777f4d00cdb29
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570
| 2016-03-04T09:48:24
| 2016-03-04T09:48:24
| 45,093,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:32
'''
from top.api.base import RestApi
class AlibabaOpendspAdgroupsAddRequest(RestApi):
	"""Auto-generated TOP request for 'alibaba.opendsp.adgroups.add'.

	Parameter handling, signing and transport live in the RestApi base
	class; this subclass only supplies the API method name.
	"""
	def __init__(self,domain='gw.api.taobao.com',port=80):
		RestApi.__init__(self,domain, port)

	def getapiname(self):
		# Method name used by RestApi when dispatching the request.
		return 'alibaba.opendsp.adgroups.add'
|
[
"yangwenjin@T4F-MBP-17.local"
] |
yangwenjin@T4F-MBP-17.local
|
cf634701ce51fc3cb9c14499ec878f065f7baad4
|
427cb811a465677542172b59f5e5f102e3cafb1a
|
/python/classes/subClass.py
|
2244d735db508c992121644c9b9e179b8a63ef61
|
[] |
no_license
|
IzaakWN/CodeSnippets
|
1ecc8cc97f18f77a2fbe980f322242c04dacfb89
|
07ad94d9126ea72c1a8ee5b7b2af176c064c8854
|
refs/heads/master
| 2023-07-26T21:57:10.660979
| 2023-07-20T20:35:59
| 2023-07-20T20:35:59
| 116,404,943
| 18
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
# http://www.jesshamrick.com/2011/05/18/an-introduction-to-classes-and-inheritance-in-python/
# https://stackoverflow.com/questions/2843165/python-how-to-inherite-and-override
# http://blog.thedigitalcatonline.com/blog/2014/05/19/method-overriding-in-python/
# https://docs.python.org/2.7/library/functions.html#super
class Animal(object):
  """Base class: a named animal with an age; prints generic messages."""
  def __init__(self,name,age):
    self.name = name
    self.age = age
  def makeNoise(self):
    print ">>> %s makes a noise"%(self.name)
  def printName(self):
    print ">>> Animal name = \"%s\""%(self.name)
  def printClassification(self):
    print ">>> Animal"
class Dog(Animal):
  """Animal subclass demonstrating method overriding and super() calls."""
  def __init__(self,name,age):
    Animal.__init__(self,name,age)
    # or super(Dog,self).__init__(name,age)]
  def makeNoise(self):
    print ">>> %s says \"%s\""%(self.name,"Woof!")
  def printName(self):
    print ">>> Dog name = \"%s\""%(self.name)
  def printClassification(self):
    # Runs the parent implementation first, then appends its own line.
    super(Dog,self).printClassification()
    print ">>> Dog"
# Demo: the same method calls dispatch to the subclass overrides for the
# Dog instance (polymorphism).
animal1 = Animal("Carrol",2)
animal2 = Dog("Yeller",4)
print "\n>>> animal 1"
animal1.makeNoise()
animal1.printName()
print ">>>\n>>> animal 2"
animal2.makeNoise()
animal2.printName()
animal2.printClassification()
print
|
[
"iwn_@hotmail.com"
] |
iwn_@hotmail.com
|
b5f7b40cdab61e773d1bec1c144966fc8c019ad5
|
b9878c92b857f73ff0452fc51c822cfc9fa4dc1c
|
/watson_machine_learning_client/libs/repo/swagger_client/models/connection_object_target_experiments.py
|
f8548c105d870dc07cfbde41d0896b443cf3f175
|
[] |
no_license
|
DavidCastilloAlvarado/WMLC_mod
|
35f5d84990c59b623bfdd27369fe7461c500e0a5
|
f2673b9c77bd93c0e017831ee4994f6d9789d9a1
|
refs/heads/master
| 2022-12-08T02:54:31.000267
| 2020-09-02T15:49:21
| 2020-09-02T15:49:21
| 292,322,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,806
|
py
|
# coding: utf-8
"""
No descripton provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ConnectionObjectTargetExperiments(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, type=None, connection=None, target=None):
        """
        ConnectionObjectTargetExperiments - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps attribute name -> swagger type string; consumed by to_dict().
        self.swagger_types = {
            'type': 'str',
            'connection': 'dict(str, str)',
            'target': 'object'
        }
        # Maps attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'type': 'type',
            'connection': 'connection',
            'target': 'target'
        }
        self._type = type
        self._connection = connection
        self._target = target

    @property
    def type(self):
        """
        Gets the type of this ConnectionObjectTargetExperiments.

        :return: The type of this ConnectionObjectTargetExperiments.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this ConnectionObjectTargetExperiments.

        :param type: The type of this ConnectionObjectTargetExperiments.
        :type: str
        """
        self._type = type

    @property
    def connection(self):
        """
        Gets the connection of this ConnectionObjectTargetExperiments.

        :return: The connection of this ConnectionObjectTargetExperiments.
        :rtype: dict(str, str)
        """
        return self._connection

    @connection.setter
    def connection(self, connection):
        """
        Sets the connection of this ConnectionObjectTargetExperiments.

        :param connection: The connection of this ConnectionObjectTargetExperiments.
        :type: dict(str, str)
        """
        self._connection = connection

    @property
    def target(self):
        """
        Gets the target of this ConnectionObjectTargetExperiments.

        :return: The target of this ConnectionObjectTargetExperiments.
        :rtype: object
        """
        return self._target

    @target.setter
    def target(self, target):
        """
        Sets the target of this ConnectionObjectTargetExperiments.

        :param target: The target of this ConnectionObjectTargetExperiments.
        :type: object
        """
        self._target = target

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models/lists/dicts via their to_dict().
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): compares __dict__ only; `other` values without a
        # __dict__ (e.g. ints) raise AttributeError -- confirm acceptable.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"dcastilloa@uni.pe"
] |
dcastilloa@uni.pe
|
68740806ca9fdcb8c924b5a4b88a4c98f0efd8d7
|
3b831eedb7afede666088b6e018c829219938a93
|
/Grouping_Values.py
|
d73419177b17ac18330e2f7223561e75e54c044e
|
[] |
no_license
|
joydas65/GeeksforGeeks
|
f03ed1aaea88d894f4d8ac0d70f574c4cd78a64b
|
e58c42cb3c9fe3a87e6683d8e3fda442dc83b45b
|
refs/heads/master
| 2023-01-12T02:19:54.967779
| 2023-01-10T17:28:41
| 2023-01-10T17:28:41
| 161,937,667
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
class Solution:
    def isPossible(self, N, arr, K):
        """Return 1 if the values of *arr* can be split into groups, else 0.

        A value is distributable iff it occurs at most K*2 times (each of
        the K groups can absorb a value at most twice).  *N* is len(arr),
        kept only to match the judge's signature.
        """
        from collections import Counter  # local import: judge supplies only this class
        counts = Counter(arr)
        for freq in counts.values():
            # Any value occurring more than K*2 times cannot be placed.
            if freq > K * 2:
                return 0
        return 1
|
[
"noreply@github.com"
] |
joydas65.noreply@github.com
|
bfe6eb6e9734dbfe24074e1964400cdb06a23cc3
|
fce1b262820539e8574e5476692096f599ca2b27
|
/luffycity_s8/luffy/views/article.py
|
fecd0d8cf009734ca9798d3523d3afb6d261806e
|
[] |
no_license
|
iyouyue/green_hand
|
9386082a0589ee6e1805aafe189ee38e823c8202
|
7b80e8cc0622e4d8e9d07dde37c72ac7d6e3261c
|
refs/heads/master
| 2020-03-26T14:39:02.224727
| 2018-08-16T14:27:57
| 2018-08-16T14:27:57
| 144,997,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from django.core.exceptions import ObjectDoesNotExist
from luffy import models
from luffy.response.base import BaseResponse
from luffy.serializers.article import ArticleSerializer, ArticleDetailSerializer
from luffy.pagination.page import LuffyPageNumberPagination
class MyException(Exception):
    """Domain exception carrying a human-readable message.

    Calls Exception.__init__ so that str(exc) and exc.args behave as
    callers expect, while keeping the original ``.msg`` attribute for
    backward compatibility.
    """
    def __init__(self, msg):
        super(MyException, self).__init__(msg)
        self.msg = msg
class ArticleView(GenericViewSet):
    """Read-only article endpoints: a paginated list and a detail view.

    Both actions return a BaseResponse dict with .code/.error set on
    failure instead of raising, so the API always answers 200.
    """
    renderer_classes = [JSONRenderer,]

    def list(self, request, *args, **kwargs):
        # GET /articles/ -- newest first, trimmed to id/title/brief.
        ret = BaseResponse()
        try:
            # 1. Fetch the data
            article_list = models.Article.objects.all().only('id', 'title','brief').order_by('-id')
            # 2. Paginate the queryset
            page = LuffyPageNumberPagination()
            page_article_list = page.paginate_queryset(article_list, request, self)
            # 3. Serialize the page
            ser = ArticleSerializer(instance=page_article_list, many=True)
            ret.data = ser.data
        except Exception as e:
            # Deliberate catch-all: any failure becomes an error payload.
            ret.code = 1001
            ret.error = '获取数据失败'
        return Response(ret.dict)

    def retrieve(self, request, pk, *args, **kwargs):
        # GET /articles/<pk>/ -- full detail for one article.
        ret = BaseResponse()
        try:
            obj = models.Article.objects.get(id=pk)
            ser = ArticleDetailSerializer(instance=obj, many=False)
            ret.data = ser.data
        except ObjectDoesNotExist as e:
            # Missing row gets its own error code, distinct from 1002.
            ret.code = 1001
            ret.error = '查询数据不存在'
        except Exception as e:
            ret.code = 1002
            ret.error = "查询失败"
        return Response(ret.dict)
|
[
"iyouyue@qq.com"
] |
iyouyue@qq.com
|
16632e1cfd929360e81b6b66540741a40107d618
|
113d9082d153adbccd637da76318b984f249baf5
|
/setup.py
|
b2cce85ef433c74f9b005df1a6e7c62d9261ca91
|
[
"BSD-3-Clause"
] |
permissive
|
jorcast18462/django-applepodcast
|
bebb6f85d4c3ed98c96e6628443ece613898ca32
|
50732acfbe1ca258e5afb44c117a6ac5fa0c1219
|
refs/heads/master
| 2023-03-21T13:05:08.576831
| 2018-10-06T22:19:12
| 2018-10-06T22:19:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
from __future__ import unicode_literals
import os
from setuptools import find_packages, setup
# Package metadata for django-applepodcast; consumed by pip/setuptools.
setup(
    name='django-applepodcast',
    version='0.3.7',
    description='A Django podcast app optimized for Apple Podcasts',
    # Long description is the README next to this file.
    long_description=open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.rst')).read(),
    author='Richard Cornish',
    author_email='rich@richardcornish.com',
    url='https://github.com/richardcornish/django-applepodcast',
    license='BSD',
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(exclude=('tests',)),
    # Runtime dependencies (Django itself is declared via classifiers only).
    install_requires=[
        'bleach',
        'mutagen',
        'pillow',
    ],
    test_suite='podcast.tests',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.0',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
|
[
"richardcornish@gmail.com"
] |
richardcornish@gmail.com
|
a9340662bebfa1cdd1adef79408712eb2e5883fd
|
7188e4eca6bb6ba03453e5c1d9e3134e9ef1b588
|
/apps/clndr/apps.py
|
29d6fb8f53be320b7e1c8a59f9267f426baf18ea
|
[] |
no_license
|
mitshel/ghc_yapokaju
|
c85eb2c3cbfd9802f6fac16a6d6192ae85ad2511
|
d70b53235223dc935792aac3838678cb1b4d2b2e
|
refs/heads/master
| 2020-05-15T21:50:15.646729
| 2019-04-21T08:48:31
| 2019-04-21T08:48:31
| 182,509,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from django.apps import AppConfig
class ClndrConfig(AppConfig):
    """Django app config for apps.clndr."""
    name = 'apps.clndr'
    verbose_name = 'The calendar'

    def ready(self):
        # Imported for its side effect: registers the app's signal
        # receivers once the app registry is fully loaded.
        from . import signals
|
[
"mitshel@mail.ru"
] |
mitshel@mail.ru
|
23d6a04e73cb64a8b99b1049956a491e698cfc84
|
86dc81e21f5b9e784dd087666d4d980c34781536
|
/udp_bro_send.py
|
596343dd578225cf7d1f4e55544f7bb7e2be5825
|
[] |
no_license
|
sheltie03/udp_python
|
37b4e1f3377979c26e247a020efb958b3dfc28e5
|
cb0551fc4026a3baff968e81b758ea4d7d7e5fd6
|
refs/heads/master
| 2021-07-09T15:37:46.684924
| 2017-10-02T08:06:25
| 2017-10-02T08:06:25
| 105,496,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
# -*- coding: utf-8 -*-
import socket
import time
def main():
    """Broadcast 'Hello Server' UDP datagrams on port 4000, once a second.

    Binds to the port, enables SO_BROADCAST and sends to the limited
    broadcast address (LAN only) until interrupted.
    """
    host = ''
    port = 4000
    # local_addr = '192.168.10.255'
    local_addr = '255.255.255.255'
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.bind((host, port))
    try:
        while True:
            msg = 'Hello Server'.encode('utf-8')
            print(msg)
            sock.sendto(msg, (local_addr, port))
            # Throttle the loop: the original busy-looped and flooded the
            # network (the `time` import was present but unused).
            time.sleep(1)
    finally:
        # Close the socket even on KeyboardInterrupt.
        sock.close()


if __name__ == '__main__':
    main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
c261a3aa2393582101930b0d509c572623981a2b
|
29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68
|
/lightly/openapi_generated/swagger_client/models/docker_run_scheduled_priority.py
|
8f59946a24631b8670f78eced6e272cd1b4e2588
|
[
"MIT"
] |
permissive
|
lightly-ai/lightly
|
5b655fe283b7cc2ddf1d7f5bd098603fc1cce627
|
5650ee8d4057139acf8aa10c884d5d5cdc2ccb17
|
refs/heads/master
| 2023-08-17T11:08:00.135920
| 2023-08-16T12:43:02
| 2023-08-16T12:43:02
| 303,705,119
| 2,473
| 229
|
MIT
| 2023-09-14T14:47:16
| 2020-10-13T13:02:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class DockerRunScheduledPriority(str, Enum):
    """Priority level of a scheduled docker run (string-valued enum)."""

    # allowed enum values
    LOW = 'LOW'
    MID = 'MID'
    HIGH = 'HIGH'
    CRITICAL = 'CRITICAL'

    @classmethod
    def from_json(cls, json_str: str) -> 'DockerRunScheduledPriority':
        """Create an instance of DockerRunScheduledPriority from a JSON string"""
        decoded = json.loads(json_str)
        return cls(decoded)
|
[
"noreply@github.com"
] |
lightly-ai.noreply@github.com
|
2613f41ca4dc3a52d8a9eba8b22d5db1b4f73c1e
|
04d9ee05feb6dddf19b9f7653f4dd9e9ce3ee95c
|
/rbtools/commands/install.py
|
03724c98f41a1da2e2262344e0806a96951d6e81
|
[
"MIT"
] |
permissive
|
pbwkoswara/rbtools
|
2fa44ade1c60b4f076198bb8206a5d624dd40cd2
|
8ea5ff8843d2a3d44056ad4358d75c81a066cf28
|
refs/heads/master
| 2021-07-17T22:22:20.906220
| 2017-10-20T22:11:03
| 2017-10-25T17:05:21
| 108,022,324
| 0
| 0
| null | 2017-10-23T18:26:30
| 2017-10-23T18:26:30
| null |
UTF-8
|
Python
| false
| false
| 7,398
|
py
|
from __future__ import division, print_function, unicode_literals
import hashlib
import logging
import os
import shutil
import tempfile
import zipfile
import tqdm
from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.request import urlopen
from rbtools.commands import Command, CommandError
from rbtools.utils.appdirs import user_data_dir
from rbtools.utils.checks import check_install
from rbtools.utils.process import execute
class Install(Command):
    """Install a dependency.

    This allows RBTools to install external dependencies that may be needed for
    some features.
    """

    name = 'install'
    author = 'The Review Board Project'
    description = 'Install an optional dependency.'
    args = '<package>'
    option_list = []

    # Known installable packages, mapped to their download URLs.
    package_urls = {
        'tfs': 'http://downloads.beanbaginc.com/rb-tfs/rb-tfs.zip'
    }

    def main(self, package):
        """Run the command.

        Args:
            package (unicode):
                The name of the package to install.

        Raises:
            rbtools.commands.CommandError:
                An error occurred during installation.
        """
        try:
            url = self.package_urls[package]
        except KeyError:
            err = 'Package "%s" not found. Available packages are:\n' % package
            err += '\n'.join(
                '    %s' % package_name
                for package_name in self.package_urls.keys()
            )

            raise CommandError(err)

        label = 'Downloading %s' % package
        zip_filename = self.download_file(url, label=label)

        try:
            self.check_download(url, zip_filename)
            self.unzip(
                zip_filename,
                os.path.join(user_data_dir('rbtools'), 'packages', package))
        finally:
            # Always remove the temporary download, even on failure.
            os.unlink(zip_filename)

    def check_download(self, url, zip_filename):
        """Check to see if the file was successfully downloaded.

        If the user has :command:`gpg` installed on their system, use that to
        check that the package was signed. Otherwise, check the sha256sum.

        Args:
            url (unicode):
                The URL that the file came from.

            zip_filename (unicode):
                The filename of the downloaded copy.

        Raises:
            rbtools.commands.CommandError:
                The authenticity of the file could not be verified.
        """
        if check_install('gpg'):
            execute(['gpg', '--recv-keys', '4ED1F993'])
            sig_filename = self.download_file('%s.asc' % url)

            try:
                retcode, output, errors = execute(
                    ['gpg', '--verify', sig_filename, zip_filename],
                    with_errors=False, ignore_errors=True,
                    return_error_code=True, return_errors=True)

                if retcode == 0:
                    logging.debug('Verified file signature')
                else:
                    raise CommandError(
                        'Unable to verify authenticity of file downloaded '
                        'from %s:\n%s' % (url, errors))
            finally:
                os.unlink(sig_filename)
        else:
            logging.info('"gpg" not installed. Skipping signature validation.')

            try:
                sha_url = '%s.sha256sum' % url
                logging.debug('Downloading %s', sha_url)

                response = urlopen(sha_url)
                # Decode before splitting: urlopen returns bytes on
                # Python 3, while hexdigest() below is text.
                real_sha = response.read().decode('utf-8').split(' ')[0]
            except (HTTPError, URLError) as e:
                raise CommandError('Error when downloading file: %s' % e)

            # Open in binary mode: hashing needs the exact bytes (text
            # mode would fail on Python 3 and corrupt data on Windows).
            with open(zip_filename, 'rb') as f:
                our_sha = hashlib.sha256(f.read()).hexdigest()

            if real_sha == our_sha:
                logging.debug('Verified SHA256 hash')
            else:
                logging.debug('SHA256 hash does not match!')
                logging.debug('  Downloaded file hash was: %s', our_sha)
                logging.debug('  Expected hash was: %s', real_sha)

                raise CommandError(
                    'Unable to verify the checksum of the downloaded copy of '
                    '%s.\n'
                    'This could be due to an invasive proxy or an attempted '
                    'man-in-the-middle attack.' % url)

    def unzip(self, zip_filename, package_dir):
        """Unzip a .zip file.

        This method will unpack the contents of a .zip file into a target
        directory. If that directory already exists, it will first be removed.

        Args:
            zip_filename (unicode):
                The absolute path to the .zip file to unpack.

            package_dir (unicode):
                The directory to unzip the files into.

        Raises:
            rbtools.commands.CommandError:
                The file could not be unzipped.
        """
        logging.debug('Extracting %s to %s', zip_filename, package_dir)

        try:
            # Replace any previous install of this package wholesale.
            if os.path.exists(package_dir):
                if os.path.isdir(package_dir):
                    shutil.rmtree(package_dir)
                else:
                    os.remove(package_dir)

            os.makedirs(package_dir)
        except (IOError, OSError) as e:
            raise CommandError('Failed to set up package directory %s: %s'
                               % (package_dir, e))

        zip_file = zipfile.ZipFile(zip_filename, 'r')

        try:
            zip_file.extractall(package_dir)
        except Exception as e:
            raise CommandError('Failed to extract file: %s' % e)
        finally:
            zip_file.close()

    def download_file(self, url, label=None):
        """Download the given file to a temporary location.

        Args:
            url (unicode):
                The URL of the file to download.

            label (unicode, optional):
                The label to use for the progress bar. If this is not
                specified, no progress bar will be shown.

        Returns:
            unicode:
            The filename of the downloaded file. The caller is responsible
            for deleting it.

        Raises:
            rbtools.commands.CommandError:
                An error occurred while downloading the file.
        """
        logging.debug('Downloading %s', url)

        try:
            response = urlopen(url)
            # response.headers.get() works on both Python 2 and 3;
            # the old response.info().getheader() is Python-2-only.
            total_bytes = int(response.headers.get('Content-Length').strip())
            read_bytes = 0

            bar_format = '{desc} {bar} {percentage:3.0f}% [{remaining}]'

            with tqdm.tqdm(total=total_bytes, desc=label or '',
                           ncols=80, disable=label is None,
                           bar_format=bar_format) as bar:
                try:
                    f = tempfile.NamedTemporaryFile(delete=False)

                    while read_bytes != total_bytes:
                        chunk = response.read(8192)
                        chunk_length = len(chunk)
                        read_bytes += chunk_length

                        f.write(chunk)
                        bar.update(chunk_length)
                finally:
                    f.close()

                return f.name
        except (HTTPError, URLError) as e:
            raise CommandError('Error when downloading file: %s' % e)
|
[
"trowbrds@gmail.com"
] |
trowbrds@gmail.com
|
9117f9f2cce95c3f9c960a40127f7cde6384a932
|
d21864a26233d32913c44fd87d6f6e67ca9aabd8
|
/prosodic/lib/Phoneme.py
|
876171217cb508068e7a472fe4fc487bf116ba6c
|
[
"MIT"
] |
permissive
|
quadrismegistus/litlab-poetry
|
7721a8849667f2130bb6fa6b9f18a7f6beb9912e
|
28fff4c73344ed95d19d7e9a14e5a20697599605
|
refs/heads/master
| 2021-01-23T20:14:05.537155
| 2018-11-19T08:56:55
| 2018-11-19T08:56:55
| 27,054,260
| 16
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,454
|
py
|
from ipa import ipa,ipakey,ipa2cmu,formantd
from entity import entity
class Phoneme(entity):
	"""A phoneme (or a dipthong built from child Phonemes) with IPA features.

	Feature values come from the `ipa` table (column names in `ipakey`);
	feats/feat()/feature() storage is inherited from `entity`.
	Python 2 module (print statements).
	"""
	def __init__(self,phons,ipalookup=True):
		# phons: a phoneme string, or a list of strings/Phonemes; the
		# list form builds a dipthong whose parts become self.children.
		self.feats = {}
		self.children = []	# should remain empty unless dipthong
		self.featpaths={}
		self.phon=None
		if type(phons)==type([]):
			for phon in phons:
				if type(phon)==type(""):
					self.children.append(Phoneme(phon))
				else:
					self.children.append(phon)
			self.feat('dipthong',True)
		else:
			self.phon=phons.strip()
		if ipalookup and self.phon:
			# Copy this phoneme's feature row out of the ipa table;
			# ipakey[k] names the k-th feature column.
			if(self.phon in ipa):
				k=-1
				for v in ipa[self.phon]:
					k+=1
					self.feat(ipakey[k],v)
		self.finished = True
		# Long vowels and dipthongs count as two segments.
		if self.isLong() or self.isDipthong():
			self.len=2
		else:
			self.len=1

	def str_cmu(self):
		"""Return the CMU transcription (lowercased) if one exists."""
		strself=str(self)
		if strself in ipa2cmu:
			return ipa2cmu[strself].lower()
		else:
			print "<error> no cmu transcription for phoneme: ["+strself+"]"
			return strself

	def __str__(self):
		# Dipthongs render as the concatenation of their children.
		if self.children:
			return self.u2s(u"".join([x.phon for x in self.children]))
		else:
			return self.u2s(self.phon)

	def __repr__(self):
		#return "["+str(self)+"]"
		return str(self)

	def isConsonant(self):
		return self.feature('cons')

	def isVowel(self):
		return (self.isDipthong() or self.isPeak())

	def isPeak(self):
		# Syllabic nucleus.
		return self.feature('syll')

	def isDipthong(self):
		return self.feature('dipthong')

	def isLong(self):
		return self.feature('long')

	def isHigh(self):
		return self.feature('high')

	@property
	def phon_str(self):
		# Flat string form: own phon, or the children's concatenation.
		if self.phon: return self.phon
		return u''.join(phon.phon for phon in self.children)

	@property
	def featset(self):
		# The set of truthy feature names (union over children).
		if self.children:
			featset=set()
			for child in self.children:
				featset|=child.featset
			return featset
		else:
			return {feat for feat in self.feats if self.feats[feat]}

	@property
	def featspace(self):
		# Dense feature dict: name -> int(value), with None mapped to 0.
		fs={}
		if self.children:
			for child in self.children:
				#print "CHILD:",child,child.featset
				for f,v in child.feats.items():
					fs[f]=int(v) if v!=None else 0
		else:
			for f,v in self.feats.items():
				fs[f]=int(v) if v!=None else 0
		return fs

	def CorV(self):
		# Skeleton symbol: "VV" (long vowel/dipthong), "V" (peak), else "C".
		if self.isDipthong() or self.isLong():
			return "VV"
		if self.isPeak():
			return "V"
		else:
			return "C"

	def distance(self,other):
		"""Mean euclidean distance between the feature vectors of the two
		phonemes (averaged over child pairs for dipthongs)."""
		lfs1=[self.featspace] if not self.children else [c.featspace for c in self.children]
		lfs2=[other.featspace] if not other.children else [c.featspace for c in other.children]
		dists=[]
		for fs1 in lfs1:
			for fs2 in lfs2:
				# Align both vectors on the union of their feature names.
				allkeys=set(fs1.keys() + fs2.keys())
				f=sorted(list(allkeys))
				v1=[float(fs1.get(fx,0)) for fx in f]
				v2=[float(fs2.get(fx,0)) for fx in f]
				from scipy.spatial import distance
				dists+=[distance.euclidean(v1,v2)]
		return sum(dists)/float(len(dists))

	def distance0(self,other):
		"""Older metric: Jaccard feature overlap plus, for vowel pairs,
		F1/F2 formant distance from the `formantd` table."""
		import math
		feats1=self.featset
		feats2=other.featset
		jc=len(feats1&feats2) / float(len(feats1 | feats2))
		vdists=[]
		if not 'cons' in feats1 and not 'cons' in feats2:
			## ADD VOWEL F1,F2 DIST
			v1=[p for p in self.phon_str if p in formantd]
			v2=[p for p in other.phon_str if p in formantd]
			if not v1 or not v2:
				# Penalty when a vowel has no formant data.
				vdists+=[2]
			for v1x in v1:
				for v2x in v2:
					#print v1x,v2x
					vdist=math.sqrt( (formantd[v1x][0] - formantd[v2x][0])**2 + (formantd[v1x][1] - formantd[v2x][1])**2)
					#print "ADDING",vdist
					vdists+=[vdist]
		#print self,other,feats1,feats2
		return jc + sum(vdists)

	def __eq__(self,other):
		# Equality is feature-based, not identity- or string-based.
		return self.feats == other.feats
|
[
"ryan.heuser@gmail.com"
] |
ryan.heuser@gmail.com
|
16fa0a4b39d17c4ece50384f657fc65fb6ee0fef
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02271/s666225963.py
|
a1e305d0cdb1bc4f6641e39bb56d1f7301cd5a82
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
# ALDS_5_A - brute force (総当たり)
# Exhaustive search: precompute every subset sum of A (2**n subsets),
# then answer each membership query in O(1).
import sys

n = int(input())
A = list(map(int, sys.stdin.readline().strip().split()))
q = int(input())
m = list(map(int, sys.stdin.readline().strip().split()))

sum_set = set()
for i in range(2 ** n):
    # bit[j] == 1 iff element j is included in subset i.
    bit = [(i >> j) & 1 for j in range(n)]
    combined = [x * y for (x, y) in zip(A, bit)]
    sum_set.add(sum(combined))

for target in m:
    if target in sum_set:
        print('yes')
    else:
        print('no')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9db73616056bed06a9c8484c5aea2920e6c7b81e
|
421b0ae45f495110daec64ed98c31af525585c2c
|
/PythonProgramsTraining/graphics/frame1.py
|
c0c8e6a93f60c197702ad936f518643ad8a67d1b
|
[] |
no_license
|
Pradeepsuthar/pythonCode
|
a2c87fb64c79edd11be54c2015f9413ddce246c4
|
14e2b397f69b3fbebde5b3af98898c4ff750c28c
|
refs/heads/master
| 2021-02-18T05:07:40.402466
| 2020-03-05T13:14:15
| 2020-03-05T13:14:15
| 245,163,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
import tkinter as tk
from tkinter import messagebox
def area():
    """Button callback: read the two entries, compute length*width,
    display it in the result entry, and pop an info box.

    Raises ValueError (uncaught, surfacing in the Tk callback) if either
    entry does not contain a number.
    """
    # Renamed from `len`/`wid`: the original shadowed the builtin len().
    length = float(tfLen.get())
    width = float(tfWidth.get())
    result = length * width
    # Clear any previous result first; otherwise repeated clicks prepend
    # the new value to the stale text already in the entry.
    tfArea.delete(0, tk.END)
    tfArea.insert(0, result)
    # Showing massage box
    messagebox.showinfo("Info MAss ", "Area is : "+str(result)+" CM")
# creating a frame
# Root window, fixed at 200x200 pixels.
frame = tk.Tk()
frame.geometry("200x200")

#Creating controls
# Two input entries (length, width) and one output entry for the area.
tfLen = tk.Entry(frame)
tfWidth = tk.Entry(frame)
tfArea = tk.Entry(frame)
# Button wired to the module-level area() callback.
btn = tk.Button(frame, text="Calculate Area", command=area)

# Adding components on frame
tfLen.pack()
tfWidth.pack()
tfArea.pack()
btn.pack()

# Showing frame
# Blocks here running the Tk event loop until the window is closed.
frame.mainloop()
|
[
"sutharpradeep081@gmail.com"
] |
sutharpradeep081@gmail.com
|
9cd66536cdc51a43bf901eccb7e2154f2e6368ec
|
768058e7f347231e06a28879922690c0b6870ed4
|
/venv/lib/python3.7/site-packages/numba/cuda/simulator/compiler.py
|
5a88a649e47d11efe9887678a7397e77376673b8
|
[] |
no_license
|
jciech/HeisenbergSpinChains
|
58b4238281d8c158b11c6c22dd0da82025fd7284
|
e43942bbd09f6675e7e2ff277f8930dc0518d08e
|
refs/heads/master
| 2022-12-18T08:04:08.052966
| 2020-09-29T12:55:00
| 2020-09-29T12:55:00
| 258,476,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
"""
The compiler is not implemented in the simulator. This module provides a stub
to allow tests to import successfully.
"""
compile_kernel = None
|
[
"jan@multiply.ai"
] |
jan@multiply.ai
|
a02d45d50426a72b18991c0c25da0082ba9e835f
|
1886065d10342822b10063cd908a690fccf03d8b
|
/appengine/findit/crash/loglinear/changelist_classifier.py
|
96277a04aefab650a935aa33a7cf08c3b48f7e7a
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/chromium-infra_A6Y5
|
26af0dee12f89595ebc6a040210c9f62d8ded763
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
refs/heads/master
| 2023-03-16T15:33:31.015840
| 2017-01-31T19:55:59
| 2017-01-31T20:06:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,932
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import logging
from common.chrome_dependency_fetcher import ChromeDependencyFetcher
from crash import changelist_classifier
from crash.changelist_classifier import StackInfo
from crash.crash_report_with_dependencies import CrashReportWithDependencies
from crash.loglinear.model import UnnormalizedLogLinearModel
class LogLinearChangelistClassifier(object):
  """A ``LogLinearModel``-based implementation of CL classification."""

  def __init__(self, get_repository, meta_feature, meta_weight,
               top_n_frames=7, top_n_suspects=3):
    """
    Args:
      get_repository (callable): a function from DEP urls to ``Repository``
        objects, so we can get changelogs and blame for each dep. Notably,
        to keep the code here generic, we make no assumptions about
        which subclass of ``Repository`` this function returns. Thus,
        it is up to the caller to decide what class to return and handle
        any other arguments that class may require (e.g., an http client
        for ``GitilesRepository``).
      meta_feature (MetaFeature): All features.
      meta_weight (MetaWeight): All weights. The keys of the dictionary are
        the names of the feature that weight is for. We take this argument
        as a dict rather than as a list so that callers needn't worry about
        what order to provide the weights in.
      top_n_frames (int): how many frames of each callstack to look at.
      top_n_suspects (int): maximum number of suspects to return.
    """
    self._dependency_fetcher = ChromeDependencyFetcher(get_repository)
    self._get_repository = get_repository
    self._top_n_frames = top_n_frames
    self._top_n_suspects = top_n_suspects
    self._model = UnnormalizedLogLinearModel(meta_feature, meta_weight)

  def __call__(self, report):
    """Finds changelists suspected of being responsible for the crash report.

    Args:
      report (CrashReport): the report to be analyzed.

    Returns:
      List of ``Suspect``s, sorted by probability from highest to lowest.
    """
    annotated_report = CrashReportWithDependencies(
        report, self._dependency_fetcher)
    if annotated_report is None:
      logging.warning('%s.__call__: '
          'Could not obtain dependencies for report: %s',
          self.__class__.__name__, str(report))
      return []

    suspects = self.GenerateSuspects(annotated_report)
    if not suspects:
      logging.warning('%s.__call__: Found no suspects for report: %s',
          self.__class__.__name__, str(annotated_report))
      return []

    return self.RankSuspects(annotated_report, suspects)

  def GenerateSuspects(self, report):
    """Generate all possible suspects for the reported crash.

    Args:
      report (CrashReportWithDependencies): the crash we seek to explain.

    Returns:
      A list of ``Suspect``s who may be to blame for the
      ``report``. Notably these ``Suspect`` instances do not have
      all their fields filled in. They will be filled in later by
      ``RankSuspects``.
    """
    # Look at all the frames from any stack in the crash report, and
    # organize the ones that come from dependencies we care about.
    dep_to_file_to_stack_infos = defaultdict(lambda: defaultdict(list))
    for stack in report.stacktrace:
      for frame in stack:
        if frame.dep_path in report.dependencies:
          dep_to_file_to_stack_infos[frame.dep_path][frame.file_path].append(
              StackInfo(frame, stack.priority))

    dep_to_file_to_changelogs, ignore_cls = (
        changelist_classifier.GetChangeLogsForFilesGroupedByDeps(
            report.dependency_rolls, report.dependencies,
            self._get_repository))

    # Get the possible suspects.
    return changelist_classifier.FindSuspects(
        dep_to_file_to_changelogs,
        dep_to_file_to_stack_infos,
        report.dependencies,
        self._get_repository,
        ignore_cls)

  def RankSuspects(self, report, suspects):
    """Returns a lineup of the suspects in order of likelihood.

    Suspects with a discardable score or lower ranking than top_n_suspects
    will be filtered.

    Args:
      report (CrashReportWithDependencies): the crash we seek to explain.
      suspects (iterable of Suspect): the CLs to consider blaming for the crash.

    Returns:
      A list of suspects in order according to their likelihood (most likely
      first). This list contains elements of the ``suspects`` list, where we
      mutate some of the fields to store information about why that suspect
      is being blamed (e.g., the ``confidence``, ``reasons``, and
      ``changed_files`` fields are updated). In addition to sorting the
      suspects, we also filter out those which are exceedingly unlikely
      or don't make the ``top_n_suspects`` cut.
    """
    # Score the suspects and organize them for outputting/returning.
    features_given_report = self._model.Features(report)
    score_given_report = self._model.Score(report)

    scored_suspects = []
    for suspect in suspects:
      score = score_given_report(suspect)
      if self._model.LogZeroish(score):
        logging.debug('Discarding suspect because it has zero probability: %s'
            % str(suspect.ToDict()))
        continue

      suspect.confidence = score
      # features is ``MetaFeatureValue`` object containing all feature values.
      features = features_given_report(suspect)
      suspect.reasons = features.reason
      suspect.changed_files = [changed_file.ToDict()
                               for changed_file in features.changed_files]
      scored_suspects.append(suspect)

    # BUGFIX: sort descending by confidence. The previous ascending sort
    # followed by the [:top_n] slice returned the *least* likely suspects,
    # contradicting the documented "highest to lowest" contract.
    scored_suspects.sort(key=lambda suspect: suspect.confidence, reverse=True)
    return scored_suspects[:self._top_n_suspects]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
5e02976a619cb1e6ada32cf79cbd4ed879067ae8
|
4b69b5dd4b1b3cf81b996065831226a243abb332
|
/articles/admin.py
|
45fafe2207a9eb4a089c73b9557ee149401c8418
|
[] |
no_license
|
cui0519/myBlog
|
d8ebd601ac5bf5a3fe0dc16e2c703cdbaa055ab9
|
c0852b6e42bfa93820d330e8f9e547be229344e8
|
refs/heads/master
| 2023-02-09T06:33:13.641351
| 2021-01-05T00:18:21
| 2021-01-05T00:18:21
| 326,308,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
from django.contrib import admin
from .models import Articles
# Register your models here.
class ArticlesAdmin(admin.ModelAdmin):
    """Admin configuration for Articles: list columns, search and filtering."""
    list_display = ('title','author','img','abstract','visited','created_at')
    # BUGFIX: the file contained unresolved git merge-conflict markers
    # (<<<<<<< / ======= / >>>>>>>), which are a syntax error. Resolved by
    # keeping the broader field set — a strict superset of the other side's
    # ('title',).
    search_fields = ('title','author','abstract','content')
    # NOTE(review): filtering on every displayed column (including `img` and
    # `abstract`) is unusual for Django admin — confirm this is intended.
    list_filter = list_display

admin.site.register(Articles,ArticlesAdmin)
|
[
"you@example.com"
] |
you@example.com
|
c5b193fb983b5e4d663f93a6485499e152a180c1
|
e5cf5fd657b28d1c01d8fd954a911d72526e3112
|
/tide_teach/tide_time_windows.py
|
b54f5fcebaccedcc95ffb40b903d76d6c69a1cd4
|
[] |
no_license
|
parkermac/ptools
|
6b100f13a44ff595de03705a6ebf14a2fdf80291
|
a039261cd215fe13557baee322a5cae3e976c9fd
|
refs/heads/master
| 2023-01-09T11:04:16.998228
| 2023-01-02T19:09:18
| 2023-01-02T19:09:18
| 48,205,248
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,730
|
py
|
"""
Code to plot observed tide time series.
"""
import os
import sys
import pytz
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import numpy as np
from importlib import reload
import ephem_functions as efun
reload(efun)
import tractive_functions as tfun
reload(tfun)
# Make LiveOcean's "alpha" utilities importable (provides zfun below).
alp = os.path.abspath('../../LiveOcean/alpha')
if alp not in sys.path:
    sys.path.append(alp)
import zfun

# Directory holding the observed-tide CSV files, and the local display zone.
indir = os.environ.get('HOME') + '/Documents/ptools_data/tide/'
zone='US/Pacific'
tz_local = pytz.timezone(zone)
def read_tide(in_fn):
    """Load a CO-OPS hourly water-level CSV.

    Reads the file indexed by its 'Date Time' column, strips whitespace from
    the headers, drops the 'Sigma', 'I' and 'L' columns, and renames
    'Water Level' to 'Tide Obs'. The index is localized to UTC.

    Returns:
        (frame, eta0): the cleaned DataFrame and the record-mean water level.
    """
    frame = pd.read_csv(in_fn, index_col='Date Time', parse_dates=True)
    # Headers may carry stray whitespace; normalize them in one pass.
    frame = frame.rename(columns={name: name.strip() for name in frame.keys()})
    frame = (frame
             .drop(['Sigma', 'I', 'L'], axis=1)
             .rename(columns={'Water Level': 'Tide Obs'}))
    # Mean water level over the whole record.
    eta0 = frame['Tide Obs'].mean()
    # Assumes time is UTC
    frame.index.name = 'Date UTC'
    frame = frame.tz_localize('UTC')
    return frame, eta0
# READ IN OBSERVED TIDE DATA
fn = 'CO-OPS__9447130__hr.csv' # Seattle 2016 observed data
city = 'Seattle'
obs_fn = indir + fn
obs_df, eta0 = read_tide(obs_fn)
# Convert the UTC index to local time for display.
obs_df = obs_df.tz_convert(tz_local)
obs_df.index.name = 'Date (local time)'
# Scale water level by 3.28084 (the m-to-ft factor) — assumes the source
# data is in meters; TODO confirm against the CO-OPS download settings.
obs_df['Tide Obs'] = obs_df['Tide Obs'] * 3.28084
# and set related time limits
year = 2016
#tzinfo = pytz.timezone('UTC')
tzinfo = tz_local
# One-day, one-month and one-year windows for the three panels below.
dt0_day = datetime(year,6,10,tzinfo=tzinfo)
dt1_day = datetime(year,6,11,tzinfo=tzinfo)
dt0_month = datetime(year,6,1,tzinfo=tzinfo)
dt1_month = datetime(year,7,1,tzinfo=tzinfo)
dt0_year = datetime(year,1,1,tzinfo=tzinfo)
dt1_year = datetime(year+1,1,1,tzinfo=tzinfo)
# PLOTTING
plt.close('all')
# Line widths: thinner lines for longer windows so detail stays readable.
lw0 = 0.5
lw1 = 1
lw2 = 3
fsz=18
ylim=(-5, 15)
fig = plt.figure(figsize=(14,8))
# Top-left panel: one day.
ax = fig.add_subplot(221)
obs_df.plot(y='Tide Obs',
    legend=False, style='-b', ax=ax, ylim=ylim,
    lw=lw2, grid=True, xlim=(dt0_day,dt1_day))
ax.text(.05,.05,'One Day', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.text(.05,.9,'Observed Tide Height (ft) ' + city,
    transform=ax.transAxes, fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
# Top-right panel: one month.
ax = fig.add_subplot(222)
obs_df.plot(y='Tide Obs',
    legend=False, style='-b', ax=ax, ylim=ylim,
    lw=lw1, grid=True, xlim=(dt0_month,dt1_month))
ax.text(.05,.05,'One Month', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
# Bottom panel (full width): one year.
ax = fig.add_subplot(212)
obs_df.plot(y='Tide Obs',
    legend=False, style='-b', ax=ax, ylim=ylim,
    lw=lw0, grid=True, xlim=(dt0_year,dt1_year))
ax.text(.05,.05,'One Year', transform=ax.transAxes, fontweight='bold', fontsize=fsz)
ax.set_xticklabels('')
ax.set_xlabel('')
fig.set_tight_layout(True)
plt.show()
|
[
"p.maccready@gmail.com"
] |
p.maccready@gmail.com
|
6b1d1fdaa602c7768fb7a668612821ad314b4395
|
52d797a1a9f853f691d2d6fb233434cf9cc9e12b
|
/Implementation Challenges/Append and Delete.py
|
1e2622a5816301cb9b83c0a56d915bdfe4639df0
|
[] |
no_license
|
harshildarji/Algorithms-HackerRank
|
f1c51fedf2be9e6fbac646d54abccb7e66800e22
|
96dab5a76b844e66e68a493331eade91541fd873
|
refs/heads/master
| 2022-05-21T06:57:59.362926
| 2020-04-19T14:05:19
| 2020-04-19T14:05:19
| 114,212,208
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Append and Delete
# https://www.hackerrank.com/challenges/append-and-delete/problem

# Greedy: peel characters off the end of s until either s is a prefix of t
# whose remainder can be appended with exactly i operations left, or s is
# empty; then check whether the remaining appends fit in the budget.
s, t = input().strip(), input().strip()
k = int(input().strip())
for i in reversed(range(1, k + 1)):
    if s == t[:len(s)] and len(t) - len(s) == i or len(s) == 0:
        break
    s = s[:-1]
# NOTE(review): this relies on the loop variable `i` leaking out of the loop.
# If k == 0 the loop never runs and `i` is undefined (NameError) — confirm
# the problem constraints guarantee k >= 1 before relying on this.
print("Yes" if len(t) - len(s) <= i else "No")
|
[
"darjiharshil2994@gmail.com"
] |
darjiharshil2994@gmail.com
|
c305892b8de9942ba1433b2aa00240da71b7b0bc
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayCloudCloudbaseHttpaccessBindQueryResponse.py
|
ebc27653df46ebfce5e7c7e7b22f0e76998f3f54
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.DomainBind import DomainBind
class AlipayCloudCloudbaseHttpaccessBindQueryResponse(AlipayResponse):
    """Response wrapper holding a paged list of DomainBind entries."""

    def __init__(self):
        super(AlipayCloudCloudbaseHttpaccessBindQueryResponse, self).__init__()
        # Paged result fields, populated by parse_response_content().
        self._domain_binds = None
        self._page_index = None
        self._page_size = None
        self._total = None

    @property
    def domain_binds(self):
        return self._domain_binds

    @domain_binds.setter
    def domain_binds(self, value):
        # Accept a list mixing DomainBind instances and raw alipay dicts;
        # normalize everything to DomainBind.
        if isinstance(value, list):
            self._domain_binds = [
                item if isinstance(item, DomainBind)
                else DomainBind.from_alipay_dict(item)
                for item in value
            ]

    @property
    def page_index(self):
        return self._page_index

    @page_index.setter
    def page_index(self, value):
        self._page_index = value

    @property
    def page_size(self):
        return self._page_size

    @page_size.setter
    def page_size(self, value):
        self._page_size = value

    @property
    def total(self):
        return self._total

    @total.setter
    def total(self, value):
        self._total = value

    def parse_response_content(self, response_content):
        response = super(AlipayCloudCloudbaseHttpaccessBindQueryResponse,
                         self).parse_response_content(response_content)
        # Copy each present field through its property setter so that
        # domain_binds normalization applies.
        for key in ('domain_binds', 'page_index', 'page_size', 'total'):
            if key in response:
                setattr(self, key, response[key])
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
b4f391918f30a778d049bd168cb1ca4154c0b42a
|
3a4fbde06794da1ec4c778055dcc5586eec4b7d2
|
/@lib/12-13-2011-01/vyperlogix/decorators/addto.py
|
979a905e9a18fdcddf2620939aec919f9baa031a
|
[] |
no_license
|
raychorn/svn_python-django-projects
|
27b3f367303d6254af55c645ea003276a5807798
|
df0d90c72d482b8a1e1b87e484d7ad991248ecc8
|
refs/heads/main
| 2022-12-30T20:36:25.884400
| 2020-10-15T21:52:32
| 2020-10-15T21:52:32
| 304,455,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
__copyright__ = """\
(c). Copyright 2008-2014, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def addto(instance):
    """Decorator alias: delegates to ``inject_method_into(instance)``."""
    # Imported lazily so merely importing this module does not pull in inject.
    from inject import inject_method_into as _inject_method_into
    return _inject_method_into(instance)
|
[
"raychorn@gmail.com"
] |
raychorn@gmail.com
|
7ca223afe5153d45121ca9011ccb886e87b49eb5
|
99fddc8762379bcb707ad53081cd342efa7a5d89
|
/test/pinocchio_frame_test.py
|
fa17c45921833826190201d02cca144b699b6959
|
[
"MIT"
] |
permissive
|
zhilinxiong/PyPnC
|
ef19a4bcc366666d2550466b07cd8ec8f098c0c4
|
abf9739c953d19ca57fd4bd37be43415f3d5e4a7
|
refs/heads/master
| 2023-07-04T19:09:26.115526
| 2021-08-03T04:29:10
| 2021-08-03T04:29:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import pinocchio as pin
import numpy as np
# Build the robot model and its workspace data from the URDF description.
urdf_file = cwd + "/robot_model/manipulator/three_link_manipulator.urdf"
model = pin.buildModelFromUrdf(urdf_file)
data = model.createData()
print(model)

# Test configuration: first joint at pi/2, all joint velocities set to 1.
q = np.array([np.pi / 2., 0., 0.])
# q = np.zeros(3)
qdot = np.ones(3)
pin.forwardKinematics(model, data, q, qdot)

## Print Frame Names
print([frame.name for frame in model.frames])

## Calculate j2 placement
# NOTE(review): the frame looked up is named 'j1' but stored as j2_frame —
# confirm which joint frame is actually intended here.
j2_frame = model.getFrameId('j1')
j2_translation = pin.updateFramePlacement(model, data, j2_frame)
print("j2 translation")
print(j2_translation)

## Calculate l2 placement
l2_frame = model.getFrameId('l2')
l2_translation = pin.updateFramePlacement(model, data, l2_frame)
print("l2 translation")
print(l2_translation)

## Calculate j2 jacobian
pin.computeJointJacobians(model, data, q)
j2_jacobian = pin.getFrameJacobian(model, data, j2_frame,
                                   pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("j2 jacobian")
print(j2_jacobian)

## Calculate l2 jacobian
l2_jacobian = pin.getFrameJacobian(model, data, l2_frame,
                                   pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("l2 jacobian")
print(l2_jacobian)

## Calculate j2 spatial velocity
# NOTE(review): no ReferenceFrame argument here, unlike the l2 call below —
# this uses the library's default frame; confirm that is intended.
j2_vel = pin.getFrameVelocity(model, data, j2_frame)
print("j2 vel")
print(j2_vel)

## Calculate l2 spatial velocity
l2_vel = pin.getFrameVelocity(model, data, l2_frame,
                              pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
print("l2 vel")
print(l2_vel)
# Cross-check: J * qdot should reproduce the frame velocity printed above.
print(np.dot(l2_jacobian, qdot))
|
[
"junhyeokahn91@gmail.com"
] |
junhyeokahn91@gmail.com
|
2e808d917489faf59e65fb3ab6a7e999316ec019
|
14a853584c0c1c703ffd8176889395e51c25f428
|
/sem1/fop/lab5/static/strings.py
|
2f47c15c3b3c7d3bd361c700be9a29ee4f30b077
|
[] |
no_license
|
harababurel/homework
|
d0128f76adddbb29ac3d805c235cdedc9af0de71
|
16919f3b144de2d170cd6683d54b54bb95c82df9
|
refs/heads/master
| 2020-05-21T12:25:29.248857
| 2018-06-03T12:04:45
| 2018-06-03T12:04:45
| 43,573,199
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
"""
Most long messages displayed by the UI will be found here.
"""
from util.Color import bold
# (command, description) pairs rendered into the help prompt below.
_HELP_COMMANDS = [
    ('help', 'displays this prompt.'),
    ('add', 'adds a new student or assignment.'),
    ('list', 'displays all students or assignments.'),
    ('undo', 'goes to previous state.'),
    ('redo', 'goes to next state.'),
    ('clear', 'clears the screen.'),
    ('exit', 'saves the work session and exits the application.'),
]

STRINGS = {
    'helpPrompt':
        'Commands:\n' + '\n'.join(
            '\t%s - %s' % (bold(name), description)
            for name, description in _HELP_COMMANDS
        )
}
|
[
"srg.pscs@gmail.com"
] |
srg.pscs@gmail.com
|
007da86134bd9cf81656b9de3a4b00e9262caadf
|
0bce7412d58675d6cc410fa7a81c294ede72154e
|
/Python3/0983. Minimum Cost For Tickets.py
|
67eeee126a10f3fbd09cd9f37ac9a746033d4c3f
|
[] |
no_license
|
yang4978/LeetCode
|
9ddf010b0f1dda32cddc7e94c3f987509dea3214
|
6387d05b619d403414bad273fc3a7a2c58668db7
|
refs/heads/master
| 2022-01-15T04:21:54.739812
| 2021-12-28T12:28:28
| 2021-12-28T12:28:28
| 182,653,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
class Solution:
    def mincostTickets(self, days: List[int], costs: List[int]) -> int:
        """Minimum cost to cover every travel day with 1-, 7- or 30-day passes.

        Args:
            days: sorted, strictly increasing travel days (1-based).
            costs: [1-day, 7-day, 30-day] pass prices.

        Returns:
            The cheapest total cost covering all of ``days``.
        """
        if not days:  # no travel: nothing to pay (original crashed on days[-1])
            return 0
        # Set membership is O(1); the original scanned the list per calendar day.
        travel = set(days)
        last = days[-1]
        # dp[d] = cheapest cost covering all travel days <= d.
        dp = [0] * (last + 1)
        for d in range(1, last + 1):
            if d not in travel:
                dp[d] = dp[d - 1]  # no travel today: cost unchanged
            else:
                # Clamp look-backs to day 0 instead of relying on negative
                # indices wrapping into the still-zero tail of an oversized
                # array, as the original did.
                dp[d] = min(dp[d - 1] + costs[0],
                            dp[max(0, d - 7)] + costs[1],
                            dp[max(0, d - 30)] + costs[2])
        return dp[last]
|
[
"noreply@github.com"
] |
yang4978.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.