blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
111bafc092b997b97fc93d8458b619c5a5376b99
|
59d41fc7fd8b261d08f25926d64b5bc6e1ed99e8
|
/python/baike/spider_main.py
|
65f1d98b8df01ad5f18c2b90bc05f0195312d748
|
[] |
no_license
|
MissMyDearBear/Diary
|
ff779aee87d52a5617eca0706ddb61107990d980
|
8375a302303f4524f19bcfe2e63a4afac3e495ed
|
refs/heads/master
| 2021-01-19T20:08:26.043203
| 2019-06-24T08:02:37
| 2019-06-24T08:02:37
| 88,489,275
| 39
| 17
| null | 2017-08-07T10:09:29
| 2017-04-17T08:43:44
|
Java
|
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
from baike import url_manager, html_download, html_parser, html_output
class SpiderMain(object):
    """Coordinates the crawl: URL manager, downloader, parser and output writer."""

    def __init__(self):
        # Wire up the four collaborators from the baike package.
        self.urls = url_manager.UrlManager()
        self.download = html_download.HtmlDown()
        self.parser = html_parser.HtmlParser()
        self.out = html_output.HtmlOutput()

    def crow(self, root_url):
        """Crawl starting from root_url, processing at most 1000 pages.

        (Method name kept as ``crow`` for backward compatibility; it means "crawl".)
        """
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                # Parenthesized print works under both Python 2 and Python 3
                # (the original bare ``print`` statement was Python-2-only).
                print('craw %d:%s' % (count, new_url))
                html_count = self.download.download(new_url)
                data = self.parser.parser(html_count)
                self.out.collect_data(data)
                if count == 1000:
                    break
                count = count + 1
            except Exception:
                # Was a bare ``except:`` which also swallowed KeyboardInterrupt
                # and SystemExit; narrowed so Ctrl-C can still stop the crawl.
                print('craw failed:')
        self.out.output_html()
if __name__ == '__main__':
    # Entry point: crawl starting from the site root.
    start_page = 'http://www.qiushibaike.com/'
    SpiderMain().crow(start_page)
|
[
"zz_caogehao0@126.com"
] |
zz_caogehao0@126.com
|
a113c0c97c558d27d5bbdcd60892e919edcb5d48
|
d31bc5cf9ed993ef4f460cdd1d7fb5e247e8f6b8
|
/mappers.py
|
12f15b1b8dfb5a0cbc7a47f0c9ae162acc307483
|
[] |
no_license
|
Arendelle12/checkers
|
76d31c616e65791ef5f1d101e3d14def562274db
|
c26d4f63b236e42063ae939dec43ba32b80da834
|
refs/heads/main
| 2023-02-26T01:23:54.512604
| 2021-01-31T19:39:49
| 2021-01-31T19:39:49
| 312,831,127
| 0
| 0
| null | 2020-12-21T19:01:31
| 2020-11-14T14:17:28
|
C++
|
UTF-8
|
Python
| false
| false
| 657
|
py
|
from itertools import islice
# Convert a string of the form "1234" into a list of digits
def str_to_list(test_str):
    """Convert a digit string such as "1234" into the list [1, 2, 3, 4]."""
    return list(map(int, test_str))
# Convert a 1D list into a 2D list
def convert_1d_to_2d(lst, len_lst):
    """Split the flat iterable *lst* into consecutive rows sized by *len_lst*."""
    cursor = iter(lst)
    rows = []
    for size in len_lst:
        rows.append(list(islice(cursor, size)))
    return rows
#return 2 dimensional list
def convert_board(rec_board):
    """Turn a 64-character board string into an 8x8 grid of ints, printing each row."""
    row_sizes = [8] * 8
    board = convert_1d_to_2d(str_to_list(rec_board), row_sizes)
    for row_index in range(8):
        print(board[row_index])
    return board
def move_to_string(start, end):
    """Concatenate the start and end coordinate lists into one digit string to send."""
    return ''.join(map(str, start + end))
|
[
"gabriela.bartoszewska@student.put.poznan.pl"
] |
gabriela.bartoszewska@student.put.poznan.pl
|
659ecce83973b03700379049e57ba28927ef5bfe
|
e1467ade19cfaf7a25129398a9ba4dae8508e259
|
/seminar_3/TEMPLATE/test.py
|
27fc43001ead3db5143e08fb70c131de1eb4f2d2
|
[] |
no_license
|
evgenstf/hse_python_seminars
|
ee4d1dce877bfeb31e0373fbd0b5cb84dc38567d
|
c155d3f79a11b263df3bb4245d548aafbc58c209
|
refs/heads/master
| 2022-12-28T04:31:33.744168
| 2020-10-01T10:52:53
| 2020-10-01T10:52:53
| 295,569,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
# Test-harness template: the <<...>> placeholders are substituted by the
# seminar tooling before this file is executed (it is not valid Python as-is).
from <<solution_directory>>.solution import main
from tester import test_solution
# Run the student's ``main`` against one templated input/output pair.
test_solution(main, [<<input>>], [<<output>>])
|
[
"evgenstf@gmail.com"
] |
evgenstf@gmail.com
|
8fe975eac45d0cbc7088a107247e236f4fea121b
|
79a836022275b94b687325ae36980cafe6d66788
|
/setup.py
|
18eba6b9644a02f8b6a1d99711326daac0f2de62
|
[] |
no_license
|
reminder-bot/start
|
1194adede56c46b587e27b003c0c401ceb7b9056
|
33c613d5a9c168635ad221d864e25d27c726ae5a
|
refs/heads/master
| 2020-03-21T23:34:21.716780
| 2018-08-30T20:13:14
| 2018-08-30T20:13:14
| 139,195,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
import os
import configparser

# Ensure the data directory (one level above the working directory) exists.
try:
    os.mkdir('../DATA')
except FileExistsError:
    pass

# Seed each JSON data file with an empty object unless it already exists.
files = ['todos']
contents = ['{}']

for fn, content in zip(files, contents):
    if fn + '.json' in os.listdir('../DATA/'):
        continue

    # ``with`` guarantees the file is closed even if the write fails
    # (the original open/write/close leaked the handle on error).
    with open('../DATA/' + fn + '.json', 'w') as f:
        f.write(content)

# Write a skeleton config.ini with placeholder credentials on first run.
if 'config.ini' not in os.listdir('..'):
    config = configparser.ConfigParser()

    config['DEFAULT'] = {
        'token': 'token',
        'dbl_token': 'discordbotslist token',
        'patreon_server': 'serverid',
        'patreon_enabled': 'yes',
        'strings_location': './languages/'
    }

    config['WEB'] = {
        'DISCORD_OAUTH_CLIENT_ID': 'id',
        'DISCORD_OAUTH_CLIENT_SECRET': 'secretkey',
        'SECRET': 'secretkey'
    }

    config['MYSQL'] = {
        'user': 'username',
        'passwd': 'password',
        'host': 'localhost',
        # BUG FIX: the original was missing the comma after 'reminders',
        # which made Python concatenate the adjacent string literals and
        # raise a SyntaxError at the following ':'.
        'database': 'reminders',
        'database_sfx': 'soundfx'
    }

    with open('../config.ini', 'w') as f:
        config.write(f)
|
[
"judewrs@gmail.com"
] |
judewrs@gmail.com
|
e0a3189c619942a87cd85c11009685b4e7d8de45
|
018c38e886b9c8904cbfe470c462abf0b4fa1eef
|
/jdaviz/vizcomponents/viewer/viewernd.py
|
bd7813b48a08905a090d5fa79dd306107749095a
|
[] |
no_license
|
astrofrog/jdaviz
|
4410244508377a6e768abba8f47f01c52ae97791
|
f8269c229aa58e6e0088be2146d338cdc56dd72e
|
refs/heads/master
| 2022-12-12T07:52:58.286625
| 2019-05-23T20:04:23
| 2019-05-23T20:04:23
| 188,431,057
| 0
| 0
| null | 2021-08-17T14:41:19
| 2019-05-24T13:58:05
|
Python
|
UTF-8
|
Python
| false
| false
| 673
|
py
|
import logging
from ipywidgets import Box
from .simple_bqplot_image import simple_imshow
from .viewer import Viewer
# Verbose debug logging to a fixed temp file; 'a' appends across runs.
logging.basicConfig(filename='/tmp/vizapp.log',
                    filemode='a',
                    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    level=logging.DEBUG)

# Module-level logger for the N-dimensional viewer.
logger = logging.getLogger('viewernd')
class ViewerND(Viewer):
    """Viewer that renders the first glue dataset with a simple bqplot image widget."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Display the first dataset registered in the glue data collection.
        first_dataset = self._glue_app.data_collection[0]
        self._v3d = simple_imshow(self._glue_app, data=first_dataset)

    def show(self):
        """Return the image widget wrapped in a Box for notebook display."""
        return Box([self._v3d.main_widget])
|
[
"craig@brechmos.org"
] |
craig@brechmos.org
|
afba7b72075609ee49baf8ee4153cc72060abe00
|
104a2b0fc2af615c9073152aed094e991094ffb1
|
/generator/main.py
|
94d432127f10706a22571d5509702335c098646a
|
[
"MIT"
] |
permissive
|
Kaicastledine/homoglyph
|
e56b618ba85a483dfa4771ee6c2877badb94f035
|
8d63ae22655147a54fae3f4e8df9c7ebe7bdb52a
|
refs/heads/master
| 2021-01-19T21:08:18.636097
| 2015-12-22T07:50:02
| 2015-12-22T07:50:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
from data_file_parser import DataFileDir
from char_manager import CharacterManager
from output_char_codes import OutputCharCodes
from output_chars import OutputChars
from output_js import OutputJS
TEMPLATES_DIR = 'generator/templates'
DATA_DIR = 'generator/source_data'
CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'

if __name__ == '__main__':
    # Parse every source data file into the character manager, then emit
    # each output format (char-code tables, char tables, JS modules).
    manager = CharacterManager()
    data_dir = DataFileDir(DATA_DIR)
    data_dir.parse_all(manager)

    OutputCharCodes('raw_data', TEMPLATES_DIR).create(manager)
    OutputChars('raw_data', TEMPLATES_DIR).create(manager)
    OutputJS('homoglyph.js', 'javascript/src', TEMPLATES_DIR).create(manager, CHARS)
    OutputJS('index.js', 'node', TEMPLATES_DIR).create(manager, CHARS)
|
[
"rob@codebox.org.uk"
] |
rob@codebox.org.uk
|
64cbfec3f3acc0e1d61883835c5f39fc1e73c1c0
|
9845815f0ff30819d6504adcac96b45deb865697
|
/forestConfigs/runForest_PbPb_MIX_75X_JECv4_localVoronoi.py
|
0ba7b44a0af6b2ba9e08c237c363a9fe1e412f27
|
[] |
no_license
|
dgulhan/forestConfigs
|
8efc4dc5f2341e877ae46dca8d9ae3dbe2d5895d
|
743178fa48457f6b6bdd49c9337931a4299b3994
|
refs/heads/master
| 2021-01-19T11:22:51.780050
| 2015-12-04T17:50:36
| 2015-12-04T17:50:36
| 22,839,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,648
|
py
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: build the 'HiForest' process for PbPb MC forest production.
process = cms.Process('HiForest')
process.options = cms.untracked.PSet(
    # wantSummary = cms.untracked.bool(True)
    #SkipEvent = cms.untracked.vstring('ProductNotFound')
)

#####################################################################################
# HiForest labelling info
#####################################################################################

process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
# Stamp the output with the analysis code's git tag for provenance.
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
# NOTE(review): under Python 3 ``.read()`` returns bytes, so the == '' check
# would never match; this config targets Python 2 cmsRun — confirm before porting.
if version == '':
    version = 'no git info'
process.HiForest.HiForestVersion = cms.untracked.string(version)

#####################################################################################
# Input source
#####################################################################################

process.source = cms.Source("PoolSource",
                            duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
                            fileNames = cms.untracked.vstring(
                                "file:step3_10.root"
                            ))

# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################

process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')

# PbPb 53X MC
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_HIon', '')

################
###Centrality###
################

process.load("RecoHI.HiCentralityAlgos.CentralityBin_cfi")
process.centralityBin.Centrality = cms.InputTag("hiCentrality")
process.centralityBin.centralityVariable = cms.string("HFtowers")
# process.centralityBin.nonDefaultGlauberModel = cms.string("HydjetDrum5")
# process.centralityBin.nonDefaultGlauberModel = cms.string("HydjetDrum5")

# Override the centrality calibration table fetched from the conditions DB.
process.GlobalTag.toGet.extend([
    cms.PSet(record = cms.string("HeavyIonRcd"),
             tag = cms.string("CentralityTable_HFtowers200_HydjetDrum5_v750x02_mc"),
             connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS"),
             label = cms.untracked.string("HFtowers")
             ),
])
##########
###JEC####
##########
process.load("CondCore.DBCommon.CondDBCommon_cfi")
from CondCore.DBCommon.CondDBSetup_cfi import *
# Jet energy corrections for all Calo/PF cone sizes (AK1-AK6), read from a
# local sqlite file; one payload per jet collection, selected by label.
process.jec = cms.ESSource("PoolDBESSource",
                           DBParameters = cms.PSet(
                               messageLevel = cms.untracked.int32(0)
                           ),
                           timetype = cms.string('runnumber'),
                           toGet = cms.VPSet(
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK1Calo_offline'),
                                   label = cms.untracked.string('AK1Calo_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK2Calo_offline'),
                                   label = cms.untracked.string('AK2Calo_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK3Calo_offline'),
                                   label = cms.untracked.string('AK3Calo_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK4Calo_offline'),
                                   label = cms.untracked.string('AK4Calo_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK5Calo_offline'),
                                   label = cms.untracked.string('AK5Calo_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK6Calo_offline'),
                                   label = cms.untracked.string('AK6Calo_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK1PF_offline'),
                                   label = cms.untracked.string('AK1PF_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK2PF_offline'),
                                   label = cms.untracked.string('AK2PF_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK3PF_offline'),
                                   label = cms.untracked.string('AK3PF_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK4PF_offline'),
                                   label = cms.untracked.string('AK4PF_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK5PF_offline'),
                                   label = cms.untracked.string('AK5PF_offline')
                               ),
                               cms.PSet(
                                   record = cms.string('JetCorrectionsRecord'),
                                   tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK6PF_offline'),
                                   label = cms.untracked.string('AK6PF_offline')
                               ),
                               ## here you add as many jet types as you need
                               ## note that the tag name is specific for the particular sqlite file
                           ),
                           connect = cms.string('sqlite:HI_PythiaCUETP8M1_5020GeV_753p1_v4.db')
                           # uncomment above tag lines and this comment to use MC JEC
                           # connect = cms.string('sqlite:Summer12_V7_MC.db')
)
## add an es_prefer statement to resolve a possible conflict from simultaneous connection to a global tag
process.es_prefer_jec = cms.ESPrefer('PoolDBESSource','jec')
##############
###Gen jets###
##############
process.load('RecoHI.HiJetAlgos.HiGenJets_cff')
process.load('RecoJets.Configuration.GenJetParticles_cff')
# Generator-level jets for every cone size used below.
process.akHiGenJets = cms.Sequence(
    process.genParticlesForJets +
    process.ak1HiGenJets +
    process.ak2HiGenJets +
    process.ak3HiGenJets +
    process.ak4HiGenJets +
    process.ak5HiGenJets +
    process.ak6HiGenJets)

#################
###Voronoi#######
#################
process.load("CondCore.DBCommon.CondDBCommon_cfi")
from CondCore.DBCommon.CondDBSetup_cfi import *
# Local (non-default) Voronoi underlying-event subtraction tables.
process.uetable = cms.ESSource("PoolDBESSource",
                               DBParameters = cms.PSet(
                                   messageLevel = cms.untracked.int32(0)
                               ),
                               timetype = cms.string('runnumber'),
                               toGet = cms.VPSet(
                                   cms.PSet(record = cms.string("JetCorrectionsRecord"),
                                            tag = cms.string("UETable_PF_v00_mc"),
                                            #label = cms.untracked.string("UETable_PF")
                                            label = cms.untracked.string("UETable_PFMarta")
                                            ),
                                   cms.PSet(record = cms.string("JetCorrectionsRecord"),
                                            tag = cms.string("UETable_Calo_v00_mc"),
                                            #label = cms.untracked.string("UETable_PF")
                                            label = cms.untracked.string("UETable_CaloMarta")
                                            )
                               ),
                               connect = cms.string('sqlite:output.db')
                               # uncomment above tag lines and this comment to use MC JEC
                               # connect = cms.string('sqlite:Summer12_V7_MC.db')
)
## add an es_prefer statement to resolve a possible conflict from simultaneous connection to a global tag
process.es_prefer_uetable = cms.ESPrefer('PoolDBESSource','uetable')

process.load('HeavyIonsAnalysis.JetAnalysis.jets.HiReRecoJets_HI_cff')
# Point the background producers at the labels defined in the ESSource above.
process.voronoiBackgroundPF.tableLabel = cms.string("UETable_PFMarta")
process.voronoiBackgroundCalo.tableLabel = cms.string("UETable_CaloMarta")
#####################################################################################
# Define tree output
#####################################################################################

process.TFileService = cms.Service("TFileService",
                                   fileName=cms.string("HiForest.root"))

#####################################################################################
# Additional Reconstruction and Analysis: Main Body
#####################################################################################

# One analyzer sequence per jet algorithm (Pu/Vs) x type (Calo/PF) x cone size 1-6.
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu1CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs1CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs1PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu1PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu2CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs2CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs2PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu2PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu3CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs3CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs3PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu3PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu4CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs4CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs4PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu4PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu5CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs5CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs5PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu5PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu6CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs6CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs6PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu6PFJetSequence_PbPb_mc_cff')

# Run the Voronoi background estimators first, then every jet analyzer sequence.
process.jetSequences = cms.Sequence(process.voronoiBackgroundPF+
                                    process.voronoiBackgroundCalo+
                                    process.akPu1CaloJetSequence +
                                    process.akVs1CaloJetSequence +
                                    process.akVs1PFJetSequence +
                                    process.akPu1PFJetSequence +
                                    process.akPu2CaloJetSequence +
                                    process.akVs2CaloJetSequence +
                                    process.akVs2PFJetSequence +
                                    process.akPu2PFJetSequence +
                                    process.akPu3CaloJetSequence +
                                    process.akVs3CaloJetSequence +
                                    process.akVs3PFJetSequence +
                                    process.akPu3PFJetSequence +
                                    process.akPu4CaloJetSequence +
                                    process.akVs4CaloJetSequence +
                                    process.akVs4PFJetSequence +
                                    process.akPu4PFJetSequence +
                                    process.akPu5CaloJetSequence +
                                    process.akVs5CaloJetSequence +
                                    process.akVs5PFJetSequence +
                                    process.akPu5PFJetSequence +
                                    process.akPu6CaloJetSequence +
                                    process.akVs6CaloJetSequence +
                                    process.akVs6PFJetSequence +
                                    process.akPu6PFJetSequence
                                    )

process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_mc_cfi')
process.hiEvtAnalyzer.doMC = cms.bool(False) #the gen info dataformat has changed in 73X, we need to update hiEvtAnalyzer code
process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.HiGenAnalyzer_cfi')

#####################################################################################
# To be cleaned

process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_MC_cff')
process.load("HeavyIonsAnalysis.TrackAnalysis.METAnalyzer_cff")
process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_cfi")
process.load('HeavyIonsAnalysis.JetAnalysis.rechitanalyzer_cfi')
process.rechitAna = cms.Sequence(process.rechitanalyzer+process.pfTowers)
process.pfcandAnalyzer.skipCharged = False
process.pfcandAnalyzer.pfPtMin = 0
#####################################################################################

#########################
# Track Analyzer
#########################
process.anaTrack.qualityStrings = cms.untracked.vstring(['highPurity','tight','loose'])
process.pixelTrack.qualityStrings = cms.untracked.vstring('highPurity')
process.hiTracks.cut = cms.string('quality("highPurity")')

# set track collection to iterative tracking
process.anaTrack.trackSrc = cms.InputTag("hiGeneralTracks")

# clusters missing in recodebug - to be resolved
process.anaTrack.doPFMatching = True
process.anaTrack.pfCandSrc = cms.InputTag("particleFlowTmp")
process.anaTrack.doSimVertex = True
process.anaTrack.doSimTrack = True
# process.ppTrack.fillSimTrack = True

# Reco-track <-> tracking-particle association used by the track analyzer.
process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cff")
process.tpRecoAssocGeneralTracks = process.trackingParticleRecoTrackAsssociation.clone()
process.tpRecoAssocGeneralTracks.label_tr = cms.InputTag("hiGeneralTracks")
process.quickTrackAssociatorByHits.ComponentName = cms.string('quickTrackAssociatorByHits')
process.quickTrackAssociatorByHits.SimToRecoDenominator = cms.string('reco')
process.quickTrackAssociatorByHits.Cut_RecoToSim = cms.double(0.5)
process.quickTrackAssociatorByHits.Quality_SimToReco = cms.double(0.0)

#####################
# Photons
#####################
process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
process.ggHiNtuplizer.genParticleSrc = cms.InputTag("genParticles")
# GED-photon variant of the ntuplizer, sharing all other settings.
process.ggHiNtuplizerGED = process.ggHiNtuplizer.clone(recoPhotonSrc = cms.InputTag('gedPhotonsTmp'),
                                                       recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerGED')
                                                       )

#####################
# HYDJET RECO file didn't have ak2GenJets and ak6GenJets as input, so removed them
# and ran our own hiGenJets sequence
# from RecoHI.HiJetAlgos.HiGenJets_cff import ak3HiGenJets, ak4HiGenJets
# from RecoJets.Configuration.GenJetParticles_cff import genParticlesForJets
# genParticlesForJets.ignoreParticleIDs += cms.vuint32( 12,14,16)
# process.hiSelectGenJets = cms.Sequence(
#     genParticlesForJets +
#     ak3HiGenJets +
#     ak4HiGenJets
# )

process.HiGenParticleAna.genParticleSrc = cms.untracked.InputTag("genParticles")
process.load("GeneratorInterface.HiGenCommon.HeavyIon_cff")

# Main analysis path.
process.ana_step = cms.Path(process.heavyIon*
                            process.hltanalysis *
                            #temp process.hltobject *
                            process.centralityBin *
                            process.hiEvtAnalyzer*
                            process.HiGenParticleAna*
                            process.quickTrackAssociatorByHits*
                            process.tpRecoAssocGeneralTracks + #used in HiPFJetAnalyzer
                            process.akHiGenJets +
                            process.hiReRecoCaloJets +
                            process.hiReRecoPFJets +
                            process.jetSequences +
                            process.ggHiNtuplizer +
                            process.ggHiNtuplizerGED +
                            process.pfcandAnalyzer +
                            process.rechitAna +
                            process.HiForest +
                            # process.cutsTPForFak +
                            # process.cutsTPForEff +
                            process.anaTrack
                            #process.pixelTrack
                            )

process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
# One Path per event-selection filter so each decision bit is stored independently.
process.phltJetHI = cms.Path( process.hltJetHI )
process.pcollisionEventSelection = cms.Path(process.collisionEventSelection)
process.load('CommonTools.RecoAlgos.HBHENoiseFilterResultProducer_cfi')
process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.phfCoincFilter = cms.Path(process.hfCoincFilter )
process.phfCoincFilter3 = cms.Path(process.hfCoincFilter3 )
process.pprimaryVertexFilter = cms.Path(process.primaryVertexFilter )
process.phltPixelClusterShapeFilter = cms.Path(process.siPixelRecHits*process.hltPixelClusterShapeFilter )
process.phiEcalRecHitSpikeFilter = cms.Path(process.hiEcalRecHitSpikeFilter )
process.pAna = cms.EndPath(process.skimanalysis)
# Customization
|
[
"dgulhan@cern.ch"
] |
dgulhan@cern.ch
|
407490770d18e8f48175636293ab3b86e5b8fdb7
|
88908644c4f3d48ba684b05491d7e6279408cf42
|
/My_K_NN.py
|
b654e9c55819ddea3101f29201189d35fabb7b02
|
[
"MIT"
] |
permissive
|
cakmakaf/pima_diabetes_biased_random_forest
|
6bf73174b5b99ae7842198866704a618be653d96
|
96bef0efb5fbd84dc050021a5025802bb21b73e0
|
refs/heads/main
| 2023-03-10T18:57:25.477280
| 2021-02-20T19:14:53
| 2021-02-20T19:14:53
| 340,498,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
from math import sqrt
# This function aloows us to find the min and max values for each column
# The input is the dataframe
# It will return the list of minimum and maximum values of the data matrix.
def data_min_max(data):
    """Return [min, max] pairs, one per column of the data matrix *data*."""
    pairs = []
    for col_idx in range(len(data[0])):
        column = [row[col_idx] for row in data]
        pairs.append([min(column), max(column)])
    return pairs
# This function helps to calculate the Euclidean distance between two vectors
# The arguments are two vectors(row vectors)
# The value of the function is a positive scaler number value.
def euclidean_distance(row1, row2):
    """Euclidean distance between two rows, ignoring the final (label) column."""
    total = 0.0
    for a, b in zip(row1[:-1], row2):
        total += (a - b) ** 2
    return sqrt(total)
# Here I locate the most similar neighbors
# The inputs are the test row, train set and number of neighbors
# The return is list of neighbors
def get_neighbors(test_row, train, num_neighbors):
    """Return the *num_neighbors* training rows closest to *test_row*."""
    scored = [(row, euclidean_distance(test_row, row)) for row in train]
    # Sort by ascending distance, then keep the closest rows.
    scored.sort(key=lambda pair: pair[1])
    return [scored[i][0] for i in range(num_neighbors)]
# This function helps us to make a prediction with neighbors
# The inputs are the test row, train set and number of neighbors
# The return is the predicted class.
def predict_classification(train, test_row, num_neighbors):
    """Predict the class of *test_row* by majority vote among its nearest neighbors.

    The class label is assumed to be the last element of each training row.
    """
    # BUG FIX: the arguments were previously passed as (train, test_row, ...),
    # swapped relative to get_neighbors' (test_row, train, num_neighbors)
    # signature, which made it iterate the single test row as if it were the
    # training set and raise on the first distance computation.
    neighbors = get_neighbors(test_row, train, num_neighbors)
    output_values = [row[-1] for row in neighbors]
    # Majority vote over the neighbors' labels.
    prediction = max(set(output_values), key=output_values.count)
    return prediction
|
[
"noreply@github.com"
] |
noreply@github.com
|
3ceaf60c54178c57d9bc8ac72bef832180d52f54
|
9d74af6a0d84749c86d8ac3b1941cc7aa1145f5b
|
/python/Queens_Attack_II/queens_attack_ii.py
|
992c67709c9bcf409d0fa2421fa67d7e850d3212
|
[
"MIT"
] |
permissive
|
Oscar-Rod/hackerrank
|
1957fccd98af3d9509939c3555ef6c4e41149b9b
|
8c9679f6c0160121494d251712f2fea000bc2c8c
|
refs/heads/master
| 2020-07-11T08:39:52.528896
| 2020-01-29T16:39:00
| 2020-01-29T16:39:00
| 204,492,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,578
|
py
|
import unittest
def queensAttack(n, k, r_q, c_q, obstacles):
    """Count the squares a queen at (r_q, c_q) can attack on an n x n board.

    Coordinates are 1-based; obstacle[0] is the row and obstacle[1] the column.
    Strategy: scan the obstacles once, remembering only the one nearest to the
    queen in each of the eight directions, then add up the reachable squares
    per direction.  ``k`` (the advertised obstacle count) is never used —
    ``obstacles`` itself carries that information.
    """
    # Nearest blocking obstacle per direction; None means the ray is clear.
    up_obstacle = None
    down_obstacle = None
    right_obstacle = None
    left_obstacle = None
    up_right_obstacle = None
    down_right_obstacle = None
    up_left_obstacle = None
    down_left_obstacle = None
    for obstacle in obstacles:
        # Same column, larger row: keep the smallest such row (closest above).
        if is_over_the_queen(obstacle, r_q, c_q):
            if up_obstacle is None:
                up_obstacle = obstacle
            elif obstacle[0] < up_obstacle[0]:
                up_obstacle = obstacle
        # Same column, smaller row: keep the largest such row (closest below).
        if is_under_the_queen(obstacle, r_q, c_q):
            if down_obstacle is None:
                down_obstacle = obstacle
            elif obstacle[0] > down_obstacle[0]:
                down_obstacle = obstacle
        # Same row, larger column: keep the smallest such column.
        if is_right_of_the_queen(obstacle, r_q, c_q):
            if right_obstacle is None:
                right_obstacle = obstacle
            elif obstacle[1] < right_obstacle[1]:
                right_obstacle = obstacle
        # Same row, smaller column: keep the largest such column.
        if is_left_of_the_queen(obstacle, r_q, c_q):
            if left_obstacle is None:
                left_obstacle = obstacle
            elif obstacle[1] > left_obstacle[1]:
                left_obstacle = obstacle
        # Diagonals: on a diagonal the row offset equals the column offset in
        # magnitude, so comparing either coordinate picks the nearest obstacle.
        if is_upper_right_of_the_queen(obstacle, r_q, c_q):
            if up_right_obstacle is None:
                up_right_obstacle = obstacle
            elif obstacle[1] < up_right_obstacle[1]:
                up_right_obstacle = obstacle
        if is_lower_left_of_the_queen(obstacle, r_q, c_q):
            if down_left_obstacle is None:
                down_left_obstacle = obstacle
            elif obstacle[1] > down_left_obstacle[1]:
                down_left_obstacle = obstacle
        if is_upper_left_of_the_queen(obstacle, r_q, c_q):
            if up_left_obstacle is None:
                up_left_obstacle = obstacle
            elif obstacle[0] < up_left_obstacle[0]:
                up_left_obstacle = obstacle
        if is_lower_right_of_the_queen(obstacle, r_q, c_q):
            if down_right_obstacle is None:
                down_right_obstacle = obstacle
            elif obstacle[0] > down_right_obstacle[0]:
                down_right_obstacle = obstacle
    # Per direction: squares up to the blocking obstacle (exclusive), or to
    # the board edge when the ray is clear.  Diagonals stop at whichever
    # board edge comes first, hence the min(...) of the two edge distances.
    number_of_movements = 0
    if up_obstacle is not None:
        number_of_movements += up_obstacle[0] - r_q - 1
    else:
        number_of_movements += max(n - r_q, 0)
    if up_left_obstacle is not None:
        number_of_movements += up_left_obstacle[0] - r_q - 1
    else:
        number_of_movements += min(max(n - r_q, 0), max(c_q - 1, 0))
    if up_right_obstacle is not None:
        number_of_movements += up_right_obstacle[0] - r_q - 1
    else:
        number_of_movements += min(max(n - r_q, 0), max(n - c_q, 0))
    if down_obstacle is not None:
        number_of_movements += r_q - down_obstacle[0] - 1
    else:
        number_of_movements += max(r_q - 1, 0)
    if down_left_obstacle is not None:
        number_of_movements += r_q - down_left_obstacle[0] - 1
    else:
        number_of_movements += min(max(r_q - 1, 0), max(c_q - 1, 0))
    if down_right_obstacle is not None:
        number_of_movements += r_q - down_right_obstacle[0] - 1
    else:
        number_of_movements += min(max(r_q - 1, 0), max(n - c_q, 0))
    if left_obstacle is not None:
        number_of_movements += c_q - left_obstacle[1] - 1
    else:
        number_of_movements += max(c_q - 1, 0)
    if right_obstacle is not None:
        number_of_movements += right_obstacle[1] - c_q - 1
    else:
        number_of_movements += max(n - c_q, 0)
    return number_of_movements
def is_over_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* sits in the queen's column, on a higher row."""
    same_column = obstacle[1] == c_q
    return same_column and obstacle[0] > r_q
def is_under_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* sits in the queen's column, on a lower row."""
    same_column = obstacle[1] == c_q
    return same_column and obstacle[0] < r_q
def is_right_of_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* sits in the queen's row, in a higher column."""
    same_row = obstacle[0] == r_q
    return same_row and obstacle[1] > c_q
def is_left_of_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* sits in the queen's row, in a lower column."""
    same_row = obstacle[0] == r_q
    return same_row and obstacle[1] < c_q
def is_upper_right_of_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* lies on the queen's up-right diagonal."""
    dr = obstacle[0] - r_q
    dc = obstacle[1] - c_q
    return dr == dc and dr > 0 and dc > 0
def is_lower_left_of_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* lies on the queen's down-left diagonal."""
    dr = obstacle[0] - r_q
    dc = obstacle[1] - c_q
    return dr == dc and dr < 0 and dc < 0
def is_upper_left_of_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* lies on the queen's up-left diagonal."""
    dr = obstacle[0] - r_q
    dc = obstacle[1] - c_q
    return abs(dr) == abs(dc) and dr > 0 and dc < 0
def is_lower_right_of_the_queen(obstacle, r_q, c_q):
    """True if *obstacle* lies on the queen's down-right diagonal."""
    dr = obstacle[0] - r_q
    dc = obstacle[1] - c_q
    return abs(dr) == abs(dc) and dr < 0 and dc > 0
class TestStringMethods(unittest.TestCase):
    """Regression tests for queensAttack: two HackerRank samples plus a file-driven case."""

    def test_1(self):
        # 4x4 board, queen in the corner, no obstacles: 9 reachable squares.
        self.assertEqual(queensAttack(4, 0, 4, 4, []), 9)

    def test_2(self):
        obstacles = [[5, 5], [4, 2], [2, 3]]
        self.assertEqual(queensAttack(5, 3, 4, 3, obstacles), 10)

    def test_6(self):
        # Fixture format: first line "n k", second line "r_q c_q",
        # then one "row col" obstacle per line.
        with open("./test6.txt", "r") as fixture:
            line_list = [line.rstrip('\n') for line in fixture]
        nk = line_list.pop(0).split(" ")
        n = int(nk[0])
        k = int(nk[1])
        rc = line_list.pop(0).split(" ")
        r_q = int(rc[0])
        c_q = int(rc[1])
        obstacles = [list(map(int, line.split())) for line in line_list]
        self.assertEqual(queensAttack(n, k, r_q, c_q, obstacles), 40)
if __name__ == '__main__':
    # Discover and run the unittest suite when executed directly.
    unittest.main()
|
[
"rguez.r.os@gmail.com"
] |
rguez.r.os@gmail.com
|
247fe090f43fd0a83855df1f9f2e432ddd933952
|
050d969572da4168df2184c43177b555db9f4391
|
/Pandata_Python/Pandata_Python/TwitterManager.py
|
ee297531d004c7b61a916d216c3c21d9cc9744be
|
[
"MIT"
] |
permissive
|
adrienyou/Pandata_Python
|
588145890537aeaac6dadd16655a5cc59b5b3c0f
|
fe871706a476ca3432f242c6ae585f241321275a
|
refs/heads/master
| 2021-03-27T10:14:45.793423
| 2015-03-21T18:12:25
| 2015-03-21T18:12:25
| 28,859,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,072
|
py
|
"""
@authors: YOU
"""
from twython import TwythonStreamer
import json
import pymongo
import Constants
import Variables
import MongoManager
import TextAnalysis
class TweetStreamer(TwythonStreamer):
    """Twython streamer that analyses and persists every incoming tweet.

    Relies on the project modules Constants/Variables/MongoManager/
    TextAnalysis; their behaviour is not visible from this file.
    """

    def on_success(self, data):
        """Called by Twython for each tweet received on the stream."""
        # Connect to the default host and port. Can be specified : MongoClient('localhost', 27017)
        client = pymongo.MongoClient()
        # Create the tweet under the format we want
        tweet = dataWorker(data)
        # Call the text analysis worker and get the response (dictPosWords, dictNegWords, emotion)
        response = TextAnalysis.getTextEmotion(tweet)
        # Add new field to tweet
        tweet[Constants.ResearchField.EMOTION] = response[Constants.ResearchField.EMOTION]
        # Insert the tweet in the research collection we want
        MongoManager.insertTweetInCollection(tweet, Variables.Database.COLL_NAME, Variables.Database.DB_NAME, client)
        # Modify the macro data with the response from emotion analysis
        MongoManager.modifyMacroInCollection(response, Variables.Database.COLL_NAME, Variables.Database.DB_NAME, client)

    def on_error(self, status_code, data):
        """Called by Twython on stream errors; only logs the HTTP status."""
        print(status_code)
        # Want to stop trying to get data because of the error?
        # Uncomment the next line!
        # self.disconnect()
""" Worker applied to the data from the stream """
def dataWorker(data):
    """Translate one raw tweet payload from the stream into our tweet dict.

    Each field is delegated to a dedicated *Worker helper; field names come
    from the project Constants module.  'created_at' and 'text' are encoded
    to UTF-8 bytes before being handed on (the workers decode them back).
    """
    tweet = createTweet(created_atWorker(data[Constants.TwitterField.CREATED].encode('utf-8')),
                        entitiesWorker(data[Constants.TwitterField.ENTITIES][Constants.TwitterField.HASHTAGS], data[Constants.TwitterField.ENTITIES][Constants.TwitterField.SYMBOLS]),
                        data[Constants.TwitterField.FAVORITED],
                        data[Constants.TwitterField.LANG],
                        placeWorker(data[Constants.TwitterField.PLACE]),
                        data[Constants.TwitterField.RETWEET],
                        data[Constants.TwitterField.TEXT].encode('utf-8'),
                        userWorker(data[Constants.TwitterField.USER])
                        )
    return tweet
""" Return the month number """
def getMonth(strMonth):
    """Return the two-digit month number for a three-letter month name.

    Replaces the original 12-branch if/elif ladder with a lookup table.
    Unknown names fall back to "12", matching the original final 'else'.
    """
    months = {
        'Jan': "01", 'Feb': "02", 'Mar': "03", 'Apr': "04",
        'May': "05", 'Jun': "06", 'Jul': "07", 'Aug': "08",
        'Sep': "09", 'Oct': "10", 'Nov': "11", 'Dec': "12",
    }
    return months.get(strMonth, "12")
""" Worker applied to the 'created_at' field from the data from the stream """
def created_atWorker(field):
    """Parse Twitter's created_at UTF-8 bytes into {hour, day, month, year}.

    Example input: b"Wed Aug 27 13:08:45 +0000 2008".
    """
    parts = str(field, encoding='utf-8').split(' ')
    date = {
        'hour': parts[3].split(':')[0],
        'day': parts[2],
        'month': getMonth(parts[1]),
        'year': parts[5],
    }
    return date
""" Worker applied to the 'entites' field from the data from the stream """
def entitiesWorker(hashtagsField, symbolsField):
    """Collect the non-empty hashtag and symbol texts from a tweet.

    Returns {'hashtags': [...], 'symbols': [...]}.  Comprehensions replace
    the manual append loops; the original loop variable 'hash' also
    shadowed the built-in of the same name.
    """
    return {
        'hashtags': [tag['text'] for tag in hashtagsField if tag['text']],
        'symbols': [sym['text'] for sym in symbolsField if sym['text']],
    }
""" Worker applied to the place field from the data from the stream """
def placeWorker(place):
    """Return the best available location string for a tweet.

    Prefers the country name, then the country code; returns the literal
    string 'null' when neither is present or place is falsy.  dict.get()
    also tolerates payloads missing those keys entirely (the original
    indexing raised KeyError).
    """
    if not place:
        return 'null'
    return place.get('country') or place.get('country_code') or 'null'
""" Worker applied to the user field from the data from the stream """
def userWorker(user):
    """Extract name, followers_count and time_zone from a tweet's user dict.

    Falsy or missing values become the literal string 'null'; a falsy user
    yields an empty dict, as before.  NOTE(review): like the original, a
    followers_count of 0 is treated as missing because the check is
    truthiness-based -- confirm whether 0 should be kept.
    """
    if not user:
        return {}
    # dict.get() tolerates missing keys; the original indexing raised KeyError.
    return {field: (user.get(field) or 'null')
            for field in ('name', 'followers_count', 'time_zone')}
def createTweet(v_created_at, v_entities, v_favorited, v_lang, v_place, v_retweet, v_text, v_user):
    """Assemble the tweet dict that gets stored; v_text arrives as UTF-8 bytes."""
    return {
        'created_at': v_created_at,
        'entities': v_entities,
        'favorited': v_favorited,
        'lang': v_lang,
        'place': v_place,
        'retweet': v_retweet,
        'text': str(v_text, encoding='utf-8'),
        'user': v_user,
    }
""" Return obj with a JSON format """
def to_JSON(obj):
    """Serialize obj to pretty-printed JSON with alphabetically sorted keys."""
    return json.dumps(obj, indent=4, sort_keys=True)
|
[
"adrien.you@student.ecp.fr"
] |
adrien.you@student.ecp.fr
|
97115a5d5c194dc49f8fff0887fb410dffe587cd
|
a91ee43d466f765e8a2cc2ed5be7881ba2a18783
|
/profiles_api/admin.py
|
5f4f2f6ac0fe5e52909ddc1c15c5cd4b7fb22458
|
[
"MIT"
] |
permissive
|
divyajpodar/profile_rest_api
|
4e664e0aa3499d1e0b80f696b17af83e5f0091bc
|
8c8335373f0b13834965f23a8757a9807afda23d
|
refs/heads/master
| 2022-05-02T22:14:21.609300
| 2020-01-06T03:43:05
| 2020-01-06T03:43:05
| 231,166,744
| 0
| 0
|
MIT
| 2022-04-22T23:04:00
| 2020-01-01T02:18:47
|
Python
|
UTF-8
|
Python
| false
| false
| 125
|
py
|
from django.contrib import admin
from . import models
# Register your models here.
# Expose UserProfile in the Django admin with the default ModelAdmin options.
admin.site.register(models.UserProfile)
|
[
"divpodar@gmail.com"
] |
divpodar@gmail.com
|
e8bd9944d02bc4a7cdfe64bf79fe0b622779f797
|
294e8b9fd90864c4fcbc28c7a93fd93e65cfecc4
|
/elastic/elastic_search.py
|
6b230d015c16fbace849268e33cae7f93335361a
|
[] |
no_license
|
liujustin/Simply-Search
|
afaff4bb06057a8d8324d70118d5a3c5a25ebc53
|
134cf15386cabc84626a88fe894995e709606e1d
|
refs/heads/master
| 2021-03-19T13:25:26.855567
| 2018-07-29T02:35:14
| 2018-07-29T02:35:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
import os
import elasticsearch
import json
import sys
from bson import json_util
class Elastic_Search:
    """Thin wrapper around an Elasticsearch client fed from a MongoDB cursor.

    NOTE(review): uses Python 2 print statements; runs only on Python 2.
    """

    def __init__(self, hosts):
        # Remember the host list and open the client eagerly.
        self.hosts = hosts
        self.es = elasticsearch.Elasticsearch(hosts=hosts)

    def initiate(self, mongo_db):
        """
        Takes in a mongo_db instance, initializes elastic search and indexes the results from mongo.
        """
        try:
            # mongodb cursor
            cursor = mongo_db.find({})
            # for each entry in mongodb, index an elasticsearch
            # The "elasticsearch" index is recreated from scratch each call.
            if self.es.indices.exists(index="elasticsearch"):
                self.es.indices.delete(index="elasticsearch")
            self.es.indices.create(index='elasticsearch', ignore=400)
            print '---Indexing data into elastic search---'
            for document in cursor:
                # Reuse Mongo's ObjectId string as the Elasticsearch doc id.
                self.es.index(index="elasticsearch",
                              doc_type="sample_data",
                              id=json.loads(json_util.dumps(document['_id'])).get("$oid"),
                              body={"data": document["sample_data"]["name"], "date":document["sample_data"]["date"]})
            # refresh indices in elastic
            self.es.indices.refresh(index="elasticsearch")
            print '---Data successfully indexed---'
        except:
            # NOTE(review): bare except swallows every failure (including
            # KeyboardInterrupt) and only prints it -- consider narrowing.
            print "Unexpected Error: ", sys.exc_info()

    def search_elastic(self, search_term):
        """
        Searches the database for a specified search term and returns the result.
        """
        # match_phrase query on the "data" field; returns the raw hit list.
        return self.es.search(index="elasticsearch", doc_type="sample_data", size=1000, body={"query":{"match_phrase":{"data": search_term}}})['hits']['hits']
|
[
"jxl3908@rit.edu"
] |
jxl3908@rit.edu
|
973a8b519d0ecebd2d6d553f93b4dd06ffde7f56
|
f1a38db672ee3051fd96dd3bb923e9d7be3036c6
|
/tests/test_load.py
|
ce4bfa2c01ad1f67111dac78bdd4936e5f006211
|
[
"Apache-2.0"
] |
permissive
|
zhangxinxf/dwml
|
47e16ba6067ff3dee2d8b02096d7da2819453515
|
406130cff0e158687246e20ea289190260e4329f
|
refs/heads/master
| 2021-01-21T23:54:32.907129
| 2016-06-17T01:58:53
| 2016-06-17T01:58:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from base import PROJECT_ROOT
from dwml import omml
class TestLoad(unittest.TestCase):
    """Check that omml.load() converts each fixture XML file to the expected LaTeX.

    The original repeated the same manual-counter loop in every test; the
    shared _assert_latex helper removes that duplication.
    """

    def _assert_latex(self, filename, expected):
        # Load tests/<filename> and compare each formula to expected, in order.
        for i, omath in enumerate(omml.load(PROJECT_ROOT + '/tests/' + filename)):
            self.assertEqual(omath.latex, expected[i])

    def test_load_ml(self):
        self._assert_latex('simple.xml', [
            r'\sin(\sqrt[3]{x})^{x^{11}}/_{b}x_{m}^{n}',
            r'\tilde{a}\begin{array}{c}a=b+c\\d+e=f\end{array}\\\underline{cdef}'])

    def test_load_group(self):
        self._assert_latex('group.xml', [
            r'A\overbrace{123}\underbrace{456}=\left\{a+b\right.'])

    def test_load_lim(self):
        self._assert_latex('lim.xml', [
            r'A=log_{x}y\max_{0\leq x\leq 1}xe^{−x^{2}}\lim_{1\to \infty }a\overset{def}{=}x'])

    def test_load_m(self):
        self._assert_latex('m.xml', [
            r'A=\left(\begin{matrix}1&2&3\\4&5&6\end{matrix}\right)\sum_{1}^{20}x'])

    def test_load_d(self):
        self._assert_latex('d.xml', [
            r'\left\{\begin{array}{c}m+1\leq 2m-1\\m+1>5\end{array}\right.'])

    def test_escape(self):
        self.assertEqual(omml.escape_latex(r'\\\\\\'), '\\\\\\')
if __name__ == '__main__':
unittest.main()
|
[
"xilei125@163.com"
] |
xilei125@163.com
|
2c20bf56f31e55fefd4e0b352cc38047c153827f
|
c152d0c541b784e5859367813f8d5c270949d73b
|
/blogengine/blogengine/settings.py
|
a54887048509c688da2c0b7ba2723e8e77a291b7
|
[] |
no_license
|
ivadin/django-projects
|
7ceb5e3ce5a14e696178ccbbab18dd0838519a83
|
d2f2b7aeea50533a7aace4aaf163ce09032a16d3
|
refs/heads/master
| 2020-04-08T00:00:46.576931
| 2018-11-23T13:10:30
| 2018-11-23T13:10:30
| 158,833,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,178
|
py
|
"""
Django settings for blogengine project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 't30he6@%%mmxxv!r^e9n8b@z1_@^(cw11gtbf_2)kv1=d^0$7q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogengine.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogengine.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"ivadin@rambler.ru"
] |
ivadin@rambler.ru
|
3a4dcd69f4f38418bd714df065790230b8476157
|
7546742a3131fa90cc9466a08ec521e232f09da2
|
/mousse_rocket_genetics.py
|
27e17bb326a439cc5dfca7b8608e9488c088d589
|
[] |
no_license
|
gabrielandm/genetic_mousses
|
f504ec5c7480d651c15961fbad7d32709e310209
|
13a9a540e1332aca4a4ea22e150539ca6c471fa7
|
refs/heads/main
| 2023-05-05T07:22:03.268686
| 2021-05-24T12:55:38
| 2021-05-24T12:55:38
| 365,259,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,029
|
py
|
'''
Angle
Thrust limit
Engine flow rate (between 100% and 70% or stuck at 0%)
Fitness -> Height and Speed
Rule:
Fuel limit
'''
from math import sqrt, pi, atan, sin, cos
from random import random
from collections import namedtuple

# Simulation state carried between integration steps.
Rocket = namedtuple('Rocket', ['m', 'vy', 'vx', 'h'])
# m  -> Rocket's mass
# vy -> Rocket's vertical velocity
# vx -> Rocket's orbital velocity
# h  -> Orbital height

# BUG FIX: the original wrote exponents with '^', which is bitwise XOR in
# Python and raises TypeError on floats (e.g. 1.3*10^4); scientific
# notation expresses the intended values.
cd = 0.4                # Drag coefficient
mo = 1.3e4              # Initial rocket mass [kg] # UwU
D = 1.2                 # Rocket diameter [meters]
Mf = 1.25e4             # Fuel mass [kg]
dmdt = 650              # Engine fuel flow rate [kg]
dt = 0.3                # Iteration time [s]
gimble_limit = 7        # Gimble limit [degree]
orbital_height = 1e6    # Final height [meters]
G = 6.67e-11            # Gravitational constant
M = 5.97223e24          # Earth mass [kg]
R = 6.3781e6            # Earth radius [m]
thrust = 2.25e5         # Max thrust [NEWtown]
def newton_gravity(h):
    """Gravitational acceleration [m/s^2] at height h above the surface.

    g = G*M / (R + h)^2.  BUG FIX: the exponent was written with '^'
    (bitwise XOR), which raises TypeError on floats; '**' is the power
    operator.
    """
    global G
    global M
    global R
    return (G * M) / (R + h) ** 2
def atm_pressure(h, g):
    """Atmospheric pressure [atm] at height h under gravity g.

    Barometric formula with a linear temperature profile T = To + a*h.
    BUG FIX: the exponent was written with '^' (bitwise XOR); '**' is the
    power operator.  NOTE(review): the exponent reads (-M*g/a*R), i.e.
    (-M*g*R/a); the textbook form is -M*g/(a*R) -- kept as written pending
    confirmation.
    """
    Po = 1        # Sea-level pressure [atm]
    a = 1 / 25    # Temperature lapse rate
    To = 298      # Sea-level temperature [K]
    M = 29        # Molar mass of air
    R = 0.082     # Gas constant
    # in atm
    return Po * ((a * h + To) / To) ** (-M * g / a * R)
def air_density(P, h):
    """Air density from pressure P via the ideal gas law; 0 above the cutoff.

    BUG FIX: the altitude cutoff was written 8.5*10^6 -- '^' is bitwise XOR,
    not the power operator.
    """
    M = 29       # Molar mass of air
    R = 0.082    # Gas constant
    T = 298      # Temperature [K]
    if h > 8.5e6:
        # Effectively vacuum above this altitude.
        return 0
    else:
        return (P * M) / (R * T)
def s_big_calculator():  # cross_sectional_area
    """Rocket cross-sectional area [m^2] from the global diameter D.

    BUG FIX: D^2 used bitwise XOR; '**' is the power operator.
    """
    global D
    return (pi * D ** 2) / 4
def air_drag(V, h):
    """Aerodynamic drag force [N] on the rocket at speed V and height h.

    BUG FIXES: (1) the local result of air_density() was assigned to a name
    shadowing the function itself, causing UnboundLocalError on every call;
    (2) V^2 used bitwise XOR instead of the power operator.
    """
    rho = air_density(atm_pressure(h, newton_gravity(h)), h)
    global cd
    S = s_big_calculator()
    # NOTE(review): rho is computed but unused in the force below, exactly
    # as in the original F = (S * V^2 * cd) * 0.5 -- confirm intent.
    F = (S * V ** 2 * cd) * 0.5
    return F
def orbital_velocity():
    """Circular orbital speed [m/s] at the target orbital_height."""
    global orbital_height
    global G
    global M
    global R
    mu = G * M
    return sqrt(mu / (R + orbital_height))
def rocket_weight(g, m):
    """Weight force [N] for mass m under gravitational acceleration g."""
    return g * m
def rocket_aceleration(F, m):
    """Acceleration for force F acting on mass m (used for either axis)."""
    # | ax | ay |
    return F / m
def rocket_velocity_variation(a):
    """Velocity change over one global time step dt for acceleration a."""
    global dt
    # | dvdt |
    return a * dt
def rocket_mass_variation(throtle):
    """Mass change (negative) over one step at the given throttle fraction."""
    global dt
    global dmdt
    burned = dmdt * dt * throtle
    return -burned
def things_calculator(rocket_stuff):
    """Advance the rocket one time step with a random gimbal and throttle.

    Returns (mf, afx, afy, vfx, vfy, hf): new mass, accelerations,
    velocities and altitude.

    BUG FIX: rocket_mass_variation() was called without its required
    'throtle' argument (TypeError); the sampled throttle is now passed.
    """
    global thrust
    # Current rocket state.
    m = rocket_stuff.m
    vy = rocket_stuff.vy
    vx = rocket_stuff.vx
    h = rocket_stuff.h
    # Random gimbal angle in [-7, 7] degrees.
    gimble = random() * 14 - 7
    # Random throttle in [0.7, 1.0].
    throtle = random() * 0.3 + 0.7
    # New rocket mass after this step's burn.
    mf = m + rocket_mass_variation(throtle)
    # Flight-path angle; straight up when there is no horizontal speed.
    if vx == 0:
        teta = pi / 2
    else:
        teta = atan(vy / vx)
    # Engine thrust components.
    # NOTE(review): 'gimble' is in degrees while sin/cos expect radians --
    # kept as written; confirm the intended units.
    Fmx = thrust * throtle * cos(teta + gimble)
    Fmy = thrust * throtle * sin(teta + gimble)
    # Net forces: thrust minus drag (and weight on the vertical axis).
    Fx = Fmx - air_drag(vx, h)
    Fy = Fmy - air_drag(vy, h) - rocket_weight(newton_gravity(h), mf)
    afx = rocket_aceleration(Fx, mf)
    afy = rocket_aceleration(Fy, mf)
    vfx = vx + rocket_velocity_variation(afx)
    vfy = vy + rocket_velocity_variation(afy)
    # New altitude.
    hf = h + vfy * dt
    return mf, afx, afy, vfx, vfy, hf
''' Limits
things_calculator() until:
m <= mo - Mf
h >= orbital_height
vx > orbital_velocity
vx == orbital_velocity && vy != 0
'''
''' Fitness
vx == orbital_velocity
h == orbital_height
vy == 0
'''
|
[
"gabriel.paoli@kumulus.com.br"
] |
gabriel.paoli@kumulus.com.br
|
c116d5ed45b73688faf291bb45ee75687c5466ea
|
93c25a7cd79bc997342b81737836113a8b1ee3ae
|
/An Introduction to Interactive Programming in Python (Part 1)/Practice problems/Week 1a/EX9-NameandAge.py
|
f8b052b800fc448a51dbbd8330db708e482abd55
|
[] |
no_license
|
monishappusamy/coursera
|
28897f686e10e2e1a29f7ffbfad3b6a4af5db4fd
|
129bc63787f93abc98671a2cd25234e44966dd45
|
refs/heads/master
| 2021-01-10T06:13:38.625914
| 2015-10-21T03:57:32
| 2015-10-21T03:57:32
| 36,129,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
# Compute the statement about a person's name and age, given the person's name and age.
###################################################
# Name and age formula
# Student should enter function on the next lines.
def name_and_age(name, age):
    """Return the sentence '<name> is <age> years old.'."""
    return "{0} is {1} years old.".format(name, age)
###################################################
# Tests
# Student should not change this code.
def test(name, age):
    # Print one formatted sentence per call (Python 2 print statement).
    print name_and_age(name, age)
test("Joe Warren", 52)
test("Scott Rixner", 40)
test("John Greiner", 46)
###################################################
# Expected output
# Student should look at the following comments and compare to printed output.
#Joe Warren is 52 years old.
#Scott Rixner is 40 years old.
#John Greiner is 46 years old.
|
[
"monishappusamy@gmail.com"
] |
monishappusamy@gmail.com
|
d7420989469dab17d9f1146e6f856d16c343fb1e
|
054eefaa17157b32869ea986347b3e539d2bf06b
|
/big_o_coding/Green_06/midterm1.py
|
bbd16356c363c790e981dd3ec7c049cf0c48699b
|
[] |
no_license
|
baocogn/self-learning
|
f2cb2f45f05575b6d195fc3c407daf4edcfe7d0e
|
f50a3946966354c793cac6b28d09cb5dba2ec57a
|
refs/heads/master
| 2021-07-12T23:32:14.728163
| 2019-02-10T14:24:46
| 2019-02-10T14:24:46
| 143,170,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# Tiered pricing read from stdin: the first unit costs 15000, units 2-5
# cost 13500 each, units 6+ cost 11000 each; 12 or more units earn a 10%
# discount (truncated to an integer).  n <= 0 falls through and prints 1,
# as in the original.
n = int(input())
result = 1
if n == 1:
    result = 1 * 15000
elif 2 <= n <= 5:
    result = 1 * 15000 + (n - 1) * 13500
elif 6 <= n <= 11:
    result = 1 * 15000 + 4 * 13500 + (n - 5) * 11000
elif n >= 12:
    result = int((1 * 15000 + 4 * 13500 + (n - 5) * 11000) * (1 - 0.1))
print(result)
|
[
"baocogn123@gmail.com"
] |
baocogn123@gmail.com
|
4fcd7f02528a475c4a5e347b44c26c46a151141f
|
c37d94976f42118b392637f0c3a0ed23f13cb522
|
/5/week4/circuit_design/circuit_design.py
|
05bfaadf737317fe3647d636882f092bbd42ce19
|
[] |
no_license
|
mcgaw/psychic-garbanzo
|
e992eec01efd471a720c5f2dddaf38ff467d77c0
|
48b43069928c36ba09dcdcec78ff49a9f050ae33
|
refs/heads/master
| 2021-09-17T14:03:03.778301
| 2017-11-07T11:30:25
| 2017-11-07T11:30:25
| 109,826,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,841
|
py
|
# python3
import sys
import threading
import collections
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**26) # new thread will get stack of such size
import itertools
def conn_comp(edges):
    """
    Tarjan's algorithm for connected
    components.
    """
    # Collect every vertex mentioned by any edge.
    vertices = set(v for v in itertools.chain(*edges))
    # DFS bookkeeping: discovery index and low-link per vertex (-1 = unvisited).
    indices = dict((v, -1) for v in vertices)
    lowlinks = indices.copy()
    ccs = []
    index = 0
    stack = []
    for v in vertices:
        if indices[v] < 0:
            # NOTE(review): 'index' is an int passed by value, so increments
            # inside strong_connect never propagate back here -- discovery
            # indices are not globally unique.  The 2-SAT solver below uses
            # post_orders_/connected_component_ instead of this function.
            strong_connect(v, edges, indices, lowlinks, ccs, index, stack)
    return ccs
def strong_connect(vertex, edges, indices, lowlinks, ccs, index, stack):
    """One DFS visit of Tarjan's SCC algorithm rooted at vertex.

    Appends each completed component (vertices popped off 'stack') to ccs.
    NOTE(review): 'index' is an int parameter, so 'index += 1' is local to
    this call and every recursive call receives the caller's value --
    discovery indices are therefore not unique; confirm before relying on
    this function.
    """
    indices[vertex] = index
    lowlinks[vertex] = index
    index += 1
    stack.append(vertex)
    # Follow every out-edge of this vertex.
    for v, w in [e for e in edges if e[0] == vertex]:
        if indices[w] < 0:
            # Tree edge: recurse, then pull the child's low-link up.
            strong_connect(w, edges, indices, lowlinks, ccs, index, stack)
            lowlinks[v] = min(lowlinks[v], lowlinks[w])
        elif w in stack:
            # Back edge to a vertex still on the stack.
            lowlinks[v] = min(lowlinks[v], indices[w])
    if indices[vertex] == lowlinks[vertex]:
        # vertex is a component root: pop the whole component off the stack.
        ccs.append([])
        while stack[-1] != vertex:
            ccs[-1].append(stack.pop())
        ccs[-1].append(stack.pop())
class OrderedSet(collections.MutableSet):
    """Set that remembers insertion order (classic doubly-linked-list recipe).

    NOTE(review): collections.MutableSet moved to collections.abc in
    Python 3.3 and the old alias was removed in 3.10.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.map = {}                   # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        # Append key at the tail of the linked list if not already present.
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]

    def discard(self, key):
        # Unlink the node and drop it from the map; no-op when absent.
        if key in self.map:
            key, prev, next = self.map.pop(key)
            prev[2] = next
            next[1] = prev

    def __iter__(self):
        # Walk the linked list head-to-tail (insertion order).
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list tail-to-head.
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def pop(self, last=True):
        """Remove and return the last (or first) element; KeyError if empty."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order-sensitive vs another OrderedSet, order-free vs plain sets.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
def post_orders(adjacents):
    """
    Order the nodes of the graph according to their
    post order. Uses (possibly repeated) Depth First Search on
    the graph.
    """
    vertices = set([node for node in range(len(adjacents))])

    def dfs(node, order, traversed):
        # Iterative DFS: a node is emitted (post-order) only once all of its
        # unvisited neighbours are exhausted ('moving_up').
        q = collections.deque([node])
        while len(q) > 0:
            node = q.pop()
            traversed.add(node)
            moving_up = True
            to_add = []
            for adj in adjacents[node]:
                if adj in traversed:
                    continue
                moving_up = False
                to_add.append(adj)
            if moving_up:
                order.add(node)
                if node in vertices:
                    vertices.remove(node)
            else:
                # Re-push the node beneath its children so it is revisited
                # after they complete.
                q.append(node)
                for n in to_add:
                    q.append(n)

    post_order = OrderedSet([])
    traversed = set([])
    # NOTE(review): this rebinding shadows the identical 'vertices' built above.
    vertices = set([node for node in range(len(adjacents))])
    while True:
        # Restart DFS from any remaining vertex until every node is ordered.
        dfs(vertices.pop(), post_order, traversed)
        if len(post_order) == len(adjacents):
            break
    assert len(post_order) == len(adjacents)
    return list(post_order)
def post_orders_(adjacents):
    """
    Order the nodes of the graph according to their
    post order. Uses (possibly repeated) Depth First Search on
    the graph.
    """
    def dfs(node, order, traversed):
        # Recursive DFS; node is appended after all its children (post-order).
        traversed.add(node)
        for adj in adjacents[node]:
            if adj in traversed:
                continue
            dfs(adj, order, traversed)
        if node in vertices:
            vertices.remove(node)
        order.add(node)

    post_order = OrderedSet([])
    traversed = set([])
    vertices = set([node for node in range(len(adjacents))])
    while True:
        # Restart from any remaining vertex until every node is ordered.
        dfs(vertices.pop(), post_order, traversed)
        if len(post_order) == len(adjacents):
            break
    assert len(post_order) == len(adjacents)
    return list(post_order)
def connected_component_(adjacents, node, found):
    """
    Explore the graph starting at node stopping
    at dead ends and avoiding both cycles and nodes
    in the ignore list.
    """
    component = set()

    def visit(current):
        component.add(current)
        found.add(current)
        for neighbour in adjacents[current]:
            if neighbour not in found and neighbour not in component:
                visit(neighbour)

    visit(node)
    return component
def connected_component(adjacents, node, found):
    """Collect the set of nodes reachable from node, skipping 'found' ones.

    Iterative (stack-based) traversal; every visited node is also recorded
    in 'found'.
    """
    component = set()
    pending = collections.deque([node])
    while pending:
        current = pending.pop()
        # A node may be queued more than once before it is processed.
        if current in component:
            continue
        component.add(current)
        found.add(current)
        for neighbour in adjacents[current]:
            if neighbour in found or neighbour in component:
                continue
            pending.append(neighbour)
    return component
def analyse_connected_components_(n, adjacents, reverse, var_map):
    """Split the graph into components, visiting roots in reverse post-order.

    'reverse' is the reversed graph used only for the ordering pass; 'n'
    appears only in the sanity-check message; 'var_map' is unused here.
    """
    # Ensure topological ordering.
    order = post_orders_(reverse)
    #print('orders: {0}'.format(orders))
    order_pointer = len(order) - 1
    found = set([])
    ccs = []
    while order_pointer >= 0:
        # Skip roots already swallowed by an earlier component.
        if order[order_pointer] in found:
            order_pointer -= 1
            continue
        ccs.append(connected_component_(adjacents, order[order_pointer], found))
    assert len(found) == len(adjacents), 'found {0} nodes, but {1} were specified'.format(len(found), n)
    return ccs
def analyse_connected_components(n, adjacents, reverse, var_map):
    """Variant of analyse_connected_components_ using the iterative explorer
    connected_component() instead of the recursive one."""
    # Ensure topological ordering.
    order = post_orders_(reverse)
    #print('orders: {0}'.format(orders))
    order_pointer = len(order) - 1
    found = set([])
    ccs = []
    while order_pointer >= 0:
        # Skip roots already swallowed by an earlier component.
        if order[order_pointer] in found:
            order_pointer -= 1
            continue
        ccs.append(connected_component(adjacents, order[order_pointer], found))
    assert len(found) == len(adjacents), 'found {0} nodes, but {1} were specified'.format(len(found), n)
    return ccs
def build_implication_graph(n, clauses):
    """Build the 2-SAT implication graph for the given clauses.

    Each clause (a OR b) contributes the implications -a => b and -b => a.
    Returns (edges, adjacents, reversed_adjacents, node_dict, var_dict):
    node_dict maps a literal to its node id, var_dict is the inverse, and
    'edges' is kept (always empty) for interface compatibility.
    """
    edges = []
    var_dict = {}
    node_dict = {}
    node_num = 0
    adjacents = [[] for _ in range(2 * n)]
    reversed_adjs = [[] for _ in range(2 * n)]

    def node_of(literal):
        # Allocate node ids for a literal and its negation on first sight.
        nonlocal node_num
        if literal not in node_dict:
            for lit in (literal, -literal):
                var_dict[node_num] = lit
                node_dict[lit] = node_num
                node_num += 1
        return node_dict[literal]

    for clause in clauses:
        left, right = clause[0], clause[1]
        node_of(left)
        node_of(right)
        # -left => right
        adjacents[node_of(-left)].append(node_of(right))
        reversed_adjs[node_of(right)].append(node_of(-left))
        # -right => left
        adjacents[node_of(-right)].append(node_of(left))
        reversed_adjs[node_of(left)].append(node_of(-right))
    return edges, adjacents[:node_num], reversed_adjs[:node_num], node_dict, var_dict
def is_satisfiable(n, m, clauses):
    """Solve the 2-SAT instance via SCCs of its implication graph.

    Returns None when some variable and its negation share a component
    (unsatisfiable); otherwise a defaultdict mapping variable -> 0/1.

    BUG FIX: a leftover debug print(ccs) was removed -- it corrupted the
    program's stdout, which circuit_design() uses as the answer protocol
    (only SATISFIABLE/UNSATISFIABLE and the assignment may be printed).
    """
    edges, implication_g, reversed_imp_g, node_map, var_map = build_implication_graph(n, clauses)
    ccs = analyse_connected_components_(n, implication_g, reversed_imp_g, var_map)
    #ccs = analyse_connected_components(n, implication_g, reversed_imp_g, var_map)
    result = collections.defaultdict(lambda: None)
    for cc in ccs:
        cc_vars = set([])
        for node in cc:
            litteral = var_map[node]
            # x and -x in the same SCC are forced equal: unsatisfiable.
            if abs(litteral) in cc_vars:
                return None
            cc_vars.add(abs(litteral))
            # First assignment wins (components come in reverse topological
            # order).
            if result[abs(litteral)] is None:
                result[abs(litteral)] = 0 if litteral < 0 else 1
    return result
def circuit_design():
    """Read a 2-SAT instance from stdin and print the verdict/assignment."""
    # First line: variable count n and clause count m; then m clause lines.
    n, m = map(int, input().split())
    clauses = [ list(map(int, input().split())) for i in range(m) ]
    result = is_satisfiable(n, m, clauses)
    if result is None:
        print("UNSATISFIABLE")
    else:
        print("SATISFIABLE");
        # Positive literal when the variable is assigned 1, negative otherwise.
        print(" ".join(str(i if result[i] else -i) for i in range(1, n+1)))
if __name__ == '__main__':
threading.Thread(target=circuit_design).start()
|
[
"mcgaw@gmx.com"
] |
mcgaw@gmx.com
|
02ad55d12b0c96af333d4519f4dea9b94f3645e8
|
6e1f3f64d36ad7dfdd930f2d33bdc5af7fd3b6b7
|
/飞机大战/plane_main.py
|
ca3a39dfa59692b6c03b38e16e55a4d13d812d61
|
[] |
no_license
|
barrylee9527/python-games
|
7f02d7883f45a24a19c29c507e94bd87070d7a39
|
61d4feadcccdfab78e41eb14b8c7f9390634d4b2
|
refs/heads/master
| 2020-08-09T10:05:53.743539
| 2019-10-10T02:14:04
| 2019-10-10T02:14:04
| 214,065,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,919
|
py
|
from 飞机大战.plane_sprites import *
class PlaneGame(object):
    """Main game object: owns the window, clock, sprite groups and game loop."""

    def __init__(self):
        # Game scene initialisation.
        print("游戏场景初始化")
        self.screen = pygame.display.set_mode(SCREEN_RECT.size)  # create the window at the configured size
        self.clock = pygame.time.Clock()  # clock used to cap the frame rate
        self.__create__sprites()  # build the sprite groups
        pygame.time.set_timer(CREATE_ENEMY_EVENT, 1000)  # spawn an enemy every 1000 ms
        pygame.time.set_timer(HERO_FRIE_EVENT, 200)  # fire a bullet every 200 ms

    def __create__sprites(self):
        # Create the game sprites and sprite groups.
        bg1 = Background()
        bg2 = Background(True)
        self.back_group = pygame.sprite.Group(bg1, bg2)
        # Enemy group; enemies are added from the timer event.
        self.enemy_group = pygame.sprite.Group()
        # Hero sprite and its group.
        self.hero = Hero()  # the player's plane
        self.hero_group = pygame.sprite.Group(self.hero)

    def start_game(self):
        """Run the main loop: tick, events, collisions, drawing."""
        print("开始游戏")
        while True:
            self.clock.tick(TIME_SEC)
            self.__event_hander()
            self.__check_collide()
            self.__update_sprites()
            pygame.display.update()

    def __event_hander(self):
        # Event handling: quit, enemy spawn timer, fire timer.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                PlaneGame.__game_over()
            elif event.type == CREATE_ENEMY_EVENT:
                # An enemy enters the scene.
                print("敌机出场")
                enemy = Enemy()
                self.enemy_group.add(enemy)
            elif event.type == HERO_FRIE_EVENT:
                self.hero.fire()
        key_pressed = pygame.key.get_pressed()  # poll current keyboard state
        if key_pressed[pygame.K_RIGHT] or key_pressed[pygame.K_DOWN]:
            self.hero.speed = 3
        elif key_pressed[pygame.K_LEFT] or key_pressed[pygame.K_UP]:
            self.hero.speed = -3
        else:
            self.hero.speed = 0

    def __check_collide(self):
        # Collision detection: bullets destroy enemies.
        pygame.sprite.groupcollide(self.hero.bullets, self.enemy_group, True, True)
        # Enemies crashing into the hero end the game.
        enemys = pygame.sprite.spritecollide(self.hero, self.enemy_group, True)
        if len(enemys) > 0:
            self.hero.kill()
            PlaneGame.__game_over()

    def __update_sprites(self):
        # Update every sprite group and redraw it onto the screen surface.
        self.back_group.update()
        self.back_group.draw(self.screen)
        self.enemy_group.update()
        self.enemy_group.draw(self.screen)
        self.hero_group.update()
        self.hero_group.draw(self.screen)
        self.hero.bullets.update()
        self.hero.bullets.draw(self.screen)

    @staticmethod
    def __game_over():
        # Shut pygame down and terminate the process.
        pygame.quit()
        print('游戏结束')
        exit(0)
# 游戏运行
if __name__ == '__main__':
game = PlaneGame()
game.start_game()
|
[
"1763272870@qq.com"
] |
1763272870@qq.com
|
a2b8a33ac784568eaef815d0402ff1feaea3322e
|
daa84286da8b250adfed6a95e3cac73986c769cf
|
/CeasarCipher.py
|
0d36ec0770fa394dc4dfb35a0ca38c0492ff3648
|
[] |
no_license
|
Neojin629/CeasarCipher
|
3c38d8ead0c84a4b0713571e746c437f030e76a2
|
4c6b7c1b6eb37c6324248b9d55459e12f3df2957
|
refs/heads/main
| 2023-07-24T12:07:53.249646
| 2021-08-31T15:43:28
| 2021-08-31T15:43:28
| 401,757,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
# The alphabet is listed twice so a positive shift can run past 'z' without
# explicit wrap-around arithmetic (negative indexes wrap via list indexing).
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def caesar(start_text, shift_amount, cipher_direction):
    """Shift-cipher start_text by shift_amount and print the result.

    cipher_direction is 'encode' or 'decode' (decode negates the shift).
    Characters outside a-z (digits, symbols, spaces) pass through unchanged.
    IMPROVEMENT: the transformed text is now also returned so callers and
    tests can use it (backward compatible -- the original returned None and
    the existing caller ignores the return value).
    """
    end_text = ""
    if cipher_direction == "decode":
        shift_amount *= -1
    for char in start_text:
        if char in alphabet:
            position = alphabet.index(char)
            new_position = position + shift_amount
            end_text += alphabet[new_position]
        else:
            # Keep numbers/symbols/spaces as-is.
            end_text += char
    print(f"Here's the {cipher_direction}d result: {end_text}")
    return end_text
#TODO-1: Import and print the logo from art.py when the program starts.
from art import logo
print(logo)
#TODO-4: Can you figure out a way to ask the user if they want to restart the cipher program?
#e.g. Type 'yes' if you want to go again. Otherwise type 'no'.
#If they type 'yes' then ask them for the direction/text/shift again and call the caesar() function again?
#Hint: Try creating a while loop that continues to execute the program if the user types 'yes'.
# Interactive loop: keep enciphering until the user answers 'no'.
should_end = False
while not should_end:
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))
    #TODO-2: What if the user enters a shift that is greater than the number of letters in the alphabet?
    #Try running the program and entering a shift number of 45.
    #Add some code so that the program continues to work even if the user enters a shift number greater than 26.
    #Hint: Think about how you can use the modulus (%).
    # Reduce oversized shifts to the equivalent shift inside the alphabet.
    shift = shift % 26
    caesar(start_text=text, shift_amount=shift, cipher_direction=direction)
    restart = input("Type 'yes' if you want to go again. Otherwise type 'no'.\n")
    if restart == "no":
        should_end = True
        print("Goodbye")
|
[
"noreply@github.com"
] |
noreply@github.com
|
6af2e5be03f5004ba0e7cf3c2b61a359794bb422
|
3c3fc1a15f06771ea4d8ffd8144694a952759cb7
|
/UI/app/test_camera.py
|
11133065aa47fd4b786258b080266fccc926cace
|
[
"MIT"
] |
permissive
|
JeremYnov/Faice
|
57492159247267277631787ebcdf24e00df54fb5
|
de2f42c5b885831b0da02e2f7116c9eca013369a
|
refs/heads/main
| 2023-04-14T12:41:00.049826
| 2021-04-27T15:21:51
| 2021-04-27T15:21:51
| 305,392,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
'''
Camera Example
==============
This example demonstrates a simple use of the camera. It shows a window with
a button labelled 'play' to turn the camera on and off. Note that
not finding a camera, perhaps because gstreamer is not installed, will
throw an exception during the kv language processing.
'''
# Uncomment these lines to see all the messages
#from kivy.logger import Logger
#import logging
#Logger.setLevel(logging.TRACE)
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
import time
# kv-language layout for CameraClick: a live camera preview stacked above a
# play/pause toggle and a capture button. The ids/bindings here must stay in
# sync with the CameraClick class below.
Builder.load_string('''
<CameraClick>:
    orientation: 'vertical'
    Camera:
        id: camera
        resolution: (640, 480)
        play: False
    ToggleButton:
        text: 'Play'
        on_press: camera.play = not camera.play
        size_hint_y: None
        height: '48dp'
    Button:
        text: 'Capture'
        size_hint_y: None
        height: '48dp'
        on_press: root.capture()
''')
class CameraClick(BoxLayout):
    def capture(self):
        '''
        Save the current camera frame to a PNG in the working directory,
        named after the capture timestamp (IMG_YYYYmmdd_HHMMSS.png).
        '''
        camera = self.ids['camera']  # the widget declared with id 'camera' in kv
        timestr = time.strftime("%Y%m%d_%H%M%S")
        camera.export_to_png("IMG_{}.png".format(timestr))
        print("Captured")
class TestCamera(App):
    # App.build returns the root widget of the application window.
    def build(self):
        return CameraClick()
TestCamera().run()
|
[
"ow.charlon@gmail.com"
] |
ow.charlon@gmail.com
|
6fc9d8032c359a5d145d139b99cdd20fc1f2f83f
|
78f5e27e8048d71351d6228ff826e3ffc2c2891b
|
/test_shared_string.py
|
416832811fcf763b1e9685f815f5b7b8bc6043e5
|
[] |
no_license
|
ksato-dev/multiprocessing_exercises
|
c89ed1818dfc487f57f7d41f59ed6a4fe10f445d
|
0d16a7ee1923b2d21c5953fd1576c2dade734bc6
|
refs/heads/master
| 2023-01-14T14:48:44.099272
| 2020-11-14T14:15:43
| 2020-11-14T14:15:43
| 312,815,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
# -*- coding: utf-8 -*-
# Demo: sharing a mutable string between processes with multiprocessing.Manager.
# main() periodically writes a timestamped message into the shared value; the
# hoge() subprocess polls it, prints it, and clears it back to "".
from multiprocessing import Process, Manager, Lock
import time
from ctypes import c_char_p
def hoge(shared_string, lock):
    """Busy-poll the shared value; when non-empty, print it and reset it to ""."""
    while True:
        # NOTE(review): this read happens before the lock is acquired, so the
        # value can change between the check and the locked print/clear below —
        # confirm that race is acceptable for this demo.
        string = shared_string.value
        if (string != ""):
            lock.acquire()
            print(shared_string.value)
            shared_string.value = ""
            lock.release()
            print("**************************")
            print("**************************")
            print("**************************")
            print("update shared_string")
            print("**************************")
            print("**************************")
            print("**************************")
            time.sleep(0.3)
        else:
            print("hoge")  # spins without sleeping while the value is empty
def main():
    """Start the hoge() subprocess, then write a new message every 2 seconds."""
    print("Start main()")
    lock = Lock()
    manager = Manager()
    # Manager.Value proxies the string so both processes see updates.
    shared_string = manager.Value(c_char_p, "Hoge")
    hoge_subprocess = Process(target=hoge, args=(shared_string, lock))
    hoge_subprocess.start()
    while True:
        lock.acquire()
        print("main")
        shared_string.value = "Time:" + str(time.time()) + ", main() to hoge()."
        lock.release()
        time.sleep(2)
if __name__ == "__main__":
    main()
|
[
"satokazu.general@gmail.com"
] |
satokazu.general@gmail.com
|
92e0b430076045d277db8c855447dd0eaf5328a7
|
000642ddd1bb9ec993ff880c143f3b1fd905f880
|
/week8/PythonExercises/second_max.py
|
6527e9cc5d1a86ac8909158c95fb2dcceafffbc4
|
[] |
no_license
|
luikaaa27/Web_Development
|
f4de99733133f9958e0b17e05c889c91f0e5da2f
|
a2e1b595edee06c18dd952ba7ccd39ec0311eceb
|
refs/heads/master
| 2023-01-13T18:46:36.030482
| 2020-04-17T21:46:50
| 2020-04-17T21:46:50
| 237,271,297
| 0
| 0
| null | 2023-01-07T17:12:34
| 2020-01-30T17:52:59
|
Python
|
UTF-8
|
Python
| false
| false
| 176
|
py
|
# Python 2 script (uses raw_input): read a count and a list of integers,
# then print the second-largest *distinct* value.
if __name__ == '__main__':
    n = int(raw_input())  # declared list length; not otherwise used
    arr = map(int, raw_input().split())
    new_arr = set(arr)  # dedupe so ties for the maximum are ignored
    new_arr.remove(max(new_arr))
    print(max(new_arr))
|
[
"noreply@github.com"
] |
noreply@github.com
|
c8275c0263fa17dd5c699419bd33e02aa94828bc
|
384813261c9e8d9ee03e141ba7270c48592064e9
|
/new_project/fastsklearnfeature/interactiveAutoML/new_bench/multiobjective/metalearning/openml_data/private_models/randomforest/sam_node.py
|
8fbb83881a619090a269f8cb2979875d31f3c78e
|
[] |
no_license
|
pratyushagnihotri/DFS
|
b99d87c085e67888b81c19629c338dae92272a3b
|
3b60e574905e93c24a2b883cc251ecc286cb2263
|
refs/heads/master
| 2023-04-18T22:17:36.816581
| 2021-04-20T13:41:29
| 2021-04-20T13:41:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,128
|
py
|
''' A class defining the nodes in our Differentially Private Random Decision Forest '''
from collections import defaultdict
import random
import numpy as np
import math
from scipy import stats # for Exponential Mechanism
class node:
    """A node of the differentially-private random decision forest.

    Leaves accumulate per-class record counts; set_noisy_majority() then
    publishes a noisy majority label chosen via the Exponential Mechanism.
    """
    def __init__(self, parent_node, split_value_from_parent, splitting_attribute, tree_level, id, children,
                 svfp_numer=None):
        self._parent_node = parent_node
        self._split_value_from_parent = split_value_from_parent
        self._svfp_numer = svfp_numer
        self._splitting_attribute = splitting_attribute
        # self._level = tree_level # comment out unless needed. saves memory.
        # self._id = id # comment out unless needed. saves memory.
        self._children = children
        self._class_counts = defaultdict(int)  # class value -> record count at this leaf
        self._noisy_majority = None
        self._empty = 0 # 1 if leaf and has no records
        self._sensitivity = -1.0  # set by set_noisy_majority for non-empty leaves
    def add_child(self, child_node):
        """Attach a child node."""
        self._children.append(child_node)
    def increment_class_count(self, class_value):
        """Record one more training example of class_value at this leaf."""
        self._class_counts[class_value] += 1
    def set_noisy_majority(self, epsilon, class_values):
        """Choose and store this leaf's noisy majority label.

        Returns 1 when the noisy choice differs from the true majority (a
        "flipped" label), 0 otherwise. Empty leaves pick a label uniformly
        at random and return 0. Only acts on leaves, and only once.
        """
        if not self._noisy_majority and not self._children: # to make sure this code is only run once per leaf
            for val in class_values:
                if val not in self._class_counts: self._class_counts[val] = 0
            if max([v for k, v in self._class_counts.items()]) < 1:
                self._empty = 1
                self._noisy_majority = random.choice([k for k, v in self._class_counts.items()])
                return 0 # we dont want to count purely random flips
            else:
                # sensitivity shrinks exponentially with the margin between the
                # top two class counts
                all_counts = sorted([v for k, v in self._class_counts.items()], reverse=True)
                count_difference = all_counts[0] - all_counts[1]
                self._sensitivity = math.exp(-1 * count_difference * epsilon)
                self._sens_of_sens = 1.
                self._noisy_sensitivity = 1.
                self._noisy_majority = self.expo_mech(epsilon, self._sensitivity, self._class_counts)
                if self._noisy_majority != int(
                        max(self._class_counts.keys(), key=(lambda key: self._class_counts[key]))):
                    # print('majority: '+str(self._noisy_majority)+' vs. max_count: '+str( max(self._class_counts.keys(), key=(lambda key: self._class_counts[key]))))
                    return 1 # we're summing the flipped majorities
                else:
                    return 0
        else:
            return 0
    def laplace(self, e, counts):
        """Alternative mechanism: add Laplace(1/e) noise to every class count
        and return the arg-max label. (Not called by set_noisy_majority.)"""
        noisy_counts = {}
        for label, count in counts.items():
            noisy_counts[label] = max(0, int(count + np.random.laplace(scale=float(1. / e))))
        return int(max(noisy_counts.keys(), key=(lambda key: noisy_counts[key])))
    def expo_mech(self, e, s, counts):
        ''' For this implementation of the Exponential Mechanism, we use a piecewise linear scoring function,
        where the element with the maximum count has a score of 1, and all other elements have a score of 0. '''
        weighted = []
        max_count = max([v for k, v in counts.items()])
        for label, count in counts.items():
            ''' if the score is non-monotonic, s needs to be multiplied by 2 '''
            if count == max_count:
                if s < 1.0e-10:
                    power = 50 # e^50 is already astronomical. sizes beyond that dont matter
                else:
                    power = min(50, (e * 1) / (2 * s)) # score = 1
            else:
                power = 0 # score = 0
            weighted.append([label, math.exp(power)])
        # normalise the weights into a probability distribution
        # NOTE(review): local name 'sum' shadows the builtin within this method.
        sum = 0.
        for label, count in weighted:
            sum += count
        for i in range(len(weighted)):
            weighted[i][1] /= sum
        customDist = stats.rv_discrete(name='customDist',
                                       values=([lab for lab, cou in weighted], [cou for lab, cou in weighted]))
        best = customDist.rvs()
        # print("best_att examples = "+str(customDist.rvs(size=20)))
        return int(best)
|
[
"neutatz@googlemail.com"
] |
neutatz@googlemail.com
|
addce9e9601f6db6495755d3b9f0ef59ec7bae2b
|
ac6a1789722de5e37be54b39b964beef005d111d
|
/rest_registration/utils/validation.py
|
d23aac4f20409a39eba749fbb9ac6225315dc012
|
[
"MIT"
] |
permissive
|
sunbeamer/django-rest-registration
|
cd194ccf152c62802ca6f7d7a048764da8aadf8a
|
dd25b84d0151630659da4c2c17ed48d26238e006
|
refs/heads/master
| 2023-03-29T10:39:06.225559
| 2021-03-25T23:21:46
| 2021-03-25T23:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,971
|
py
|
import functools
from collections import OrderedDict
from collections.abc import Mapping
from typing import Any, Callable, Dict, Iterable, List
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError as DjangoValidationError
from django.utils.translation import gettext as _
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.settings import api_settings
from rest_registration.utils.users import (
build_initial_user,
get_user_by_verification_id
)
# Signature of a single-value validator: raises ValidationError on failure.
Validator = Callable[[Any], None]
def wrap_validation_error_with_field(field_name: str):
    """Decorator factory that namespaces validation failures.

    Any ``ValidationError`` raised by the decorated validator is re-raised
    with its detail keyed under ``field_name`` instead of surfacing as a
    non-field error.
    """
    def decorator(func: Validator):
        @functools.wraps(func)
        def wrapper(value: Any) -> None:
            try:
                func(value)
            except ValidationError as exc:
                nested = {field_name: exc.detail}
                raise ValidationError(nested) from None
        return wrapper
    return decorator
@wrap_validation_error_with_field('password_confirm')
def validate_user_password_confirm(user_data: Dict[str, Any]) -> None:
    """Fail (under the 'password_confirm' field) unless both password
    entries in *user_data* hold the same value."""
    password = user_data['password']
    confirmation = user_data['password_confirm']
    if password == confirmation:
        return
    detail = ErrorDetail(
        _("Passwords don't match"),
        code='passwords-do-not-match')
    raise ValidationError(detail)
@wrap_validation_error_with_field('password')
def validate_user_password(user_data: Dict[str, Any]) -> None:
    """Run Django's password validators against the candidate password,
    using a transient user built from the registration data for context."""
    candidate = user_data['password']
    prospective_user = build_initial_user(user_data)
    return _validate_user_password(candidate, prospective_user)
@wrap_validation_error_with_field('password')
def validate_password_with_user_id(user_data: Dict[str, Any]) -> None:
    """Like validate_user_password, but resolves the user from the
    verification id carried in *user_data* (verified or not)."""
    candidate = user_data['password']
    account = get_user_by_verification_id(
        user_data['user_id'], require_verified=False)
    return _validate_user_password(candidate, account)
def _validate_user_password(password, user) -> None:
    """Run Django's configured password validators; translate any failure
    into a DRF ``ValidationError`` (dropping the Django exception context)."""
    try:
        validate_password(password, user=user)
    except DjangoValidationError as exc:
        messages = list(exc.messages)
        raise ValidationError(messages) from None
def run_validators(validators: Iterable[Validator], value: Any) -> None:
    """Apply every validator to *value*, aggregating all failures.

    Mapping-shaped error details are merged per field; list-shaped details
    are collected as non-field errors. If anything failed, one combined
    ``ValidationError`` is raised (field errors always carry the non-field
    key as well, possibly empty, mirroring DRF's error shape).
    """
    field_errors = OrderedDict()  # type: Dict[str, Any]
    other_errors = []  # type: List[Any]
    for check in validators:
        try:
            check(value)
        except ValidationError as exc:
            detail = exc.detail
            if isinstance(detail, Mapping):
                for name, errs in detail.items():
                    field_errors.setdefault(name, []).extend(errs)
            elif isinstance(detail, list):
                other_errors.extend(detail)
    if field_errors:
        combined = {}
        combined.update(field_errors)
        combined.setdefault(
            api_settings.NON_FIELD_ERRORS_KEY, []).extend(other_errors)
        raise ValidationError(combined)
    if other_errors:
        raise ValidationError(other_errors)
|
[
"apragacz@o2.pl"
] |
apragacz@o2.pl
|
bbf0609a21b500bf3da4fb281afcfc2156eac9b3
|
c3d6e2bc2391733f1eb0f8c5e811990fdef00952
|
/dogs/migrations/0002_auto_20181206_1624.py
|
0f5acb206048e627a1b27e3e4597d952d346c38e
|
[] |
no_license
|
svich/k9
|
a753655aa9279b68f24f15f58c93203a77424676
|
1fab7b65ce43138cabff4674a5d26407f2663548
|
refs/heads/master
| 2020-04-09T19:53:40.626438
| 2018-12-07T09:39:47
| 2018-12-07T09:39:47
| 160,556,790
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
# Generated by Django 2.1.4 on 2018-12-06 16:24
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Replace Dog's auto-increment integer primary key with a UUID primary
    key, and add 'breed' and 'name' fields."""
    dependencies = [
        ('breeds', '0002_auto_20181206_1139'),
        ('dogs', '0001_initial'),
    ]
    operations = [
        # Drop the implicit auto 'id' PK first; the new UUID field below
        # becomes the primary key.
        migrations.RemoveField(
            model_name='dog',
            name='id',
        ),
        migrations.AddField(
            model_name='dog',
            name='breed',
            # PROTECT: a Breed referenced by any Dog cannot be deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='breeds.Breed'),
        ),
        migrations.AddField(
            model_name='dog',
            name='name',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AddField(
            model_name='dog',
            name='uuid',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
    ]
|
[
"a.svidchenkov@gmail.com"
] |
a.svidchenkov@gmail.com
|
9833b1ae185ddf9c36ccb4d70694a5f5430e2cc9
|
4750dec849243686bdfaf26bb448946134780599
|
/CNN-UNet/Training/convert_to_tiffs.py
|
162a4c4995d344ab2aa21568e8834c65c8594463
|
[] |
no_license
|
yxu233/Myelin
|
319b847db9de280564acf9146b5a46a0e8eae7f4
|
669dea7c4d77b27b75b6f9e6be4bdfe6d94aed04
|
refs/heads/master
| 2021-09-28T16:24:23.661366
| 2021-09-27T01:55:49
| 2021-09-27T01:55:49
| 143,020,497
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,995
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sunday Dec. 24th
============================================================
Converts pickled data to tiffs
@author: Tiger
"""
import tensorflow as tf
from matplotlib import *
import numpy as np
from PIL import Image
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
from natsort import natsort_keygen, ns
from skimage import measure
import pickle as pickle
import os
import zipfile
import scipy
from plot_functions import *
from data_functions import *
#from post_process_functions import *
from UNet import *
import nibabel as nib
import tkinter
from tkinter import filedialog
import os
""" Currently assumes:
R ==>
G ==> Nanofibers
B ==>
dot ==> DAPI mask
"""
scale = 0.6904
minDAPIsize = 15 / (scale * scale) # um ^ 2
root = tkinter.Tk()
input_path = filedialog.askdirectory(parent=root, initialdir="/Users/Neuroimmunology Unit/Anaconda3/AI stuff/MyelinUNet/Checkpoints/",
title='Please select checkpoint directory')
input_path = input_path + '/'
sav_dir = filedialog.askdirectory(parent=root, initialdir="/Users/Neuroimmunology Unit/Anaconda3/AI stuff/MyelinUNet/Source/",
title='Please select saving directory')
sav_dir = sav_dir + '/'
""" Load filenames from zip """
myzip, onlyfiles_mask, counter = read_zip_names(input_path, 'new_DATA_11_13.zip')
counter = list(range(len(onlyfiles_mask))) # create a counter, so can randomize it
counter = np.array(counter)
cleaned = 0
uncleaned = 0
for i in range(len(onlyfiles_mask)):
filename = onlyfiles_mask[counter[i]]
input_im, truth_im = load_training_ZIP(myzip, filename)
filename = filename.split('/')[-1]
pos = '_neg'
if truth_im[:, :, 1].any():
pos = '_pos'
print(i)
# if has nanofibers
if input_im.shape[-1] > 3:
slice_num = 3
else:
slice_num = 1
# Clean small body sizes
if np.count_nonzero(input_im[:, :, slice_num]) < minDAPIsize:
cleaned = cleaned + 1
print('Cleaned: ' + str(cleaned))
continue
# deal with the fiber channe;
if slice_num == 3 and input_im[:, :, 1].any():
nanofibers = input_im[:, :, 1]
nanofibers = Image.fromarray(input_im[:, :, 1].astype('uint8'))
nanofibers.save(sav_dir + 'myelin_' + filename + '_' + "%07d" % (uncleaned,) + pos + '_NANOFIBERS.tiff')
input_im[:, :, 1] = input_im[:, :, 3]
input_im = input_im[:, :, 0:3]
input_im = Image.fromarray(input_im.astype('uint8'))
truth_im = Image.fromarray((truth_im[:,:,1] * 255).astype('uint8'))
input_im.save(sav_dir + 'myelin_' + filename + '_' + "%07d" % (uncleaned,) + pos + '_input.tiff')
truth_im.save(sav_dir + 'myelin_' + filename + '_' + "%07d" % (uncleaned,) + pos + '_truth.tiff')
uncleaned = uncleaned + 1
|
[
"tigerxu96@hotmail.com"
] |
tigerxu96@hotmail.com
|
b3c3d93049b1e2066eff628df1956bdd155fdca1
|
570e773112f03048fc4080a066f1ab8bdd87f288
|
/hour_minute.py
|
77c0438a20bb8540f6c676bc0c35eff89a386352
|
[] |
no_license
|
shuvendra4228/learn-python
|
c0da1e0ea7188a80d97b39b933e4278f8321db10
|
c61368db0064455104b13f1d6d23df24a64e8b61
|
refs/heads/master
| 2021-09-04T02:25:47.969362
| 2018-01-14T16:43:42
| 2018-01-14T16:43:42
| 115,523,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
def hour_min(hr, min):
    """Return the degrees swept by a clock's hour hand after ``hr`` hours
    and ``min`` minutes: the hour hand moves 0.5 degrees per minute
    (360 degrees / 720 minutes).

    Note: the parameter name ``min`` shadows the builtin inside this body.
    """
    elapsed_minutes = (hr * 60) + min
    return elapsed_minutes * 0.5
# Interactive driver: read a clock time and report how far the hour hand
# has travelled from 12 o'clock, in degrees.
hour = int(input("enter hour: "))
minute = int(input("enter minute: "))
watch = hour_min(hour,minute)
print(f'hour hand moves {watch} degrees in {hour} hr and {minute} minutes')
|
[
"shuvendra4227@gmail.com"
] |
shuvendra4227@gmail.com
|
ab3912c4caa084bcab151c48ecec4d4569bd8d24
|
5ba6e51fa7745fae4c66eb3fc4b27c36b682f68f
|
/app.py
|
b74fa5c1f55903d2acc829b17709bc80983a2628
|
[] |
no_license
|
Dishi30/Book-Manager
|
9b15c3b2a9d47dfcd760ec0fd5837cbc7c02e240
|
de879c67ffbce1ece2368f06db14174ccf91dc47
|
refs/heads/main
| 2023-08-16T20:05:14.548017
| 2021-09-26T16:13:48
| 2021-09-26T16:13:48
| 410,599,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
from utils import database
# Menu text shown before every prompt; the trailing "Your choice: " keeps the
# cursor on the same line as the question.
user_choice = """
Enter:
-'a' to ADD new book
-'l' to LIST all book
-'r' to mark a book as READ
-'d' to DELETE a book
-'q' to QUIT
Your choice: """
def menu():
    """Main interactive loop: create the table, then dispatch user commands
    until 'q' is entered."""
    actions = {'a': add, 'l': list_book, 'r': read_book, 'd': del_book}
    database.create_book_table()
    choice = input(user_choice)
    while choice != 'q':
        handler = actions.get(choice)
        if handler is not None:
            handler()
        else:
            print("UNKNOWN COMMAND, please try again")
        choice = input(user_choice)
def add():
    """Prompt for a title and author and store the new book."""
    title = input("Enter the new book name: ")
    writer = input("Enter the new book author: ")
    database.add_book(title, writer)
def list_book():
    """Print every stored book as "<name> by <author>, read : YES/NO"."""
    for entry in database.get_books():
        # 'read' appears to be stored as the string '1' when finished —
        # TODO confirm against the database module's schema.
        status = 'YES' if entry['read'] == '1' else 'NO'
        print(f"{entry['name']} by {entry['author']}, read : {status}")
def read_book():
    """Mark a book as read, looked up by its name."""
    finished = input("Enter the name of book you just finished reading: ")
    database.readed(finished)
def del_book():
    """Delete a book from storage by its name."""
    target = input("Enter the book you want to delete: ")
    database.delete(target)
menu()
|
[
"noreply@github.com"
] |
noreply@github.com
|
caadde480ede3737af17ac7035a2844eaffa7880
|
0a2f838d253ebf2a5f8b9a1ec41f011b19ebc05e
|
/cifrado/escritorio/codigo/playfair-Python/reglas/regla4/Regla4.py
|
50928ec5a31a787a23538fee629a9cb3bae312e5
|
[] |
no_license
|
HernandezGarciaDavid/disor
|
75774c21e54e338a5eb44632b7a1fbaf3dc73685
|
009d8743d2b6b22103e8e1dc95b6e448e6378d21
|
refs/heads/master
| 2020-04-11T17:32:36.533058
| 2018-12-16T05:51:58
| 2018-12-16T05:51:58
| 161,964,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
from posicion.Posicion import Posicion
class Regla4():
    """Playfair rule 4: each letter of a digram is replaced by the letter
    immediately to its left in the key matrix, wrapping to the last column
    of the same row when the letter is already in column 0."""

    def aplicar(self, mensaje, matriz, resultado):
        """Append the rule-4 substitution of the digram ``mensaje`` (first
        two characters) to ``resultado`` and return it.

        The original recomputed ``Posicion().devolverPosicion`` for the same
        letter up to five times; the lookup is hoisted into locals here
        (assumes devolverPosicion is a pure lookup — TODO confirm).
        """
        buscador = Posicion()
        for indice in (0, 1):
            pos = buscador.devolverPosicion(mensaje[indice], matriz, 0)
            fila = pos[0]
            columna = pos[1]
            if columna - 1 >= 0:
                resultado += matriz[fila][columna - 1]
            else:
                # wrap around: take the last column of this row
                resultado += matriz[fila][len(matriz[fila]) - 1]
        return resultado
|
[
"David.Hernandez@Glicware"
] |
David.Hernandez@Glicware
|
f3852d63cf9e096a53290bd0506fa35bb2c56415
|
be7b0c496920e634bfbcb5a19ed4598289f533f8
|
/Chapter 7/parrot.py
|
3c47cc463d3833ed7f2636f36cecc48a0fdbd720
|
[] |
no_license
|
AlbertRipak/PythonLearining
|
2e411ece542e98dbaf0cc5f67bb8ed4e93b0c770
|
7c1c37ccac6cc22186c7e6845aabc6e695b22669
|
refs/heads/main
| 2023-07-13T03:15:07.969555
| 2021-08-24T16:03:12
| 2021-08-24T16:03:12
| 393,701,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
# Echo loop: repeat back whatever the user types until they enter 'quit'.
prompt = "\nTell me something, and I will repeat it back to you: "
prompt2 = "\nEnter 'quit' to end the program. "
active = True
while active:
    message = input(prompt)
    if message != 'quit':
        print(message)
        print(prompt2)
    else:
        active = False
|
[
"arspak107@gmail.com"
] |
arspak107@gmail.com
|
ab07b3c7457ca63c2c580c147a7f05ebaff5b274
|
36ed4043a3d60089e556beea6a2c889aa99082cd
|
/qiskit/quantum_info/operators/dihedral/dihedral_circuits.py
|
a27fa4699f4ef47a844bef2e69c5e828572408cd
|
[
"Apache-2.0"
] |
permissive
|
sethmerkel/qiskit-terra
|
ad5b7380f257033e200325892732b4f89d938dea
|
c13cf5e3a160cada884e99cb083645201792de4e
|
refs/heads/master
| 2023-04-12T15:07:12.816572
| 2022-06-24T14:18:21
| 2022-06-24T14:18:21
| 185,865,440
| 0
| 0
|
Apache-2.0
| 2023-03-27T10:07:00
| 2019-05-09T20:16:36
|
Python
|
UTF-8
|
Python
| false
| false
| 6,363
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Circuit simulation for the CNOTDihedral class
"""
import numpy as np
from qiskit.exceptions import QiskitError
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.barrier import Barrier
from qiskit.circuit.delay import Delay
def _append_circuit(elem, circuit, qargs=None):
    """Update a CNOTDihedral element inplace by applying a CNOTDihedral circuit.
    Args:
        elem (CNOTDihedral): the CNOTDihedral element to update.
        circuit (QuantumCircuit or Instruction): the gate or composite gate to apply.
        qargs (list or None): The qubits to apply gates to.
    Returns:
        CNOTDihedral: the updated CNOTDihedral.
    Raises:
        QiskitError: if input gates cannot be decomposed into CNOTDihedral gates.
    """
    if qargs is None:
        qargs = list(range(elem.num_qubits))
    # Barriers and delays do not affect the operator.
    if isinstance(circuit, (Barrier, Delay)):
        return elem
    if isinstance(circuit, QuantumCircuit):
        gate = circuit.to_instruction()
    else:
        gate = circuit
    # Handle cx, cz and id since they are basic gates, and cannot be decomposed,
    if gate.name == "cx":
        if len(qargs) != 2:
            raise QiskitError("Invalid qubits for 2-qubit gate cx.")
        elem._append_cx(qargs[0], qargs[1])
        return elem
    elif gate.name == "cz":
        if len(qargs) != 2:
            raise QiskitError("Invalid qubits for 2-qubit gate cz.")
        # cz decomposed into cx plus pi/4-unit phases: 7 -> tdg, 2 -> s
        # (matching the single-qubit mappings further below).
        elem._append_phase(7, qargs[1])
        elem._append_phase(7, qargs[0])
        elem._append_cx(qargs[1], qargs[0])
        elem._append_phase(2, qargs[0])
        elem._append_cx(qargs[1], qargs[0])
        elem._append_phase(7, qargs[1])
        elem._append_phase(7, qargs[0])
        return elem
    if gate.name == "id":
        if len(qargs) != 1:
            raise QiskitError("Invalid qubits for 1-qubit gate id.")
        return elem
    if gate.definition is None:
        raise QiskitError(f"Cannot apply Instruction: {gate.name}")
    if not isinstance(gate.definition, QuantumCircuit):
        raise QiskitError(
            "{} instruction definition is {}; expected QuantumCircuit".format(
                gate.name, type(gate.definition)
            )
        )
    # Map each bit of the definition's flat registers to its integer position.
    flat_instr = gate.definition
    bit_indices = {
        bit: index
        for bits in [flat_instr.qubits, flat_instr.clbits]
        for index, bit in enumerate(bits)
    }
    # Walk the definition and apply each primitive to `elem`.
    # NOTE(review): each branch also tests `gate.name == ...`, apparently so a
    # definition wrapper named after the primitive is treated as that
    # primitive regardless of the inner operation's name — confirm intent.
    for instruction in gate.definition:
        if isinstance(instruction.operation, (Barrier, Delay)):
            continue
        # Get the integer position of the flat register
        new_qubits = [qargs[bit_indices[tup]] for tup in instruction.qubits]
        if instruction.operation.name == "x" or gate.name == "x":
            if len(new_qubits) != 1:
                raise QiskitError("Invalid qubits for 1-qubit gate x.")
            elem._append_x(new_qubits[0])
        elif instruction.operation.name == "z" or gate.name == "z":
            if len(new_qubits) != 1:
                raise QiskitError("Invalid qubits for 1-qubit gate z.")
            # phases are in units of pi/4: z = 4, s = 2, t = 1
            elem._append_phase(4, new_qubits[0])
        elif instruction.operation.name == "y" or gate.name == "y":
            if len(new_qubits) != 1:
                raise QiskitError("Invalid qubits for 1-qubit gate y.")
            elem._append_x(new_qubits[0])
            elem._append_phase(4, new_qubits[0])
        elif instruction.operation.name == "p" or gate.name == "p":
            if len(new_qubits) != 1 or len(instruction.operation.params) != 1:
                raise QiskitError("Invalid qubits or params for 1-qubit gate p.")
            # convert the rotation angle into an integer number of pi/4 steps
            elem._append_phase(int(4 * instruction.operation.params[0] / np.pi), new_qubits[0])
        elif instruction.operation.name == "t" or gate.name == "t":
            if len(new_qubits) != 1:
                raise QiskitError("Invalid qubits for 1-qubit gate t.")
            elem._append_phase(1, new_qubits[0])
        elif instruction.operation.name == "tdg" or gate.name == "tdg":
            if len(new_qubits) != 1:
                raise QiskitError("Invalid qubits for 1-qubit gate tdg.")
            elem._append_phase(7, new_qubits[0])
        elif instruction.operation.name == "s" or gate.name == "s":
            if len(new_qubits) != 1:
                raise QiskitError("Invalid qubits for 1-qubit gate s.")
            elem._append_phase(2, new_qubits[0])
        elif instruction.operation.name == "sdg" or gate.name == "sdg":
            if len(new_qubits) != 1:
                raise QiskitError("Invalid qubits for 1-qubit gate sdg.")
            elem._append_phase(6, new_qubits[0])
        elif instruction.operation.name == "cx":
            if len(new_qubits) != 2:
                raise QiskitError("Invalid qubits for 2-qubit gate cx.")
            elem._append_cx(new_qubits[0], new_qubits[1])
        elif instruction.operation.name == "cz":
            if len(new_qubits) != 2:
                raise QiskitError("Invalid qubits for 2-qubit gate cz.")
            # same cz decomposition as the fast path above
            elem._append_phase(7, new_qubits[1])
            elem._append_phase(7, new_qubits[0])
            elem._append_cx(new_qubits[1], new_qubits[0])
            elem._append_phase(2, new_qubits[0])
            elem._append_cx(new_qubits[1], new_qubits[0])
            elem._append_phase(7, new_qubits[1])
            elem._append_phase(7, new_qubits[0])
        elif instruction.operation.name == "swap" or gate.name == "swap":
            if len(new_qubits) != 2:
                raise QiskitError("Invalid qubits for 2-qubit gate swap.")
            # swap = three alternating cx gates
            elem._append_cx(new_qubits[0], new_qubits[1])
            elem._append_cx(new_qubits[1], new_qubits[0])
            elem._append_cx(new_qubits[0], new_qubits[1])
        elif instruction.operation.name == "id":
            pass
        else:
            raise QiskitError(f"Not a CNOT-Dihedral gate: {instruction.operation.name}")
    return elem
|
[
"noreply@github.com"
] |
noreply@github.com
|
34f3f9198a19dce963a0fbaf29dc652cf2e4b47b
|
a35e38a12247660e4c9c9e430aa00ca69319aba9
|
/swaggerAPIClient/swagger_client/api/mapping_api.py
|
ae3b4bf94a785ca276bdd2cad5e4d1f0025302d5
|
[] |
no_license
|
tf-msdev/htm_cli
|
5106768753346b3c34ac509a342c0f6b2a34f229
|
cbd0f0d9f6c2eb7ad469ce0d22422ede405b4d0a
|
refs/heads/master
| 2020-06-22T16:26:04.356802
| 2019-09-26T18:50:54
| 2019-09-26T18:50:54
| 197,745,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71,251
|
py
|
# coding: utf-8
"""
HOT Tasking Manager API
API endpoints for the HOT tasking manager # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class MappingApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def api_v1_project_project_id_aoi_get(self, project_id, **kwargs): # noqa: E501
"""Get AOI of Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_aoi_get(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int project_id: The unique project ID (required)
:param bool as_file: Set to false if file download not preferred
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_aoi_get_with_http_info(project_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_aoi_get_with_http_info(project_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_aoi_get_with_http_info(self, project_id, **kwargs): # noqa: E501
"""Get AOI of Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_aoi_get_with_http_info(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int project_id: The unique project ID (required)
:param bool as_file: Set to false if file download not preferred
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'as_file'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_aoi_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_aoi_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
query_params = []
if 'as_file' in params:
query_params.append(('as_file', params['as_file'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/aoi', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_get(self, accept_language, project_id, **kwargs): # noqa: E501
"""Get HOT Project for mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_get(accept_language, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str accept_language: Language user is requesting (required)
:param int project_id: The unique project ID (required)
:param bool as_file: Set to true if file download is preferred
:param bool abbreviated: Set to true if only state information is desired
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_get_with_http_info(accept_language, project_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_get_with_http_info(accept_language, project_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_get_with_http_info(self, accept_language, project_id, **kwargs): # noqa: E501
"""Get HOT Project for mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_get_with_http_info(accept_language, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str accept_language: Language user is requesting (required)
:param int project_id: The unique project ID (required)
:param bool as_file: Set to true if file download is preferred
:param bool abbreviated: Set to true if only state information is desired
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['accept_language', 'project_id', 'as_file', 'abbreviated'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'accept_language' is set
if ('accept_language' not in params or
params['accept_language'] is None):
raise ValueError("Missing the required parameter `accept_language` when calling `api_v1_project_project_id_get`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
query_params = []
if 'as_file' in params:
query_params.append(('as_file', params['as_file'])) # noqa: E501
if 'abbreviated' in params:
query_params.append(('abbreviated', params['abbreviated'])) # noqa: E501
header_params = {}
if 'accept_language' in params:
header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_has_user_locked_tasks_details_get(self, authorization, accept_language, project_id, **kwargs): # noqa: E501
"""Gets details of any locked task on the project from logged in user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_has_user_locked_tasks_details_get(authorization, accept_language, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Base64 encoded session token (required)
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_has_user_locked_tasks_details_get_with_http_info(authorization, accept_language, project_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_has_user_locked_tasks_details_get_with_http_info(authorization, accept_language, project_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_has_user_locked_tasks_details_get_with_http_info(self, authorization, accept_language, project_id, **kwargs): # noqa: E501
"""Gets details of any locked task on the project from logged in user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_has_user_locked_tasks_details_get_with_http_info(authorization, accept_language, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Base64 encoded session token (required)
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization', 'accept_language', 'project_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_has_user_locked_tasks_details_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `api_v1_project_project_id_has_user_locked_tasks_details_get`") # noqa: E501
# verify the required parameter 'accept_language' is set
if ('accept_language' not in params or
params['accept_language'] is None):
raise ValueError("Missing the required parameter `accept_language` when calling `api_v1_project_project_id_has_user_locked_tasks_details_get`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_has_user_locked_tasks_details_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'accept_language' in params:
header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/has-user-locked-tasks/details', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_has_user_locked_tasks_get(self, authorization, project_id, **kwargs): # noqa: E501
"""Gets any locked task on the project from logged in user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_has_user_locked_tasks_get(authorization, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Base64 encoded session token (required)
:param int project_id: The ID of the project the task is associated with (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_has_user_locked_tasks_get_with_http_info(authorization, project_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_has_user_locked_tasks_get_with_http_info(authorization, project_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_has_user_locked_tasks_get_with_http_info(self, authorization, project_id, **kwargs): # noqa: E501
"""Gets any locked task on the project from logged in user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_has_user_locked_tasks_get_with_http_info(authorization, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Base64 encoded session token (required)
:param int project_id: The ID of the project the task is associated with (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization', 'project_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_has_user_locked_tasks_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `api_v1_project_project_id_has_user_locked_tasks_get`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_has_user_locked_tasks_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/has-user-locked-tasks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_summary_get(self, accept_language, project_id, **kwargs): # noqa: E501
"""Gets project summary # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_summary_get(accept_language, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_summary_get_with_http_info(accept_language, project_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_summary_get_with_http_info(accept_language, project_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_summary_get_with_http_info(self, accept_language, project_id, **kwargs): # noqa: E501
"""Gets project summary # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_summary_get_with_http_info(accept_language, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['accept_language', 'project_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_summary_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'accept_language' is set
if ('accept_language' not in params or
params['accept_language'] is None):
raise ValueError("Missing the required parameter `accept_language` when calling `api_v1_project_project_id_summary_get`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_summary_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
query_params = []
header_params = {}
if 'accept_language' in params:
header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/summary', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_task_task_id_comment_post(self, body, authorization, project_id, task_id, **kwargs): # noqa: E501
"""Adds a comment to the task outside of mapping/validation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_comment_post(body, authorization, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TaskComment body: JSON object representing the comment (required)
:param str authorization: Base64 encoded session token (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_task_task_id_comment_post_with_http_info(body, authorization, project_id, task_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_task_task_id_comment_post_with_http_info(body, authorization, project_id, task_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_task_task_id_comment_post_with_http_info(self, body, authorization, project_id, task_id, **kwargs): # noqa: E501
"""Adds a comment to the task outside of mapping/validation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_comment_post_with_http_info(body, authorization, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TaskComment body: JSON object representing the comment (required)
:param str authorization: Base64 encoded session token (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'authorization', 'project_id', 'task_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_task_task_id_comment_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `api_v1_project_project_id_task_task_id_comment_post`") # noqa: E501
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `api_v1_project_project_id_task_task_id_comment_post`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_task_task_id_comment_post`") # noqa: E501
# verify the required parameter 'task_id' is set
if ('task_id' not in params or
params['task_id'] is None):
raise ValueError("Missing the required parameter `task_id` when calling `api_v1_project_project_id_task_task_id_comment_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
if 'task_id' in params:
path_params['task_id'] = params['task_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/task/{task_id}/comment', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_task_task_id_get(self, accept_language, project_id, task_id, **kwargs): # noqa: E501
"""Get task for mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_get(accept_language, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:param str authorization: Base64 encoded session token
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_task_task_id_get_with_http_info(accept_language, project_id, task_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_task_task_id_get_with_http_info(accept_language, project_id, task_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_task_task_id_get_with_http_info(self, accept_language, project_id, task_id, **kwargs): # noqa: E501
"""Get task for mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_get_with_http_info(accept_language, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:param str authorization: Base64 encoded session token
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['accept_language', 'project_id', 'task_id', 'authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_task_task_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'accept_language' is set
if ('accept_language' not in params or
params['accept_language'] is None):
raise ValueError("Missing the required parameter `accept_language` when calling `api_v1_project_project_id_task_task_id_get`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_task_task_id_get`") # noqa: E501
# verify the required parameter 'task_id' is set
if ('task_id' not in params or
params['task_id'] is None):
raise ValueError("Missing the required parameter `task_id` when calling `api_v1_project_project_id_task_task_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
if 'task_id' in params:
path_params['task_id'] = params['task_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'accept_language' in params:
header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/task/{task_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_task_task_id_lock_for_mapping_post(self, authorization, accept_language, project_id, task_id, **kwargs): # noqa: E501
"""Locks the task for mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_lock_for_mapping_post(authorization, accept_language, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Base64 encoded session token (required)
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_task_task_id_lock_for_mapping_post_with_http_info(authorization, accept_language, project_id, task_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_task_task_id_lock_for_mapping_post_with_http_info(authorization, accept_language, project_id, task_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_task_task_id_lock_for_mapping_post_with_http_info(self, authorization, accept_language, project_id, task_id, **kwargs): # noqa: E501
"""Locks the task for mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_lock_for_mapping_post_with_http_info(authorization, accept_language, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Base64 encoded session token (required)
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization', 'accept_language', 'project_id', 'task_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_task_task_id_lock_for_mapping_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `api_v1_project_project_id_task_task_id_lock_for_mapping_post`") # noqa: E501
# verify the required parameter 'accept_language' is set
if ('accept_language' not in params or
params['accept_language'] is None):
raise ValueError("Missing the required parameter `accept_language` when calling `api_v1_project_project_id_task_task_id_lock_for_mapping_post`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_task_task_id_lock_for_mapping_post`") # noqa: E501
# verify the required parameter 'task_id' is set
if ('task_id' not in params or
params['task_id'] is None):
raise ValueError("Missing the required parameter `task_id` when calling `api_v1_project_project_id_task_task_id_lock_for_mapping_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
if 'task_id' in params:
path_params['task_id'] = params['task_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'accept_language' in params:
header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/task/{task_id}/lock-for-mapping', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_task_task_id_stop_mapping_post(self, body, authorization, accept_language, project_id, task_id, **kwargs): # noqa: E501
"""Unlock task that is locked for mapping resetting it to it's last status # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_stop_mapping_post(body, authorization, accept_language, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TaskUpdateStop body: JSON object for unlocking a task (required)
:param str authorization: Base64 encoded session token (required)
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_task_task_id_stop_mapping_post_with_http_info(body, authorization, accept_language, project_id, task_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_task_task_id_stop_mapping_post_with_http_info(body, authorization, accept_language, project_id, task_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_task_task_id_stop_mapping_post_with_http_info(self, body, authorization, accept_language, project_id, task_id, **kwargs): # noqa: E501
"""Unlock task that is locked for mapping resetting it to it's last status # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_stop_mapping_post_with_http_info(body, authorization, accept_language, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TaskUpdateStop body: JSON object for unlocking a task (required)
:param str authorization: Base64 encoded session token (required)
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'authorization', 'accept_language', 'project_id', 'task_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_project_project_id_task_task_id_stop_mapping_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `api_v1_project_project_id_task_task_id_stop_mapping_post`") # noqa: E501
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `api_v1_project_project_id_task_task_id_stop_mapping_post`") # noqa: E501
# verify the required parameter 'accept_language' is set
if ('accept_language' not in params or
params['accept_language'] is None):
raise ValueError("Missing the required parameter `accept_language` when calling `api_v1_project_project_id_task_task_id_stop_mapping_post`") # noqa: E501
# verify the required parameter 'project_id' is set
if ('project_id' not in params or
params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_task_task_id_stop_mapping_post`") # noqa: E501
# verify the required parameter 'task_id' is set
if ('task_id' not in params or
params['task_id'] is None):
raise ValueError("Missing the required parameter `task_id` when calling `api_v1_project_project_id_task_task_id_stop_mapping_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in params:
path_params['project_id'] = params['project_id'] # noqa: E501
if 'task_id' in params:
path_params['task_id'] = params['task_id'] # noqa: E501
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
if 'accept_language' in params:
header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/project/{project_id}/task/{task_id}/stop-mapping', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def api_v1_project_project_id_task_task_id_undo_mapping_post(self, authorization, accept_language, project_id, task_id, **kwargs): # noqa: E501
"""Get task for mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_project_project_id_task_task_id_undo_mapping_post(authorization, accept_language, project_id, task_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Base64 encoded session token (required)
:param str accept_language: Language user is requesting (required)
:param int project_id: The ID of the project the task is associated with (required)
:param int task_id: The unique task ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_v1_project_project_id_task_task_id_undo_mapping_post_with_http_info(authorization, accept_language, project_id, task_id, **kwargs) # noqa: E501
else:
(data) = self.api_v1_project_project_id_task_task_id_undo_mapping_post_with_http_info(authorization, accept_language, project_id, task_id, **kwargs) # noqa: E501
return data
def api_v1_project_project_id_task_task_id_undo_mapping_post_with_http_info(self, authorization, accept_language, project_id, task_id, **kwargs):  # noqa: E501
    """Get task for mapping  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api_v1_project_project_id_task_task_id_undo_mapping_post_with_http_info(authorization, accept_language, project_id, task_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str authorization: Base64 encoded session token (required)
    :param str accept_language: Language user is requesting (required)
    :param int project_id: The ID of the project the task is associated with (required)
    :param int task_id: The unique task ID (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Arguments this endpoint understands; any other kwarg is a caller error.
    all_params = ['authorization', 'accept_language', 'project_id', 'task_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() must run before any other local variable is bound --
    # it snapshots the named arguments (plus self and kwargs) into `params`,
    # which the rest of the method treats as the canonical argument dict.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_project_project_id_task_task_id_undo_mapping_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'authorization' is set
    if ('authorization' not in params or
            params['authorization'] is None):
        raise ValueError("Missing the required parameter `authorization` when calling `api_v1_project_project_id_task_task_id_undo_mapping_post`")  # noqa: E501
    # verify the required parameter 'accept_language' is set
    if ('accept_language' not in params or
            params['accept_language'] is None):
        raise ValueError("Missing the required parameter `accept_language` when calling `api_v1_project_project_id_task_task_id_undo_mapping_post`")  # noqa: E501
    # verify the required parameter 'project_id' is set
    if ('project_id' not in params or
            params['project_id'] is None):
        raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_task_task_id_undo_mapping_post`")  # noqa: E501
    # verify the required parameter 'task_id' is set
    if ('task_id' not in params or
            params['task_id'] is None):
        raise ValueError("Missing the required parameter `task_id` when calling `api_v1_project_project_id_task_task_id_undo_mapping_post`")  # noqa: E501

    collection_formats = {}

    # Path templating: these values replace {project_id}/{task_id} in the URL.
    path_params = {}
    if 'project_id' in params:
        path_params['project_id'] = params['project_id']  # noqa: E501
    if 'task_id' in params:
        path_params['task_id'] = params['task_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']  # noqa: E501
    if 'accept_language' in params:
        header_params['Accept-Language'] = params['accept_language']  # noqa: E501

    form_params = []
    local_var_files = {}

    # This endpoint takes no request body.
    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared client, passing the
    # path/query/header parameters assembled above.
    return self.api_client.call_api(
        '/api/v1/project/{project_id}/task/{task_id}/undo-mapping', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def api_v1_project_project_id_task_task_id_unlock_after_mapping_post(self, body, authorization, project_id, task_id, **kwargs):  # noqa: E501
    """Unlocks the task after mapping completed  # noqa: E501

    Convenience wrapper around the *_with_http_info variant that returns
    only the response payload. Synchronous by default; pass async_req=True
    to receive the request thread instead.
    >>> thread = api.api_v1_project_project_id_task_task_id_unlock_after_mapping_post(body, authorization, project_id, task_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param TaskUpdateUnlock body: JSON object for unlocking a task (required)
    :param str authorization: Base64 encoded session token (required)
    :param int project_id: The ID of the project the task is associated with (required)
    :param int task_id: The unique task ID (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always wants just the data, never (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # Async and sync branches both reduce to the same delegated call.
    return self.api_v1_project_project_id_task_task_id_unlock_after_mapping_post_with_http_info(
        body, authorization, project_id, task_id, **kwargs)  # noqa: E501
def api_v1_project_project_id_task_task_id_unlock_after_mapping_post_with_http_info(self, body, authorization, project_id, task_id, **kwargs):  # noqa: E501
    """Unlocks the task after mapping completed  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api_v1_project_project_id_task_task_id_unlock_after_mapping_post_with_http_info(body, authorization, project_id, task_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param TaskUpdateUnlock body: JSON object for unlocking a task (required)
    :param str authorization: Base64 encoded session token (required)
    :param int project_id: The ID of the project the task is associated with (required)
    :param int task_id: The unique task ID (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Arguments this endpoint understands; any other kwarg is a caller error.
    all_params = ['body', 'authorization', 'project_id', 'task_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() must run before any other local variable is bound --
    # it snapshots the named arguments (plus self and kwargs) into `params`,
    # which the rest of the method treats as the canonical argument dict.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_project_project_id_task_task_id_unlock_after_mapping_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `api_v1_project_project_id_task_task_id_unlock_after_mapping_post`")  # noqa: E501
    # verify the required parameter 'authorization' is set
    if ('authorization' not in params or
            params['authorization'] is None):
        raise ValueError("Missing the required parameter `authorization` when calling `api_v1_project_project_id_task_task_id_unlock_after_mapping_post`")  # noqa: E501
    # verify the required parameter 'project_id' is set
    if ('project_id' not in params or
            params['project_id'] is None):
        raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_task_task_id_unlock_after_mapping_post`")  # noqa: E501
    # verify the required parameter 'task_id' is set
    if ('task_id' not in params or
            params['task_id'] is None):
        raise ValueError("Missing the required parameter `task_id` when calling `api_v1_project_project_id_task_task_id_unlock_after_mapping_post`")  # noqa: E501

    collection_formats = {}

    # Path templating: these values replace {project_id}/{task_id} in the URL.
    path_params = {}
    if 'project_id' in params:
        path_params['project_id'] = params['project_id']  # noqa: E501
    if 'task_id' in params:
        path_params['task_id'] = params['task_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The unlock payload is sent as the JSON request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['*/*'])  # noqa: E501
    # Authentication setting
    auth_settings = []  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared client.
    return self.api_client.call_api(
        '/api/v1/project/{project_id}/task/{task_id}/unlock-after-mapping', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def api_v1_project_project_id_tasks_as_gpx_get(self, project_id, **kwargs):  # noqa: E501
    """Get tasks as GPX  # noqa: E501

    Convenience wrapper around the *_with_http_info variant that returns
    only the response payload. Synchronous by default; pass async_req=True
    to receive the request thread instead.
    >>> thread = api.api_v1_project_project_id_tasks_as_gpx_get(project_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int project_id: The ID of the project the task is associated with (required)
    :param str tasks: List of tasks; leave blank for all
    :param bool as_file: Set to true if file download preferred
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always wants just the data, never (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # Async and sync branches both reduce to the same delegated call.
    return self.api_v1_project_project_id_tasks_as_gpx_get_with_http_info(project_id, **kwargs)  # noqa: E501
def api_v1_project_project_id_tasks_as_gpx_get_with_http_info(self, project_id, **kwargs):  # noqa: E501
    """Get tasks as GPX  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api_v1_project_project_id_tasks_as_gpx_get_with_http_info(project_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int project_id: The ID of the project the task is associated with (required)
    :param str tasks: List of tasks; leave blank for all
    :param bool as_file: Set to true if file download preferred
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Arguments this endpoint understands; any other kwarg is a caller error.
    all_params = ['project_id', 'tasks', 'as_file']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() must run before any other local variable is bound --
    # it snapshots the named arguments (plus self and kwargs) into `params`,
    # which the rest of the method treats as the canonical argument dict.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_project_project_id_tasks_as_gpx_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'project_id' is set
    if ('project_id' not in params or
            params['project_id'] is None):
        raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_tasks_as_gpx_get`")  # noqa: E501

    collection_formats = {}

    # Path templating: this value replaces {project_id} in the URL.
    path_params = {}
    if 'project_id' in params:
        path_params['project_id'] = params['project_id']  # noqa: E501

    # Optional filters are passed through as query-string parameters.
    query_params = []
    if 'tasks' in params:
        query_params.append(('tasks', params['tasks']))  # noqa: E501
    if 'as_file' in params:
        query_params.append(('as_file', params['as_file']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body.
    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared client.
    return self.api_client.call_api(
        '/api/v1/project/{project_id}/tasks_as_gpx', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def api_v1_project_project_id_tasks_as_osm_xml_get(self, project_id, **kwargs):  # noqa: E501
    """Get tasks as OSM XML  # noqa: E501

    Convenience wrapper around the *_with_http_info variant that returns
    only the response payload. Synchronous by default; pass async_req=True
    to receive the request thread instead.
    >>> thread = api.api_v1_project_project_id_tasks_as_osm_xml_get(project_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int project_id: The ID of the project the task is associated with (required)
    :param str tasks: List of tasks; leave blank to retrieve all
    :param bool as_file: Set to true if file download preferred
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always wants just the data, never (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # Async and sync branches both reduce to the same delegated call.
    return self.api_v1_project_project_id_tasks_as_osm_xml_get_with_http_info(project_id, **kwargs)  # noqa: E501
def api_v1_project_project_id_tasks_as_osm_xml_get_with_http_info(self, project_id, **kwargs):  # noqa: E501
    """Get tasks as OSM XML  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api_v1_project_project_id_tasks_as_osm_xml_get_with_http_info(project_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int project_id: The ID of the project the task is associated with (required)
    :param str tasks: List of tasks; leave blank to retrieve all
    :param bool as_file: Set to true if file download preferred
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Arguments this endpoint understands; any other kwarg is a caller error.
    all_params = ['project_id', 'tasks', 'as_file']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() must run before any other local variable is bound --
    # it snapshots the named arguments (plus self and kwargs) into `params`,
    # which the rest of the method treats as the canonical argument dict.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_project_project_id_tasks_as_osm_xml_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'project_id' is set
    if ('project_id' not in params or
            params['project_id'] is None):
        raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_tasks_as_osm_xml_get`")  # noqa: E501

    collection_formats = {}

    # Path templating: this value replaces {project_id} in the URL.
    path_params = {}
    if 'project_id' in params:
        path_params['project_id'] = params['project_id']  # noqa: E501

    # Optional filters are passed through as query-string parameters.
    query_params = []
    if 'tasks' in params:
        query_params.append(('tasks', params['tasks']))  # noqa: E501
    if 'as_file' in params:
        query_params.append(('as_file', params['as_file']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body.
    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared client.
    return self.api_client.call_api(
        '/api/v1/project/{project_id}/tasks-as-osm-xml', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def api_v1_project_project_id_tasks_get(self, project_id, **kwargs):  # noqa: E501
    """Get tasks as JSON  # noqa: E501

    Convenience wrapper around the *_with_http_info variant that returns
    only the response payload. Synchronous by default; pass async_req=True
    to receive the request thread instead.
    >>> thread = api.api_v1_project_project_id_tasks_get(project_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int project_id: The ID of the project the task is associated with (required)
    :param bool as_file: Set to true if file download preferred
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always wants just the data, never (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # Async and sync branches both reduce to the same delegated call.
    return self.api_v1_project_project_id_tasks_get_with_http_info(project_id, **kwargs)  # noqa: E501
def api_v1_project_project_id_tasks_get_with_http_info(self, project_id, **kwargs):  # noqa: E501
    """Get tasks as JSON  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api_v1_project_project_id_tasks_get_with_http_info(project_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int project_id: The ID of the project the task is associated with (required)
    :param bool as_file: Set to true if file download preferred
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Arguments this endpoint understands; any other kwarg is a caller error.
    all_params = ['project_id', 'as_file']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() must run before any other local variable is bound --
    # it snapshots the named arguments (plus self and kwargs) into `params`,
    # which the rest of the method treats as the canonical argument dict.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_project_project_id_tasks_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'project_id' is set
    if ('project_id' not in params or
            params['project_id'] is None):
        raise ValueError("Missing the required parameter `project_id` when calling `api_v1_project_project_id_tasks_get`")  # noqa: E501

    collection_formats = {}

    # Path templating: this value replaces {project_id} in the URL.
    path_params = {}
    if 'project_id' in params:
        path_params['project_id'] = params['project_id']  # noqa: E501

    # Optional download flag is passed through as a query-string parameter.
    query_params = []
    if 'as_file' in params:
        query_params.append(('as_file', params['as_file']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body.
    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared client.
    return self.api_client.call_api(
        '/api/v1/project/{project_id}/tasks', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
|
[
"v-fita@microsoft.com"
] |
v-fita@microsoft.com
|
a25334656cde1cf3ae95a292a3c4a80cbc70ed9f
|
56fa9e3f5cd63fe8b521feb9765aefc37e46f573
|
/flask-aws/bin/pycolor
|
5ed9c95581a74fa300a7eaea3e7a79752ae29eea
|
[
"MIT"
] |
permissive
|
Roshin29/notejam
|
7b4c65df2972b9efe1e8edfd9688e6760d134e3e
|
a6baf1bc5e9f898fa4001867410968bd16489306
|
refs/heads/master
| 2022-11-15T15:18:20.443453
| 2020-07-07T18:00:34
| 2020-07-07T18:00:34
| 272,704,845
| 0
| 0
|
NOASSERTION
| 2020-06-16T12:41:28
| 2020-06-16T12:41:28
| null |
UTF-8
|
Python
| false
| false
| 249
|
#!/home/ubuntu/notejam/flask-aws/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from IPython.utils.PyColorize import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffixes ("-script.pyw" / ".exe") from
    # argv[0] before handing control to IPython's pycolor entry point.
    script_name = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = script_name
    # Equivalent to sys.exit(main()): propagate the entry point's exit code.
    raise SystemExit(main())
|
[
"root@ip-172-31-24-171.eu-west-2.compute.internal"
] |
root@ip-172-31-24-171.eu-west-2.compute.internal
|
|
3b08686f4e2a71c8bf371a520051eda1f14a2bf7
|
ac025f20b0c72321b6f051b40c7e946b8121ebaf
|
/tests/test_hand.py
|
ee0ec76803a3f44190dc84cef70ece935f8f40c4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Simon-Lee-UK/blackjack-game
|
7c797aefab63724afbbecadc43c1510d977cfe4b
|
a3a09573621898ef70d2ea9e18d92d1f0ed16e47
|
refs/heads/master
| 2022-12-05T19:51:38.059391
| 2020-08-26T19:53:31
| 2020-08-26T19:53:31
| 273,967,918
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
"""Tests for hand objects. Run using: python -m pytest."""
def test_hand_value_three_ten(hand_13_fixture):
    """A hand worth 13 with no aces has exactly one possible value."""
    assert hand_13_fixture.hand_value() == [13]
def test_hand_value_single_ace(hand_1ace_fixture):
    """One ace yields two candidate values (ace counted as 1 or 11)."""
    assert hand_1ace_fixture.hand_value() == [11, 21]
def test_hand_value_double_ace(hand_2ace_fixture):
    """Two aces yield three candidate values (0, 1 or 2 aces as 11)."""
    assert hand_2ace_fixture.hand_value() == [2, 12, 22]
def test_hand_value_quad_ace(hand_4ace_fixture):
    """Four aces yield five candidate values (0..4 aces counted as 11)."""
    assert hand_4ace_fixture.hand_value() == [4, 14, 24, 34, 44]
def test_facedown_hand_value(hand_facedown_fixture):
    """A hand with a face-down card renders its value as a masked string."""
    assert hand_facedown_fixture.hand_value() == ["3 + *-*"]
def test_iter_method(hand_4ace_fixture):
    """Iterating the hand yields one item per card (4 aces -> 4 items).

    The original version used a bare ``None`` expression as the loop body
    (instead of ``pass``) and asserted on the leaked loop variable ``idx``,
    which raises UnboundLocalError rather than failing cleanly when the
    hand is empty. Counting the iterator directly avoids both issues.
    """
    assert sum(1 for _ in hand_4ace_fixture) == 4
def test_len_method(hand_4ace_fixture):
    """__len__ reports the number of cards in the hand."""
    assert len(hand_4ace_fixture) == 4
def test_best_hand_value_multiple_values(hand_4ace_fixture):
    """best_hand_value picks the highest candidate that does not bust (<= 21)."""
    assert hand_4ace_fixture.best_hand_value() == 14
def test_best_hand_value_bust(hand_bust_fixture):
    """A busted hand has no valid value, signalled by None."""
    assert hand_bust_fixture.best_hand_value() is None
|
[
"[simonalee96@gmail.com]"
] |
[simonalee96@gmail.com]
|
c5e7e86446a2042faffeab8a1537027b1f2e519c
|
c4223fa1db8324af96797240b3e156f9f697b547
|
/agent_code/agent_task1/train.py
|
c5040fbdee545b213041da50c29463ae446b66d2
|
[] |
no_license
|
Philipp-g/ifml-project
|
74b18933ab55a6402aa70d567ea961a641498f8c
|
34c8fc56b0cf635509921290a1e70b6a812146d6
|
refs/heads/master
| 2023-03-25T06:38:54.566525
| 2021-03-29T12:18:14
| 2021-03-29T12:18:14
| 338,555,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,696
|
py
|
import pickle
import random
from collections import namedtuple, deque
import datetime
from typing import List, Tuple
import events as e
from .callbacks import state_to_features
from . import config
import torch
from collections import defaultdict
import numpy as np
# This is only an example!
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
def setup_training(self):
    """
    Initialise self for training purpose.

    This is called after `setup` in callbacks.py. Sets up the replay
    memory, the Adam optimizer over the model parameters, the Huber-style
    loss, and a per-round loss history used for progress reporting.

    :param self: This object is passed to all callbacks and you can set arbitrary values.
    """
    learning_rate = 0.0025
    # Bounded replay buffer of (s, a, s', r) transition tuples.
    self.transitions = deque(maxlen=config.TRANSITION_MEM_SIZE)
    self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
    # SmoothL1 (Huber) loss is less sensitive to outlier TD errors than MSE.
    self.loss_fn = torch.nn.SmoothL1Loss()
    # Maps round number -> list of per-step losses for that round.
    self.loss = defaultdict(list)
def game_events_occurred(self, old_game_state: dict, self_action: str, new_game_state: dict, events: List[str]):
    """
    Called once per step to allow intermediate rewards based on game events.

    Records the (state, action, next_state, reward) transition in the
    replay memory and, once enough transitions exist, performs one
    double-DQN update step of the online network.

    Fixes over the original: the unused local `step` was removed (it was
    only referenced by commented-out code) and `round` was renamed to
    `round_num` so it no longer shadows the builtin.

    :param self: This object is passed to all callbacks and you can set arbitrary values.
    :param old_game_state: The state that was passed to the last call of `act`.
    :param self_action: The action that you took.
    :param new_game_state: The state the agent is in now.
    :param events: The events that occurred when going from `old_game_state` to `new_game_state`
    """
    self.logger.debug(f'Encountered game event(s) {", ".join(map(repr, events))} in step {new_game_state["step"]}')

    # First/last callback of a round has no usable state pair; skip it.
    if old_game_state is None:
        return
    if new_game_state is None:
        return

    round_num = old_game_state["round"]
    self.transitions.append(
        Transition(state_to_features(old_game_state), self_action, state_to_features(new_game_state),
                   reward_from_events(self, events)))
    # Don't train until the replay buffer holds enough transitions to sample.
    if len(self.transitions) < config.MINIMAL_TRANSITION_LEN:
        return

    # One double-DQN update: sample a batch, compute TD estimate/target,
    # and step the online network. (The target net is synced in end_of_round.)
    state, next_state, action, reward = sample_from_transitions(self, config.BATCH_SIZE)
    td_est = td_estimate(self, state, action)
    td_targ = td_target(self, reward, next_state)
    loss = update_online(self, td_est, td_targ)
    self.loss[round_num].append(loss)
def td_estimate(self, state, action):
    """Return Q(s, a) from the online network for each sampled transition.

    Generalized over the original: the row index range is derived from the
    actual batch dimension of the network output instead of the hard-coded
    config.BATCH_SIZE, so the function also works for the final (possibly
    smaller) batch or ad-hoc batch sizes. Behavior is unchanged whenever
    the batch size equals config.BATCH_SIZE.

    :param state: batched state tensor fed to self.model.
    :param action: 1-D long tensor of action indices, one per batch row.
    :return: 1-D tensor of the Q-values of the chosen actions.
    """
    current_Q = self.model(state, model="online")
    # Advanced indexing: pick, for every row i, the entry current_Q[i, action[i]].
    rows = torch.arange(current_Q.size(0), dtype=torch.long)
    return current_Q[rows, action]
def update_online(self, td_estimate, td_target):
    """Perform one optimizer step of the online network.

    Computes the loss between the TD estimate and the TD target,
    backpropagates it, applies the optimizer update, and returns the
    scalar loss value for logging.
    """
    step_loss = self.loss_fn(td_estimate, td_target)
    # Standard train step: clear stale gradients, backprop, apply update.
    self.optimizer.zero_grad()
    step_loss.backward()
    self.optimizer.step()
    return step_loss.item()
def update_target(self):
    """Synchronise the target network with the online network's weights."""
    self.model.target.load_state_dict(self.model.online.state_dict())
@torch.no_grad()
def td_target(self, reward, next_state):
    """Compute the double-DQN bootstrap target (no gradients recorded).

    The greedy next action is chosen by the online network, but its value
    is read from the target network, which decouples action selection
    from value estimation.
    """
    # Action selection via the online net.
    online_Q = self.model(next_state, model="online")
    greedy_action = torch.argmax(online_Q, axis=1)
    # Value estimation via the target net at those actions.
    target_Q = self.model(next_state, model="target")
    rows = torch.arange(0, config.BATCH_SIZE, dtype=torch.long)
    bootstrapped = target_Q[rows, greedy_action]
    return (reward + config.GAMMA * bootstrapped).float()
def end_of_round(self, last_game_state: dict, last_action: str, events: List[str]):
    """
    Called at the end of each game or when the agent died to hand out final rewards.

    Syncs the target network with the online network, logs/prints the
    round summary, and checkpoints the model.

    Fixes over the original: the unused local `steps` was removed, `round`
    was renamed to `round_num` (builtin shadowing), and dead commented-out
    code was dropped.

    :param self: The same object that is passed to all of your callbacks.
    """
    # Sync target <- online once per round (instead of every N steps).
    update_target(self)

    round_num = last_game_state["round"]
    self.logger.debug(f'Encountered event(s) {", ".join(map(repr, events))} in final step')
    print(f"Round: {last_game_state['round']}, Steps: {last_game_state['step']}")
    print(f"Mean Loss: {np.mean(self.loss[round_num])}")
    print(f"Points: {last_game_state['self'][1]}")

    # Store the model
    # NOTE(review): the timestamp contains spaces and colons, which makes
    # the path non-portable (e.g. invalid on Windows) -- confirm intended.
    save_path = config.SAVE_PATH + str(datetime.datetime.now())[:-7]
    torch.save(self.model, save_path)
def sample_from_transitions(self, batch_size: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Draw a random minibatch from the replay memory as batched tensors.

    Returns (state, next_state, action, reward); tensors are moved to the
    GPU when CUDA is available.
    """
    sampled = random.sample(self.transitions, batch_size)

    # Stack per-transition tensors into batch-first tensors; map action
    # names to their index in the action list.
    state = torch.stack(tuple(t.state for t in sampled))
    next_state = torch.stack(tuple(t.next_state for t in sampled))
    action = torch.Tensor([config.ACTIONS.index(t.action) for t in sampled]).long()
    reward = torch.Tensor([t.reward for t in sampled])

    if not torch.cuda.is_available():
        return state, next_state, action, reward
    return state.cuda(), next_state.cuda(), action.cuda(), reward.cuda()
def reward_from_events(self, events: List[str]) -> int:
    """
    *This is not a required function, but an idea to structure your code.*

    Map the step's game events to a scalar reward. Events not listed in
    the table contribute nothing; every step starts from -1 so eventless
    steps are mildly punished.
    """
    game_rewards = {
        e.COIN_COLLECTED: 100,
        e.SURVIVED_ROUND: 2000,
        e.WAITED: -10,
        e.INVALID_ACTION: -100,
        # e.KILLED_OPPONENT: 5,
        # PLACEHOLDER_EVENT: -.1  # idea: the custom event is bad
    }
    # Base penalty of -1 per step, plus the table value of each known event.
    reward_sum = -1 + sum(game_rewards.get(event, 0) for event in events)
    self.logger.info(f"Awarded {reward_sum} for events {', '.join(events)}")
    return reward_sum
|
[
"philipp.goeldner@gmail.com"
] |
philipp.goeldner@gmail.com
|
2ac952f31d08278c866ed2990a35fd7f970f3e15
|
fdf3aff5344271ef69ac7441c5dbca9cbf832cd1
|
/car_location/location/models/__init__.py
|
1219e9aa74d0d07e37129adcf33bba5812ee7ee2
|
[] |
no_license
|
lffsantos/DesafioPython
|
6069b3277780326611e34ae024f7506f3d56c5b4
|
fbc451b77c0310630fd95cbd23c339e194af88d1
|
refs/heads/master
| 2021-01-17T07:42:12.181187
| 2016-01-19T03:39:20
| 2016-01-19T03:39:20
| 49,730,610
| 0
| 0
| null | 2016-01-19T03:39:22
| 2016-01-15T16:25:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
__author__ = 'lucas'
from car_location.location.models import categoriaveiculo
from car_location.location.models import veiculo
from car_location.location.models import cliente
from car_location.location.models import locacao
from car_location.location.models import devolucao
|
[
"lffsantos@gmail.com"
] |
lffsantos@gmail.com
|
4404d9fc262775f54d590079477f8a1ba5b93179
|
e65ae5bd9ae1c93e7117e630f7340bc73aa71212
|
/lib/gevent/greentest/test__semaphore.py
|
480ec0e930466916a152bfe75550bf85470a4e0e
|
[
"MIT"
] |
permissive
|
nadirhamid/oneline
|
e98ff1ed81da0536f9602ecdde2fb2a4fe80d256
|
833ebef0e26ae8e0cc452756381227746d830b23
|
refs/heads/master
| 2021-01-21T04:27:41.715047
| 2016-05-30T03:50:34
| 2016-05-30T03:50:34
| 23,320,578
| 1
| 2
|
NOASSERTION
| 2020-03-12T17:22:24
| 2014-08-25T16:29:36
|
Python
|
UTF-8
|
Python
| false
| false
| 640
|
py
|
import greentest
import gevent
from gevent.lock import Semaphore
class TestTimeoutAcquire(greentest.TestCase):
    """Regression tests for gevent Semaphore behaviour (issue 39)."""

    def test_acquire_returns_false_after_timeout(self):
        # An exhausted semaphore must report failure via a False return,
        # not by raising or returning None.
        sem = Semaphore(value=0)
        acquired = sem.acquire(timeout=0.01)
        assert acquired is False, repr(acquired)

    def test_release_twice(self):
        # Each release must fire the rawlink callback registered before it,
        # preserving registration order.
        sem = Semaphore()
        fired = []
        sem.rawlink(lambda s: fired.append('a'))
        sem.release()
        sem.rawlink(lambda s: fired.append('b'))
        sem.release()
        gevent.sleep(0.001)
        self.assertEqual(fired, ['a', 'b'])
if __name__ == '__main__':
    # Run the suite through greentest's runner when executed as a script.
    greentest.main()
|
[
"matrix.nad@gmail.com"
] |
matrix.nad@gmail.com
|
58c7af9907e90657db990a4e460eb35ea902d102
|
f3693916a8b118bf139364604dac3f51235ed613
|
/functional/Components/Authorization_System/Authorization_System_generateToken_POST/test_TC_43372_Authorizationsystems_POST_Pastdate_For_Not_Before_Time.py
|
fbedd554265220c2b614fc0c146a20e9c5d9bc1c
|
[] |
no_license
|
muktabehera/QE
|
e7d62284889d8241d22506f6ee20547f1cfe6db1
|
3fedde591568e35f7b80c5bf6cd6732f8eeab4f8
|
refs/heads/master
| 2021-03-31T02:19:15.369562
| 2018-03-13T02:45:10
| 2018-03-13T02:45:10
| 124,984,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,941
|
py
|
# -*- coding: UTF-8 -*-
"""PFE Component Tests - Authorization_Systems.
* TC-43372 - Authorization_Systems POST:
Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken ".
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/authorizationSystems/<data_ID1_under_test>/generateToken"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/authorizationSystems/ab.qumu.com/generateToken"
JSON data sent to PathFinder in this test:
{'audience': 'qed:a1',
'expirationTime': '2017-09-30T06:10:50.714Z',
'generatedToken': 'string',
'issueTime': '2016-01-29T06:10:50.714Z',
'macKey': '123456789012345678901234567890121',
'notBeforeTime': '2017-09-20T06:10:50.714Z',
'permissions': ['MANAGE_SYSTEM', 'MANAGE_CONFIGURATION'],
'qeda': {},
'qedp': {},
'subject': 'sub1',
'url': '',
'useCompactPermissions': True}
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.draft  # remove this after the script has passed unit tests successfully
@pytest.mark.components
@pytest.allure.story('Authorization_Systems')
@pytest.allure.feature('POST')
class Test_PFE_Components(object):
    """PFE Authorization_Systems test cases."""

    @pytest.allure.link('https://jira.qumu.com/browse/TC-43372')
    @pytest.mark.Authorization_Systems
    @pytest.mark.POST
    def test_TC_43372_POST_Authorization_Systems_Pastdate_For_Not_Before_Time(self, context):
        """TC-43372 - Authorization_Systems-POST
           Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken "."""
        # Define a test step
        with pytest.allure.step("""Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken "."""):
            ### Positive test example

            # Test case configuration
            # NOTE(review): notBeforeTime (2017-09-20) is *earlier* than
            # expirationTime (2017-09-30) even though the test name says
            # "Pastdate for notBeforeTime" -- confirm the intended test data
            # against the TC-43372 test plan.
            tokenGenerationDetails = context.sc.TokenGenerationDetails(
                audience='qed:a1',
                expirationTime='2017-09-30T06:10:50.714Z',
                generatedToken='string',
                issueTime='2016-01-29T06:10:50.714Z',
                jwtId=None,
                macKey='123456789012345678901234567890121',
                notBeforeTime='2017-09-20T06:10:50.714Z',
                permissions=['MANAGE_SYSTEM', 'MANAGE_CONFIGURATION'],
                qeda={},
                qedp={},
                referrer=None,
                subject='sub1',
                url='',
                useCompactPermissions=True)

            # generateToken the Authorization_Systems.
            # The `check` call validates return code
            # and some of the swagger schema.
            # Most schema checks are disabled.
            # NOTE(review): the module docstring's CURL example calls the
            # endpoint with id 'ab.qumu.com', but 'generateToken' is passed
            # here -- looks like an unsubstituted placeholder; verify.
            response = check(
                context.cl.Authorization_Systems.generateToken(
                    id='generateToken',
                    body=tokenGenerationDetails
                )
            )

            ### Can add tests here to validate the response content

        with pytest.allure.step("""Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken "."""):
            ### Negative test example

            # Test case configuration (same payload as the positive step;
            # the error is expected to be injected into the request below)
            tokenGenerationDetails = context.sc.TokenGenerationDetails(
                audience='qed:a1',
                expirationTime='2017-09-30T06:10:50.714Z',
                generatedToken='string',
                issueTime='2016-01-29T06:10:50.714Z',
                jwtId=None,
                macKey='123456789012345678901234567890121',
                notBeforeTime='2017-09-20T06:10:50.714Z',
                permissions=['MANAGE_SYSTEM', 'MANAGE_CONFIGURATION'],
                qeda={},
                qedp={},
                referrer=None,
                subject='sub1',
                url='',
                useCompactPermissions=True)

            # prepare the request, so we can modify it
            request = context.cl.Authorization_Systems.generateToken(
                id='generateToken',
                body=tokenGenerationDetails
            )

            ### Invalid JSON Error injection example
            ### Errors that result in valid JSON can be configured above.
            ### Otherwise, uncomment the code below (request.future....)

            # Get the generated payload and corrupt the metric
            # request.future.request.data = request.future.request.data.replace(
            #     '"metric": 1,', '"metric":,'
            # )

            # generateToken the Authorization_Systems, and check we got the error we expect
            try:
                client, response = check(
                    request,
                    quiet=True, returnResponse=True
                )
            except (HTTPBadRequest, HTTPForbidden) as e:  # 400, 403 error
                # Accept any of the known PathFinder error texts.
                get_error_message(e) | expect.any(
                    should.start_with('may not be empty'),
                    should.start_with('Invalid page parameter specified'),
                    should.contain('Invalid Authorization Token')
                )
            else:
                # No exception at all means the negative case failed.
                raise Exception(
                    "Expected error message, got {} status code instead.".format(
                        response.status_code))
|
[
"mbehera@qumu.com"
] |
mbehera@qumu.com
|
9b86763b34bce30afdb20d256f2e76972cc7a3ed
|
06919b9fd117fce042375fbd51d7de6bb9ae14fc
|
/py/tests/problems/hashtable/sparse_matrix.py
|
6ffec84f318bf38c68b1a11b7e3818d670628f49
|
[
"MIT"
] |
permissive
|
bmoretz/Daily-Coding-Problem
|
0caf2465579e81996869ee3d2c13c9ad5f87aa8f
|
f79e062e9f6e7b18b7e95c071fbe71ad104affcb
|
refs/heads/master
| 2022-12-07T15:41:06.498049
| 2021-11-18T19:45:19
| 2021-11-18T19:45:19
| 226,376,236
| 1
| 0
|
MIT
| 2022-11-22T09:20:23
| 2019-12-06T17:17:00
|
C++
|
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
import unittest
from dcp.problems.hashtable.sparse_matrix import SparseMatrix
class Test_SparseMatrix(unittest.TestCase):
    """Unit tests for the hash-table backed SparseMatrix."""

    def setUp(self):
        pass

    def test_case1(self):
        """Five identical 29-column rows with 1s at fixed columns, stored in
        a logical 100x100 sparse matrix: get() must be non-zero exactly at
        those columns and zero everywhere else."""
        nonzero_cols = [5, 13, 19, 28]
        width = 29
        one_row = [1 if col in nonzero_cols else 0 for col in range(width)]
        dense = [list(one_row) for _ in range(5)]

        sparse = SparseMatrix(dense, 100, 100)

        for row_idx, row in enumerate(dense):
            for col_idx, _ in enumerate(row):
                if col_idx in nonzero_cols:
                    assert sparse.get(row_idx, col_idx) != 0
                else:
                    assert sparse.get(row_idx, col_idx) == 0
|
[
"bmoretz82@gmail.com"
] |
bmoretz82@gmail.com
|
bb86e58235679dd269f98bd712f8895d48d43d34
|
b503b00b34d4581959ed7534aea4ea196f99d7f7
|
/mstar_picker.py
|
acd7345d465ed7cd5194ad6d3cc8fa929f02c8ff
|
[] |
no_license
|
afitts/analysis_scripts
|
1f76feff7213a26a010e136dea9c5375f447f513
|
e02678a2ecbb9628937cceb0a958e19b5b540597
|
refs/heads/master
| 2021-01-22T20:07:43.033514
| 2017-08-03T13:54:06
| 2017-08-03T13:54:06
| 49,749,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,198
|
py
|
import numpy as np
import sys
import glob
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as co
import pylab
import pygadgetreader as pg
import scipy.integrate
import time
import scipy.stats as stats
import pandas as pd
import scipy.optimize as opt
from matplotlib import rcParams
import matplotlib.animation as animation
from matplotlib.ticker import FixedLocator
from matplotlib.ticker import FixedFormatter
def _a_dot(a, h0, om_m, om_l):
om_k = 1.0 - om_m - om_l
return h0 * a * np.sqrt(om_m * (a ** -3) + om_k * (a ** -2) + om_l)
def _a_dot_recip(*args):
    """Reciprocal expansion rate 1/(da/dt) -- the integrand used to turn a
    scale factor into a lookback time via scipy.integrate.quad."""
    rate = _a_dot(*args)
    return 1.0 / rate
# Halos to process; the commented list is the full sample, the active list a subset.
hnum =['848','1016','32503']#['11707','32503','12596','007','848','796','20192','20910','897','948','1016','32257','10q','10v','1084']
# Per-halo resolution suffix, code version tag and final snapshot number
# (parallel arrays indexed together with hnum).
res = ['_13','_13','_13','_11','_13','_13','_13','_13','_13','_13','_13','_13','_11','_11','_13']
ver = ['5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16','5_12_16']
snap = [184,184,184,184,184,184,184,184,184,184,184,184,600,600,184]
for i in np.arange(len(hnum)):
    print 'HALO ',hnum[i]
    # Walk every snapshot dataframe for this halo (0..184).
    for k in np.arange(185):
        # Per-snapshot HDF5 store with 'particles/star' and 'props' keys.
        a = pd.HDFStore('/nobackup/afitts/Gadget-2.0.7/production/mfm%s%s_giz%s_raw_output/analysis/dataframes/halo%s%s_giz%s_snap%03d.h5'%(hnum[i],res[i],ver[i],hnum[i],res[i],ver[i],k))
        try:
            pid = a['particles/star'].index.values
            mass = a['particles/star']['mass']
            # Positions: comoving code units -> physical kpc (h = 0.71 assumed).
            x = a['particles/star']['x'].as_matrix()*1000/.71
            y = a['particles/star']['y'].as_matrix()*1000/.71
            z = a['particles/star']['z'].as_matrix()*1000/.71
            # Velocities converted to km/s.
            vx = a['particles/star']['vx'].as_matrix()*1000
            vy = a['particles/star']['vy'].as_matrix()*1000
            vz = a['particles/star']['vz'].as_matrix()*1000
            he = a['particles/star']['metal_He'].as_matrix()
            fe = a['particles/star']['metal_Fe'].as_matrix()
            metal = a['particles/star']['metal_tot'].as_matrix()
            # Number of H atoms: hydrogen mass fraction / m_H (in solar masses).
            numH = mass*(1-(metal+he))/(1.6733e-24/1.99e33)
            # Number of Fe atoms: iron mass fraction / m_Fe (in solar masses).
            numFe = mass*fe/(9.27e-23/1.99e33)
            meta = numFe/numH
            # Drop particles sitting at the metallicity floor before averaging.
            nofloor = meta[meta>4.54877795e-09]
            avgnum = np.mean(nofloor)
            # [Fe/H]: log10 abundance ratio, +4.5 normalises to the solar value.
            fullmetal = np.log10(meta)+4.5
            metal = np.log10(avgnum)+4.5
            # Stellar formation time, stored as a scale factor in the dataframe.
            sft = a['particles/star']['sft'].as_matrix()
            YEAR = 60*60*24*365.
            h0 = 71
            om_l = 0.734
            om_m = 0.266
            conv = 3.085677581e+19
            # Convert each formation scale factor into a stellar age (Gyr):
            # age = t_universe (13.736583 Gyr) - lookback integral of 1/adot.
            for j in np.arange(len(sft)):
                sft[j] = 13.736583-scipy.integrate.quad(_a_dot_recip, 0, sft[j], (h0, om_m, om_l))[0]*conv/1e9/YEAR
            time = np.zeros(len(sft))
            time += np.float(a['props']['time'])
            np.savetxt('Halo%s_%03d_stars.out'%(hnum[i],k),np.column_stack((pid,mass,x,y,z,vx,vy,vz,sft,fullmetal,time)),header = '(0) ID (1) MASS (M_sun) (2) X (kpc) (3) Y (4) Z (5) Vx (km/s) (6) Vy (7) Vz (8) Age (Gyr) (9) [Fe/H] (10) Time of snapshot (Gyr)')
            print k
        except Exception,e:
            # Snapshot has no star particles (or a column is missing):
            # emit an all-zero row so downstream code still finds a file.
            print e,'No stars'
            try:
                time = np.float(a['props']['time'])
                np.savetxt('Halo%s_%03d_stars.out'%(hnum[i],k),np.column_stack((0,0,0,0,0,0,0,0,0,0,time)),header = '(0) ID (1) MASS (M_sun) (2) X (kpc) (3) Y (4) Z (5) Vx (km/s) (6) Vy (7) Vz (8) Age (Gyr) (9) [Fe/H] (10) Time of snapshot (Gyr)')
            except:
                print 'No time'
        a.close()
|
[
"fitts.alex@gmail.com"
] |
fitts.alex@gmail.com
|
e187b8c37156b8dcca7888e92b0a8c5bbcc7dfff
|
818edca070b068345b23bd80f76ab95238d6fb43
|
/yiaygenerator/settings.py
|
2907bacf0e8cc98deea121255babcdf07bee8562
|
[
"MIT"
] |
permissive
|
PaperBag42/yiaygenerator
|
39f4595d252c55074edb2e9ff36d08f25ef5746e
|
2aacff5624676d8df6b9544309f33a8f9ab32799
|
refs/heads/master
| 2020-03-29T00:32:58.361790
| 2019-04-27T16:44:31
| 2019-04-27T16:44:31
| 149,344,832
| 0
| 1
| null | 2019-02-06T21:06:51
| 2018-09-18T19:58:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,555
|
py
|
"""
Django settings for yiaygenerator project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from os import path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'yiaygenerator',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yiaygenerator.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yiaygenerator.wsgi.application'
ASGI_APPLICATION = 'yiaygenerator.routing.application'
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
#
# 'formatters': {
# 'clip': {
# 'format': '{levelname}:{name}:YIAY#{se',
# 'style': '{',
# },
# },
# 'loggers': {
# '': {
#
# },
# },
# }
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"ben.zichri@gmail.com"
] |
ben.zichri@gmail.com
|
890df7bc7ac818f73e723fd60e31348000e90d75
|
2c763b37baff67006bc8612868b2af4214e26a9d
|
/news_blog/views.py
|
853114b524d7b453f4c804c4e8a485956b868b0b
|
[] |
no_license
|
Nikkeborn/news_blog_release
|
b4f95ad9e5534e562a3571ab7ab305317f51f5f0
|
2edbbba6bc4d904d027e3b5ea467fb4126a0f06f
|
refs/heads/master
| 2021-09-25T19:41:54.221678
| 2020-04-16T14:53:11
| 2020-04-16T14:53:11
| 244,409,782
| 0
| 0
| null | 2021-09-22T18:40:29
| 2020-03-02T15:46:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
from django.shortcuts import render, redirect, reverse
from django.views.generic import View
from django.contrib.auth.views import LoginView
from django.contrib.auth.models import User
from .forms import UserForm
class Login_(LoginView):
    """Login view that redirects to the authenticated author's detail page."""

    def get_success_url(self):
        # Resolve the user that just logged in from the POSTed username and
        # route them to their own author-details page.
        username = self.request.POST.get('username')
        user = User.objects.get(username=username)
        return reverse('blog:author_details', kwargs={'pk': user.pk})
class UserRegistration(View):
    """Sign-up view: renders a registration form on GET, creates the user
    (with a properly hashed password) on valid POST and redirects to the
    new author's detail page."""
    form = UserForm
    template_name = 'registration/user_form.html'

    def get(self, request):
        """Render an empty registration form."""
        return render(request, self.template_name, context={'form': self.form})

    def post(self, request):
        """Validate the submitted data and create the user.

        On success redirects to blog:author_details; on failure re-renders
        the form with its validation errors.
        """
        bound_form = self.form(request.POST)
        if bound_form.is_valid():
            new_user = bound_form.save(commit=False)
            # The form stores the raw password; set_password() hashes it
            # before the user row is persisted.
            user_pass = new_user.password
            new_user.set_password(user_pass)
            new_user.save()
            user_id = new_user.pk
            return redirect(reverse('blog:author_details', kwargs={'pk': user_id}))
        # Bug fix: re-render the *bound* form so field/validation errors are
        # shown to the user (previously the unbound class form was passed,
        # silently discarding every error message).
        return render(request, self.template_name, context={'form': bound_form})
|
[
"loomvolum@gmail.com"
] |
loomvolum@gmail.com
|
44ed7aab029125950b6f5f506929e89f4de0dcdf
|
6968c7f9d2b20b5296663829f99a27d184a59fc1
|
/autodisc/autodisc/gui/jupyter/imagelistwidget.py
|
b6152e78a221de869f622b5b6696836db4ed377e
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
flowersteam/automated_discovery_of_lenia_patterns
|
d42dff37323d51732571b33845c0562d844f498f
|
97cc7cde2120fa95225d1e470e00b8aa8c034e97
|
refs/heads/master
| 2020-06-29T07:08:58.404541
| 2020-05-14T07:37:10
| 2020-05-14T07:37:10
| 200,470,902
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,855
|
py
|
import autodisc as ad
import ipywidgets
import numpy as np
import IPython.display
class ImageListWidget(ipywidgets.VBox):
    """Paginated Jupyter gallery: shows a list of PNG image byte strings
    (with optional captions) in pages, with prev/next buttons and a page
    dropdown for navigation."""

    @staticmethod
    def get_default_gui_config():
        """Build the default ad.Config tree controlling page size, layout
        and the keyword arguments forwarded to each ipywidgets element."""
        default_config = ad.Config()
        # Number of images shown per page.
        default_config.elements_per_page = 100
        default_config.output_layout = ad.Config()
        # default_config.output_layout.border='3px solid black'
        # Scrollable flex container holding the image tiles.
        default_config.box_layout = ad.Config()
        default_config.box_layout.overflow_y = 'scroll'
        default_config.box_layout.width = '100%'
        default_config.box_layout.height = '500px'
        default_config.box_layout.flex_flow = 'row wrap'
        default_config.box_layout.display = 'flex'
        default_config.content_ouput = ad.Config()
        default_config.page_label = ad.Config()
        default_config.page_selection = ad.Config()
        default_config.page_selection.description = 'Page: '
        default_config.previous_page_button = ad.Config()
        default_config.previous_page_button.description = '<'
        default_config.previous_page_button.layout = ad.Config()
        default_config.previous_page_button.layout.width = '20px'
        default_config.next_page_button = ad.Config()
        default_config.next_page_button.description = '>'
        default_config.next_page_button.layout = ad.Config()
        default_config.next_page_button.layout.width = '20px'
        # Row of navigation controls, right-aligned.
        default_config.button_box = ad.Config()
        default_config.button_box.layout = ad.Config()
        default_config.button_box.layout.flex_flow = 'row'
        default_config.button_box.layout.display = 'flex'
        default_config.button_box.layout.align_items = 'center'
        default_config.button_box.layout['justify-content'] = 'flex-end'
        default_config.button_box.layout.width = '100%'
        # Fixed-size tile for each image.
        default_config.image_items = ad.Config()
        default_config.image_items.layout = ad.Config()
        default_config.image_items.layout.height = '200px'
        default_config.image_items.layout.width = '200px'
        default_config.image_items.layout.border = '2px solid white'
        default_config.image_captions = ad.Config()
        return default_config

    def __init__(self, images=None, config=None, **kwargs):
        """Create the widget; kwargs override `config` which overrides the defaults.

        :param images: optional list of PNG byte strings to show immediately.
        :param config: optional ad.Config overriding the defaults.
        """
        self.config = ad.config.set_default_config(kwargs, config, ImageListWidget.get_default_gui_config())

        self.images = None
        self.main_box = None

        self.content_ouput_widget = ipywidgets.Output(**self.config.content_ouput)

        self.page_label_widget = ipywidgets.Label(**self.config.page_label, value='of 0')

        self.previous_page_button_widget = ipywidgets.Button(**self.config.previous_page_button)
        self.previous_page_button_widget.on_click(self.on_prev_page_button_click)

        self.page_selection_widget = ipywidgets.Dropdown(**self.config.page_selection)
        self.page_selection_widget.observe(self.on_page_selection_change)

        self.next_page_button_widget = ipywidgets.Button(**self.config.next_page_button)
        self.next_page_button_widget.on_click(self.on_next_page_button_click)

        # NOTE(review): this flag is set here but never read anywhere in the
        # class -- dead state, or a re-entrancy guard that was never wired up.
        self.page_selection_widget_ignore_next_value_change = False

        self.button_box_widget = ipywidgets.Box(
            [self.page_selection_widget,
             self.page_label_widget,
             self.previous_page_button_widget,
             self.next_page_button_widget],
            **self.config.button_box
        )

        super().__init__([self.content_ouput_widget, self.button_box_widget], layout=self.config.output_layout)

        self.cur_page_idx = 0

        if images is not None:
            self.update(images)

    def update(self, images, captions=None):
        """Replace the displayed images (and optional per-image captions),
        reset the pagination controls and render the first page."""
        self.images = images
        self.captions = captions

        if self.images is not None and self.images:
            # update page selection widget
            n_pages = int(np.ceil(len(self.images) / self.config.elements_per_page))

            opts = [page_idx + 1 for page_idx in range(n_pages)]  # 1-based page labels
            self.page_selection_widget.options = opts

            # update number of pages
            self.page_label_widget.value = 'of {}'.format(n_pages)

            self.update_page_items(0, force_update=True)
        else:
            # No images: clear the controls and the content area.
            self.page_selection_widget.options = []
            self.page_label_widget.value = 'of 0'
            self.content_ouput_widget.clear_output()

    def update_page_items(self, page_idx, force_update=False):
        """Render page `page_idx` (0-based) into the output area.

        Does nothing when the index is out of range or already displayed,
        unless `force_update` is True.
        """
        if self.images is not None and self.images:

            n_pages = int(np.ceil(len(self.images) / self.config.elements_per_page))

            if n_pages == 0:
                self.content_ouput_widget.clear_output()

            elif page_idx >= 0 and page_idx < n_pages and (self.cur_page_idx != page_idx or force_update):
                items = []

                self.cur_page_idx = page_idx

                # Slice of self.images belonging to this page.
                start_idx = self.config.elements_per_page * self.cur_page_idx
                end_idx = min(self.config.elements_per_page * (self.cur_page_idx + 1), len(self.images))

                for image_idx in range(start_idx, end_idx):
                    image = self.images[image_idx]

                    item_elems = []

                    if self.captions is not None:
                        # Missing captions render as empty labels.
                        if image_idx < len(self.captions):
                            caption_text = self.captions[image_idx]
                        else:
                            caption_text = ''

                        caption_widget = ipywidgets.Label(
                            value=caption_text,
                            **self.config.image_captions
                        )
                        item_elems.append(caption_widget)

                    # Image value is expected to be PNG-encoded bytes.
                    img_widget = ipywidgets.Image(
                        value=image,
                        format='png',
                        **self.config.image_items
                    )
                    item_elems.append(img_widget)

                    items.append(ipywidgets.VBox(item_elems))

                self.main_box = ipywidgets.Box(items, layout=self.config.box_layout)

                # wait=True avoids flicker: old content is cleared only once
                # the new content is ready to display.
                self.content_ouput_widget.clear_output(wait=True)
                with self.content_ouput_widget:
                    IPython.display.display(self.main_box)

                # Keep the dropdown in sync (this re-triggers the observer,
                # but the idx == cur_page_idx guard above makes it a no-op).
                self.page_selection_widget.value = page_idx + 1
        else:
            self.content_ouput_widget.clear_output()

    def on_prev_page_button_click(self, button):
        """'<' button handler: go one page back (ignored at the first page)."""
        self.update_page_items(self.cur_page_idx - 1)

    def on_next_page_button_click(self, button):
        """'>' button handler: go one page forward (ignored at the last page)."""
        self.update_page_items(self.cur_page_idx + 1)

    def on_page_selection_change(self, change):
        """Dropdown observer: jump to the selected (1-based) page."""
        if change['type'] == 'change' and change['name'] == 'value':
            if self.page_selection_widget.value is not None:
                self.update_page_items(self.page_selection_widget.value - 1)
|
[
"chris.reinke@inria.fr"
] |
chris.reinke@inria.fr
|
91827b433498925f9c557663f2ffc3ab84e0f02d
|
2d83c1084de8b53cbe78638b5672a8a65a3659d5
|
/.venv/bin/isort
|
da12d4435288dd048c85d79b73422bd36f53479c
|
[] |
no_license
|
Ulrika90/Hello
|
650a778c120f7a42d99138e7e34afb7dcc9504f2
|
496252f9fc75a86e99a40c035f2ad99e4e1decf2
|
refs/heads/master
| 2020-06-24T03:32:49.818152
| 2019-07-25T13:23:31
| 2019-07-25T13:23:31
| 198,835,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
#!/Users/ulrikawestholm/Git/Python/hello/.venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for the `isort` package
# (created by pip/setuptools at install time).
import re
import sys

from isort.main import main

if __name__ == '__main__':
    # Normalise argv[0]: strip the '-script.py'/'-script.pyw'/'.exe' suffix
    # that Windows entry-point launchers append, so isort reports a clean name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"ulrikawestholm@Ulrikas-MacBook-Air.local"
] |
ulrikawestholm@Ulrikas-MacBook-Air.local
|
|
bbb33c2583e79f2ebfcf80477d93aa479721526b
|
648f742d6db2ea4e97b83c99b6fc49abd59e9667
|
/common/vault/oas/models/contracts_smart_contract.py
|
d5d7062b8e4de5bd91efe115deb981877b802760
|
[] |
no_license
|
jmiller-tm/replit
|
c56ce63718f6eb2d9b53bd09d3f7b3ef3496cb86
|
c8e6af3268c4ef8da66516154850919ea79055dc
|
refs/heads/main
| 2023-08-30T00:49:35.738089
| 2021-11-16T23:09:08
| 2021-11-16T23:09:08
| 428,809,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,856
|
py
|
# coding: utf-8
"""
vault/kernel/core_api/proto/v1/accounts/core_api_account_schedule_tags.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ContractsSmartContract(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> swagger type string (used by to_dict()).
    swagger_types = {
        'code': 'str',
        'smart_contract_param_vals': 'dict(str, str)',
        'smart_contract_version_id': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'code': 'code',
        'smart_contract_param_vals': 'smart_contract_param_vals',
        'smart_contract_version_id': 'smart_contract_version_id'
    }

    def __init__(self, code=None, smart_contract_param_vals=None, smart_contract_version_id=None):  # noqa: E501
        """ContractsSmartContract - a model defined in Swagger"""  # noqa: E501
        self._code = None
        self._smart_contract_param_vals = None
        self._smart_contract_version_id = None
        self.discriminator = None
        # Only assign provided values; attributes left at None are treated
        # as unset by the API.
        if code is not None:
            self.code = code
        if smart_contract_param_vals is not None:
            self.smart_contract_param_vals = smart_contract_param_vals
        if smart_contract_version_id is not None:
            self.smart_contract_version_id = smart_contract_version_id

    @property
    def code(self):
        """Gets the code of this ContractsSmartContract.  # noqa: E501

        Source code of the Smart Contract that is to be simulated.  # noqa: E501

        :return: The code of this ContractsSmartContract.  # noqa: E501
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """Sets the code of this ContractsSmartContract.

        Source code of the Smart Contract that is to be simulated.  # noqa: E501

        :param code: The code of this ContractsSmartContract.  # noqa: E501
        :type: str
        """
        self._code = code

    @property
    def smart_contract_param_vals(self):
        """Gets the smart_contract_param_vals of this ContractsSmartContract.  # noqa: E501

        Values for the Smart Contract parameters.  # noqa: E501

        :return: The smart_contract_param_vals of this ContractsSmartContract.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._smart_contract_param_vals

    @smart_contract_param_vals.setter
    def smart_contract_param_vals(self, smart_contract_param_vals):
        """Sets the smart_contract_param_vals of this ContractsSmartContract.

        Values for the Smart Contract parameters.  # noqa: E501

        :param smart_contract_param_vals: The smart_contract_param_vals of this ContractsSmartContract.  # noqa: E501
        :type: dict(str, str)
        """
        self._smart_contract_param_vals = smart_contract_param_vals

    @property
    def smart_contract_version_id(self):
        """Gets the smart_contract_version_id of this ContractsSmartContract.  # noqa: E501

        The ID that will be used as the Smart Contract ID in the simulation and can be referenced by the simulation instructions.  # noqa: E501

        :return: The smart_contract_version_id of this ContractsSmartContract.  # noqa: E501
        :rtype: str
        """
        return self._smart_contract_version_id

    @smart_contract_version_id.setter
    def smart_contract_version_id(self, smart_contract_version_id):
        """Sets the smart_contract_version_id of this ContractsSmartContract.

        The ID that will be used as the Smart Contract ID in the simulation and can be referenced by the simulation instructions.  # noqa: E501

        :param smart_contract_version_id: The smart_contract_version_id of this ContractsSmartContract.  # noqa: E501
        :type: str
        """
        self._smart_contract_version_id = smart_contract_version_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise every declared attribute; nested models are
        # converted via their own to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for models that subclass dict (not the case here).
        if issubclass(ContractsSmartContract, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ContractsSmartContract):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"jmiller@jmiller-tm00769-mbp.nomad.thomac.net"
] |
jmiller@jmiller-tm00769-mbp.nomad.thomac.net
|
a3bd9f7287b261d6b7e3c747f1d10e15bca2a1c1
|
2855f26e603ec7bf5b18876b54b75ee4577bdf2c
|
/witdraw/forms.py
|
65f12090c349714d0754149c7cec48b2f49658bc
|
[] |
no_license
|
zkenstein/ppob_multipay_v2
|
e8ea789c395c6fa5b83ba56fbaf5ea08a2a77a14
|
85296f925acf3e94cc371637805d454581391f6e
|
refs/heads/master
| 2022-03-04T13:53:30.893380
| 2019-11-16T22:49:50
| 2019-11-16T22:49:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
from django import forms
from django.contrib.auth.models import User
from .models import Witdraw
MIN_COMMISION = 10000
class WitdrawForm(forms.ModelForm):
    """Form for creating a :class:`Witdraw` (withdrawal) request.

    Restricts ``create_by`` to agen accounts (``profile.user_type == 2``)
    and validates the amount and the user's commission balance against
    ``MIN_COMMISION``.
    """

    class Meta:
        model = Witdraw
        fields = [
            'create_by', 'amount'
        ]

    def __init__(self, *args, **kwargs):
        super(WitdrawForm, self).__init__(*args, **kwargs)
        # Only agen accounts (user_type == 2) may be selected as requester.
        self.fields['create_by'].queryset = User.objects.filter(
            profile__user_type=2
        )

    def clean_amount(self):
        """Reject amounts below the minimum withdrawal threshold."""
        amount = self.cleaned_data.get('amount')
        if amount < MIN_COMMISION:
            # Fixed misspelled user-facing message ("Monimal withdraw").
            raise forms.ValidationError('Minimal withdraw 10.000')
        return amount

    def clean_create_by(self):
        """Validate that the selected user is an agen eligible to withdraw.

        Requires user_type == 2, a non-empty ponsel (phone number) and a
        commission balance of at least MIN_COMMISION.
        """
        usr = self.cleaned_data.get('create_by')
        if usr.profile.user_type != 2:
            raise forms.ValidationError('User is not an agen')
        if usr.profile.ponsel is None or usr.profile.ponsel == '':
            # Fixed misspelled user-facing message ("canot").
            raise forms.ValidationError('Ponsel cannot be empty')
        if usr.profile.wallet.commision < MIN_COMMISION:
            # Fixed misspelled user-facing message ("Commision not enought").
            raise forms.ValidationError('Commission not enough to withdraw')
        return usr
|
[
"anderi.setiawan@gmail.com"
] |
anderi.setiawan@gmail.com
|
b73222ef2351dbea7ab1c2b6235bf83274de39e7
|
f3c98241adce87b9fb480647fe35318d979dfe0e
|
/src/amarket/utils/test_timeUtils.py
|
7866180b995f5344b195517a01e4e0e2bb0f2bd3
|
[] |
no_license
|
qwteng/Amarket
|
4fb5915f77666e0b86513d697d83fcb10f90df51
|
20929a0d53908bb716d5583beaf24263a868c946
|
refs/heads/master
| 2023-05-13T07:28:38.966534
| 2019-10-07T13:59:18
| 2019-10-07T13:59:18
| 164,220,620
| 0
| 0
| null | 2023-05-01T21:15:41
| 2019-01-05T14:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
# coding=utf-8
import pytest
from .timeUtils import *
def test_get_workday_backward():
    """Backward resolution (flag False): weekend dates snap to the
    preceding Friday, weekdays map to themselves."""
    expectations = (
        ('20190325', '20190325'),  # Monday stays put
        ('20190324', '20190322'),  # Sunday -> previous Friday
        ('20190323', '20190322'),  # Saturday -> previous Friday
        ('20190322', '20190322'),  # Friday stays put
    )
    for day, expected in expectations:
        assert get_workday(day, False) == expected
def test_get_workday_forward():
    """Forward resolution (flag True): weekend dates snap to the
    following Monday, weekdays map to themselves."""
    expectations = (
        ('20190325', '20190325'),  # Monday stays put
        ('20190324', '20190325'),  # Sunday -> next Monday
        ('20190323', '20190325'),  # Saturday -> next Monday
        ('20190322', '20190322'),  # Friday stays put
    )
    for day, expected in expectations:
        assert get_workday(day, True) == expected
def test_get_workday_offset():
    """Integer offsets count whole workdays from the resolved base date
    (a weekend start resolves backward to Friday first)."""
    expectations = (
        ('20190325', 0, '20190325'),
        ('20190325', 1, '20190326'),
        ('20190325', -1, '20190322'),  # skips the weekend
        ('20190325', 5, '20190401'),
        ('20190324', 0, '20190322'),   # Sunday resolves to Friday
        ('20190324', 1, '20190325'),
        ('20190324', -1, '20190321'),
        ('20190324', 5, '20190329'),
    )
    for day, offset, expected in expectations:
        assert get_workday(day, offset) == expected
|
[
"qwteng@qq.com"
] |
qwteng@qq.com
|
c19b81e1c8eeff010b5280346d1a9652be1aa501
|
d53ad9afe037a750b1418e69c668e7f167af48ba
|
/scripts/Rent_ntbk_util.py
|
46d0ad574cb183ec0b5037cee1ca052fe7d008d1
|
[] |
no_license
|
priyachotalia/Group_Project
|
e0ed654730ea92bab1902de15a5ce2a40e2177a7
|
1fb7789cafb002061862bfa5155ed62552b5bc5a
|
refs/heads/master
| 2022-12-21T23:43:52.162406
| 2020-09-20T11:11:35
| 2020-09-20T11:11:35
| 297,054,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,135
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Load the raw rent-bond data and shorten the CSV headers.
# NOTE(review): "QNB" appears to mean a quartile of new bonds and "NB"/"TB"
# new/total bonds lodged — inferred from usage below, confirm with the data
# dictionary.
rent_df=pd.read_csv("data/Rent_Data.csv")
rent_df.columns = ["Year", "Quarter", "LGA", "Dwelling Type", "Bedroom Number",
                "First QNB", "Second QNB", "Third QNB", "NB Lodged", "TB Lodged", "Qtrly Median Change",
                "Annly Median Change", "Qtrly NB", "Annly NB"]
### DATA CLEANING ###
# "-" marks a missing value in the raw export
rent_df = rent_df.replace("-", np.nan)
rent_df["First QNB"] = rent_df["First QNB"].astype(str).astype(float)
rent_df["Second QNB"] = rent_df["Second QNB"].astype(str).astype(float)
rent_df["Third QNB"] = rent_df["Third QNB"].astype(str).astype(float)
## NAN values are replaced with min-1 (sentinels computed before any rows
## are zeroed out below)
f_min_val = np.min(rent_df["First QNB"]) - 1
s_min_val = np.min(rent_df["Second QNB"]) - 1
t_min_val = np.min(rent_df["Third QNB"]) - 1
null = []
# Rows with all 9 numeric fields null are zeroed out entirely (their index
# is also remembered in `null`)
for x in rent_df.index:#range(len(rent_df.index)):
    null_row = rent_df.loc[x].isnull().sum()
    if null_row == 9:
        null.append(x)
        rent_df.loc[x] = rent_df.loc[x].replace(np.nan, 0)
## remaining NAN values are replaced with the min-1 sentinel
rent_df["First QNB"] = rent_df["First QNB"].replace(np.nan, f_min_val)
rent_df["Second QNB"] = rent_df["Second QNB"].replace(np.nan, s_min_val)
rent_df["Third QNB"] = rent_df["Third QNB"].replace(np.nan, t_min_val)
## replace "s" (per the note below: a very small, suppressed count) with a
## temporary placeholder of 1
rent_df = rent_df.replace("s", 1)
rent_df["NB Lodged"] = rent_df["NB Lodged"].astype(str).astype(float)
rent_df["TB Lodged"] = rent_df["TB Lodged"].astype(str).astype(float)
### NAN values are replaced with 0 ###
## "s" placeholder values (stored as 1 above) mean "very small"; replace them
## with half the column minimum.
## BUG FIX: the original wrote `.replace(1, min_val)/2`, which divided EVERY
## value in the column by 2 instead of only substituting min/2 for the
## placeholder, corrupting all bond counts.
fq_min_val = np.min(rent_df["NB Lodged"])
rent_df["NB Lodged"] = rent_df["NB Lodged"].replace(1, fq_min_val / 2)
rent_df["NB Lodged"] = rent_df["NB Lodged"].replace(np.nan, 0)
sq_min_val = np.min(rent_df["TB Lodged"])
rent_df["TB Lodged"] = rent_df["TB Lodged"].replace(1, sq_min_val / 2)
rent_df["TB Lodged"] = rent_df["TB Lodged"].replace(np.nan, 0)
## float to int — counts and dollar quartiles are whole numbers
rent_df["NB Lodged"] = rent_df["NB Lodged"].astype(int)
rent_df["TB Lodged"] = rent_df["TB Lodged"].astype(int)
rent_df["First QNB"] = rent_df["First QNB"].astype(int)
rent_df["Second QNB"] = rent_df["Second QNB"].astype(int)
rent_df["Third QNB"] = rent_df["Third QNB"].astype(int)
## remove % sign by extracting the leading digit run.
## NOTE(review): '(\d+)' drops the sign and any decimals, so "-1.5%"
## becomes 1.0 — confirm this loss of sign/precision is intended.
rent_df['Qtrly Median Change'] = rent_df['Qtrly Median Change'].astype(str).str.extract('(\d+)').astype(float)
rent_df['Annly Median Change'] = rent_df['Annly Median Change'].astype(str).str.extract('(\d+)').astype(float)
rent_df['Qtrly NB'] = rent_df['Qtrly NB'].astype(str).str.extract('(\d+)').astype(float)
rent_df['Annly NB'] = rent_df['Annly NB'].astype(str).str.extract('(\d+)').astype(float)
## replace nan values for the last 4 columns with 0
rent_df = rent_df.replace(np.nan, 0)
## "Mean QNB" = row-wise mean of the three quartile columns; used as the
## headline rent-price metric for the rest of the script
price_df = rent_df[['LGA', 'Dwelling Type', 'Bedroom Number', 'First QNB', 'Second QNB', 'Third QNB']].copy()
price_df['mean'] = price_df.mean(axis=1)
rent_df['Mean QNB'] = np.nan
rent_df['Mean QNB'] = price_df['mean']
rent_df['Mean QNB'] = rent_df['Mean QNB'].astype(int)
### GET TOP LGA ###
## remove the aggregate 'Total' rows, then take the ten LGAs with the
## highest Mean QNB
sort_df = rent_df[rent_df['LGA']!='Total']
top_mean = sort_df.sort_values(by="Mean QNB", ascending = False)['LGA'].unique()[:10]
top_mean_arr = []
## Collect the Mean QNB for each of the 18 quarters (2015 Q1 .. 2019 Q2)
## per top LGA, one tuple per LGA, for graphing.
## FIX: the original copied each row into 18 separate Q*_* variables via an
## 18-branch if-chain; if an LGA ever had fewer than 18 rows the trailing
## variables silently reused the previous LGA's values (or raised NameError
## for the first LGA).  Collect the column directly and pad short series
## with 0 instead.
for lga in top_mean:
    lga_rows = rent_df[rent_df['LGA'] == lga]
    lga_rows = lga_rows[lga_rows['Dwelling Type'] == 'Total']
    lga_rows = lga_rows[lga_rows['Bedroom Number'] == 'Total']
    lga_rows = lga_rows.sort_values(by=['Year', 'Quarter']).reset_index()
    quarters = lga_rows['Mean QNB'].tolist()[:18]  # one value per quarter
    quarters += [0] * (18 - len(quarters))  # pad incomplete series
    top_mean_arr.append((lga, *quarters))
## Build the plotting frame: one row per top LGA, one column per quarter
top_mean_df = pd.DataFrame.from_records(top_mean_arr)
top_mean_df.columns = ['LGA', '2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
               '2017.Q1', '2017.Q2', '2017.Q3', '2017.Q4', '2018.Q1', '2018.Q2', '2018.Q3', '2018.Q4',
               '2019.Q1', '2019.Q2']
top_mean_df.index = top_mean_df['LGA']
top_mean_df = top_mean_df.drop('LGA', axis=1)
## drop the earliest quarters so the graph stays readable
top_mean_df = top_mean_df.drop(['2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
               '2017.Q1', '2017.Q2'], axis=1)
### Get all LGAs and Mean QNB per year and quarter ###
## Get unique LGA (aggregate 'Total' rows already excluded via sort_df)
LGA_list = sort_df['LGA'].unique()
all_mean_arr = []
## Gather the 18 quarterly Mean QNB values (2015 Q1 .. 2019 Q2) per LGA.
## FIX: replaces the 18-branch if-chain, which silently reused stale values
## from the previous LGA whenever a group had fewer than 18 rows; short
## series are now padded with 0.
for lga in LGA_list:
    lga_rows = rent_df[rent_df['LGA'] == lga]
    lga_rows = lga_rows[lga_rows['Dwelling Type'] == 'Total']
    lga_rows = lga_rows[lga_rows['Bedroom Number'] == 'Total']
    lga_rows = lga_rows.sort_values(by=['Year', 'Quarter']).reset_index()
    quarters = lga_rows['Mean QNB'].tolist()[:18]  # one value per quarter
    quarters += [0] * (18 - len(quarters))  # pad incomplete series
    all_mean_arr.append((lga, *quarters))
## Array to dataframe: one row per LGA, one column per quarter
all_mean_df = pd.DataFrame.from_records(all_mean_arr)
all_mean_df.columns = ['LGA', '2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
               '2017.Q1', '2017.Q2', '2017.Q3', '2017.Q4', '2018.Q1', '2018.Q2', '2018.Q3', '2018.Q4',
               '2019.Q1', '2019.Q2']
all_mean_df.index = all_mean_df['LGA']
#all_LGA_df = all_LGA_df.drop('LGA', axis=1)
## remove columns for the graph to look nice ###
all_mean_df = all_mean_df.drop(['2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
               '2017.Q1', '2017.Q2'], axis=1)
### TRAIN TEST for all_mean_df dataframe ###
## divide data: predict the newest quarter (2019.Q2) from the previous seven
from sklearn.model_selection import train_test_split
cols = np.array(['2017.Q3', '2017.Q4', '2018.Q1', '2018.Q2', '2018.Q3', '2018.Q4',
       '2019.Q1'])
X = all_mean_df[cols]
y = all_mean_df['2019.Q2']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size = 0.3)
## Models compared on the same split.
## NOTE(review): three of these are classifiers treating each integer rent
## value as a discrete class; only RandomForestRegressor does regression —
## confirm the classification framing is intentional.
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import mean_squared_error, r2_score
model_list = [DecisionTreeClassifier(random_state=0),
              RandomForestRegressor(random_state=1),
              svm.SVC(gamma='scale'),
              MLPClassifier(hidden_layer_sizes=(100,100,100), max_iter=20, alpha=0.01,
                     solver='sgd', verbose=10, random_state=21,tol=0.000000001)]
model_name = ['Decision Tree', 'Random Forest', 'SVM', 'Neural Network']
stats = []
## Fit each model and record train/test metrics.
## FIX: the result columns are labelled "RMSE", but the original stored the
## raw mean_squared_error (MSE); take the square root so the values match
## the label.
for idx in range(4):
    model = model_list[idx]
    model.fit(X_train, y_train)
    train_pred = model.predict(X_train)
    RMSE_train = np.sqrt(mean_squared_error(y_train, train_pred))
    R2_train = r2_score(y_train, train_pred)
    Accuracy_train = model.score(X_train, y_train)  # accuracy or R2, per estimator
    test_pred = model.predict(X_test)
    RMSE_test = np.sqrt(mean_squared_error(y_test, test_pred))
    R2_test = r2_score(y_test, test_pred)
    Accuracy_test = model.score(X_test, y_test)
    stats.append((model_name[idx], RMSE_train, RMSE_test, R2_train, R2_test,
                  Accuracy_train, Accuracy_test))
stats_df = pd.DataFrame.from_records(stats)
stats_df.columns = ['Model', 'RMSE Train', 'RMSE Test', 'R-Squared Train', 'R-Squared Test',
                    'Accuracy Score Train', 'Accuracy Score Test']
stats_df.index = stats_df['Model']
stats_df = stats_df.drop('Model',
                         axis = 1)
### Top LGAs according to New Signed Bonds ###
## remove the aggregate 'Total' rows, then take the ten LGAs with the most
## new bonds lodged
sort_df = rent_df[rent_df['LGA']!='Total']
top_NB = sort_df.sort_values(by="NB Lodged", ascending = False)['LGA'].unique()[:10]
top_NB_arr = []
## Gather "NB Lodged" per quarter (2015 Q1 .. 2019 Q2) for each top LGA.
## FIX: replaces the fragile 18-branch if-chain, which silently reused the
## previous LGA's values whenever a group had fewer than 18 rows; short
## series are now padded with 0.
for lga in top_NB:
    lga_rows = rent_df[rent_df['LGA'] == lga]
    lga_rows = lga_rows[lga_rows['Dwelling Type'] == 'Total']
    lga_rows = lga_rows[lga_rows['Bedroom Number'] == 'Total']
    lga_rows = lga_rows.sort_values(by=['Year', 'Quarter']).reset_index()
    quarters = lga_rows['NB Lodged'].tolist()[:18]  # one value per quarter
    quarters += [0] * (18 - len(quarters))  # pad incomplete series
    top_NB_arr.append((lga, *quarters))
## Get the Mean QNB per quarter and year (2015 Q1 .. 2019 Q2) for the same
## top new-bond LGAs, for graphing.
## FIX: replaces the 18-branch if-chain, which silently reused the previous
## LGA's values whenever a group had fewer than 18 rows; short series are
## now padded with 0.
top_nbmean_arr=[]
for lga in top_NB:
    lga_rows = rent_df[rent_df['LGA'] == lga]
    lga_rows = lga_rows[lga_rows['Dwelling Type'] == 'Total']
    lga_rows = lga_rows[lga_rows['Bedroom Number'] == 'Total']
    lga_rows = lga_rows.sort_values(by=['Year', 'Quarter']).reset_index()
    quarters = lga_rows['Mean QNB'].tolist()[:18]  # one value per quarter
    quarters += [0] * (18 - len(quarters))  # pad incomplete series
    top_nbmean_arr.append((lga, *quarters))
## Graph LGA: one row per top new-bond LGA, one column per quarter
top_NB_df = pd.DataFrame.from_records(top_NB_arr)
top_NB_df.columns = ['LGA', '2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
             '2017.Q1', '2017.Q2', '2017.Q3', '2017.Q4', '2018.Q1', '2018.Q2', '2018.Q3', '2018.Q4',
             '2019.Q1', '2019.Q2']
top_NB_df.index = top_NB_df['LGA']
top_NB_df = top_NB_df.drop('LGA', axis=1)
## remove the earliest quarters so the graph stays readable
top_NB_df = top_NB_df.drop(['2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
             '2017.Q1', '2017.Q2'], axis=1)
## MEAN
## Graph LGA: same layout, but holding the Mean QNB values
top_nbmean_df = pd.DataFrame.from_records(top_nbmean_arr)
top_nbmean_df.columns = ['LGA', '2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
             '2017.Q1', '2017.Q2', '2017.Q3', '2017.Q4', '2018.Q1', '2018.Q2', '2018.Q3', '2018.Q4',
             '2019.Q1', '2019.Q2']
top_nbmean_df.index = top_nbmean_df['LGA']
top_nbmean_df = top_nbmean_df.drop('LGA', axis=1)
## remove the earliest quarters so the graph stays readable
top_nbmean_df = top_nbmean_df.drop(['2015.Q1', '2015.Q2', '2015.Q3', '2015.Q4', '2016.Q1', '2016.Q2', '2016.Q3', '2016.Q4',
             '2017.Q1', '2017.Q2'], axis=1)
### Dwelling Type Most People Rent ###
## dataframe per dwelling type
house_df = rent_df[rent_df['Dwelling Type']=='House']
flat_df = rent_df[rent_df['Dwelling Type']=='Flat/Unit']
townhouse_df = rent_df[rent_df['Dwelling Type']=='Townhouse']
## get total bonds lodged per year, one (year, count, type) record per
## dwelling type per year
year = [2015, 2016, 2017, 2018, 2019]
bond_count_arr = []
for x in range(len(year)):
    yr = year[x]
    house = house_df[house_df['Year']==yr]['TB Lodged'].sum()
    name = 'House'
    bond_count_arr.append((yr, house, name))
    flat = flat_df[flat_df['Year']==yr]['TB Lodged'].sum()
    name = 'Flat/Unit'
    bond_count_arr.append((yr, flat, name))
    townhouse = townhouse_df[townhouse_df['Year']==yr]['TB Lodged'].sum()
    name = 'Townhouse'
    bond_count_arr.append((yr, townhouse, name))
### FLAT: number of bedrooms most people rent ###
bond_count_df = pd.DataFrame.from_records(bond_count_arr)
bond_count_df.columns = ['Year', 'Count', 'Dwelling Type']
## slice flats by bedroom count
BS_df = flat_df[flat_df['Bedroom Number']=='Bedsitter']
B1_df = flat_df[flat_df['Bedroom Number']=='1 Bedroom']
B2_df = flat_df[flat_df['Bedroom Number']=='2 Bedrooms']
B3_df = flat_df[flat_df['Bedroom Number']=='3 Bedrooms']
B4_df = flat_df[flat_df['Bedroom Number']=='4 or more Bedrooms']
## total bonds lodged per year per bedroom category
flatbond_count_arr = []
for x in range(len(year)):
    yr = year[x]
    BS = BS_df[BS_df['Year']==yr]['TB Lodged'].sum()
    name = 'Bedsitter'
    flatbond_count_arr.append((yr, BS, name))
    B1 = B1_df[B1_df['Year']==yr]['TB Lodged'].sum()
    name = '1 Bedroom'
    flatbond_count_arr.append((yr, B1, name))
    B2 = B2_df[B2_df['Year']==yr]['TB Lodged'].sum()
    name = '2 Bedrooms'
    flatbond_count_arr.append((yr, B2, name))
    B3 = B3_df[B3_df['Year']==yr]['TB Lodged'].sum()
    name = '3 Bedrooms'
    flatbond_count_arr.append((yr, B3, name))
    B4 = B4_df[B4_df['Year']==yr]['TB Lodged'].sum()
    name = '4 or more Bedrooms'
    flatbond_count_arr.append((yr, B4, name))
flatbond_count_df = pd.DataFrame.from_records(flatbond_count_arr)
flatbond_count_df.columns = ['Year', 'Count', 'Number of Bedrooms']
|
[
"noreply@github.com"
] |
noreply@github.com
|
a94dbdf4fc6e774943ac77d02fc7c1c4ab4a4eff
|
99767736ea5f34be4438ce689fc27454dffbf15c
|
/build/lib/sqlalchemy_nav/__init__.py
|
f5667ece8a87d320adf715b232e0f99d96ab7b47
|
[
"MIT"
] |
permissive
|
dsbowen/sqlalchemy-nav
|
4600ff85c99878d98167fee000d5b9cd6a0a90bc
|
d60b28fe74cdde65de68a140d0c2845d92fb9b0f
|
refs/heads/master
| 2020-08-02T09:35:27.233849
| 2020-06-10T16:50:22
| 2020-06-10T16:50:22
| 211,304,650
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
"""Mixins for SQLAlchemy-Nav
SQLAlchemy-Nav Mixins can be used to create dynamic navigation bar models
compatible with Bootstrap 4. Navigation bars can contain navigation
items, dropdown items, and custom html.
All models store their HTML in a `MutableSoup` attribute, `body`. This is
essentially a `BeautifulSoup` object which you can use to insert custom
HTML.
`Navitem`s are nested in `Navbar`s, and `Dropdownitem`s are nested in
`Navitem`s.
"""
from sqlalchemy_nav.navbar import NavbarMixin
from sqlalchemy_nav.navitem import NavitemMixin
from sqlalchemy_nav.dropdownitem import DropdownitemMixin
|
[
"dsbowen@wharton.upenn.edu"
] |
dsbowen@wharton.upenn.edu
|
88d1ec2c7c8599a735b126fa18f086afd2c00131
|
41aafbb5f749f192e81f754bbbf52e4f7bd231c4
|
/app.py
|
ddccd8b64783330339baa5dcfd3fc81be129af7d
|
[] |
no_license
|
JasonCodez/flask_madlibs
|
84c31b2b413306053bedcb7c32e82b6653916315
|
cb9c1a65cd098d7b02810c6783454488fe801783
|
refs/heads/main
| 2023-05-13T11:14:23.528019
| 2021-06-04T23:16:59
| 2021-06-04T23:16:59
| 373,978,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
from flask import Flask, render_template, request
from stories import story
app = Flask(__name__)
@app.route('/')
def ask_questions():
    """Render the madlib question form built from the story's prompts."""
    return render_template("questions.html", prompts=story.prompts)
@app.route("/story")
def show_story():
text = story.generate(request.args)
return render_template("story.html", text=text)
|
[
"jason13garrett@gmail.com"
] |
jason13garrett@gmail.com
|
de4031edd500d91f3c5f79daceda0b6ddd0c105d
|
53faa0ef3496997412eb5e697bc85eb09a28f8c9
|
/pipeline/0x02-databases/34-log_stats.py
|
cd026394d8134a1af7cdf365a1a6c146de8897f9
|
[] |
no_license
|
oran2527/holbertonschool-machine_learning
|
aaec2ffe762b959573f98a5f4e002272a5d643a3
|
8761eb876046ad3c0c3f85d98dbdca4007d93cd1
|
refs/heads/master
| 2023-08-14T00:37:31.163130
| 2021-09-20T13:34:33
| 2021-09-20T13:34:33
| 330,999,053
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
#!/usr/bin/env python3
""" stats about Nginx logs stored in MongoDB """
from pymongo import MongoClient
if __name__ == "__main__":
    """ stats about Nginx logs stored in MongoDB """
    # Connect to the local MongoDB instance and grab the logs.nginx collection.
    nginx = MongoClient('mongodb://127.0.0.1:27017').logs.nginx
    # Total number of log documents.
    print("{} logs".format(nginx.count_documents({})))
    # Per-HTTP-method counts.
    print("Methods:")
    for method in ("GET", "POST", "PUT", "PATCH", "DELETE"):
        count = nginx.count_documents({"method": method})
        print("\tmethod {}: {}".format(method, count))
    # Number of GET requests to the /status endpoint.
    status_checks = nginx.count_documents({"method": "GET", "path": "/status"})
    print("{} status check".format(status_checks))
|
[
"orlago250183@gmail.com"
] |
orlago250183@gmail.com
|
0db6856e41bbe6b3773d8320f95dd2e5edbcc1d6
|
451f158c20fd425bc9d14c8e27e1a8f415423276
|
/novels_search/config/config.py
|
d38b0dcf8002cdb6327e83d0f826c02ffffffbc9
|
[
"Apache-2.0"
] |
permissive
|
TimeCharmer/novels-search
|
3767a77c237426a66f25287abae3c0a44528cf52
|
ab8152ff12d828dba0a8b52aa9c08675b21a1c5f
|
refs/heads/master
| 2021-01-19T14:21:28.438011
| 2017-04-12T09:37:48
| 2017-04-12T09:37:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
#!/usr/bin/env python
import logging
from aiocache import RedisCache
# Search engine endpoints used by the crawler
URL_PHONE = 'https://m.baidu.com/s'  # Baidu mobile search
URL_PC = 'http://www.baidu.com/s'  # Baidu desktop search
BAIDU_RN = 15  # results requested per Baidu page
SO_URL = "https://www.so.com/s"  # 360 (so.com) search
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
# logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
LOGGER = logging.getLogger('novels_search')
# aiocache / Redis settings.
# NOTE(review): REDIS_ENDPOINT and PASSWORD are empty — they must be filled
# per deployment for caching to work.
REDIS_DICT = dict(
    IS_CACHE=True,
    REDIS_ENDPOINT="",
    REDIS_PORT=6379,
    PASSWORD="",
    CACHE_DB=0,
    SESSION_DB=1,
    POOLSIZE=4,
)
AIO_CACHE = RedisCache(endpoint=REDIS_DICT['REDIS_ENDPOINT'], port=REDIS_DICT['REDIS_PORT'], namespace="main")
# mongodb connection settings (empty fields filled per deployment)
MONGODB = dict(
    HOST="",
    PORT="",
    USERNAME='',
    PASSWORD='',
    DATABASE='owllook',
)
# website
WEBSITE = dict(
    IS_RUNNING=True,
    TOKEN=''
)
# API auth header template
AUTH = {
    "Owllook-Api-Key": ""
}
# allowed Host header values
HOST = ['owllook.net', 'www.owllook.net', '0.0.0.0:8000']
TIMEZONE = 'Asia/Shanghai'
|
[
"xiaozizayang@gmail.com"
] |
xiaozizayang@gmail.com
|
4239b59efd8be01546de57fd9436920f76c9aaf9
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_089/ch14_2020_03_09_13_31_11_006129.py
|
151ce51fd678c102515933e6bb62da962336d66d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
def calcula_distancia_do_projetil(v, t, h):
    """Return the horizontal range of a projectile launched from height h.

    Uses the standard range-from-height formula with g = 9.8 m/s^2:
        D = (v^2 / 2g) * (1 + sqrt(1 + 2gh / (v^2 sin^2 t))) * sin(2t)

    Parameters:
        v: launch speed (m/s)
        t: launch angle above horizontal (radians)
        h: initial height above the landing plane (m)

    BUG FIXES vs. original: `math` was never imported (NameError on every
    call); `v**2/2*9.8` multiplied by g instead of dividing; the square root
    around the bracket was missing; and the sin^2 term multiplied instead of
    dividing the 2gh/v^2 factor.
    """
    import math  # local import: this module has no top-level imports
    g = 9.8
    D = (v ** 2 / (2 * g)) * (
        1 + math.sqrt(1 + (2 * g * h) / (v ** 2 * math.sin(t) ** 2))
    ) * math.sin(2 * t)
    return D
|
[
"you@example.com"
] |
you@example.com
|
c802abd4dd480d611f8d66bd6588eeaecc35679e
|
4bc5ad9cc3036c56447c2d3da9c7e7b52e895495
|
/alumni/student/migrations/0003_auto_20190409_1042.py
|
2b8416a9180b4909bb1e1bd69b5a15dbdc7b7233
|
[] |
no_license
|
hariyanganesh/Alumini
|
ffd31939bb06040556517a9849c64760c48fe2b0
|
9f4aa1dfa005ce0b1d266176e3e206c6ace8ef56
|
refs/heads/master
| 2020-05-09T21:56:27.413868
| 2019-04-15T11:20:11
| 2019-04-15T11:20:11
| 181,453,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
# Generated by Django 2.2 on 2019-04-09 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the `studinfo1` model.

    Adds the `branch` field, relaxes `jobstatus`, `lastname` and `name`
    to allow NULL, and gives `passout` a default of 2000.
    """

    dependencies = [
        ('student', '0002_auto_20190409_0136'),
    ]
    operations = [
        # New nullable branch field
        migrations.AddField(
            model_name='studinfo1',
            name='branch',
            field=models.CharField(max_length=30, null=True),
        ),
        # Existing CharFields relaxed to allow NULL
        migrations.AlterField(
            model_name='studinfo1',
            name='jobstatus',
            field=models.CharField(max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='studinfo1',
            name='lastname',
            field=models.CharField(max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='studinfo1',
            name='name',
            field=models.CharField(max_length=30, null=True),
        ),
        # Graduation year now defaults to 2000
        migrations.AlterField(
            model_name='studinfo1',
            name='passout',
            field=models.IntegerField(default=2000),
        ),
    ]
|
[
"noreply@github.com"
] |
noreply@github.com
|
cbe4d8dfdab89e21fe288bd6986ab78a30943da9
|
a1807bf5ca332fecc7e775c9bde25eeed318db9d
|
/disclosure_backend/tests/test_docgen.py
|
63a82b798a7aed4f2eaba4f1d41ba995ffbe972e
|
[] |
no_license
|
MrMaksimize/disclosure-backend
|
2c6a8936c08cd4c3ff328ee114a8050e410989cf
|
6d97305b4656bd630b9e12aef953daed51c84ed7
|
refs/heads/master
| 2020-12-26T00:46:07.104157
| 2016-01-06T17:04:38
| 2016-01-06T17:04:38
| 49,169,984
| 0
| 0
| null | 2016-02-12T14:05:13
| 2016-01-07T00:19:30
|
Python
|
UTF-8
|
Python
| false
| false
| 479
|
py
|
import os
from django.conf import settings
from django.core.management import call_command
from rest_framework.test import APITestCase
class DocGenerationTest(APITestCase):
    """Smoke tests for the model-docs generation management command."""

    def test_generate_docs(self):
        """ Test createcalaccessrawmodeldocs"""
        # Running the command must not raise...
        call_command('createcalaccessrawmodeldocs')
        # ...and must leave at least the docs directory behind.
        expected_dir = os.path.join(settings.REPO_DIR, 'docs')
        self.assertTrue(os.path.exists(expected_dir))
|
[
"bcipolli@ucsd.edu"
] |
bcipolli@ucsd.edu
|
8817c54b5350de86ca658ecf083530659a7b4852
|
ba0e07b34def26c37ee22b9dac1714867f001fa5
|
/unreleased/azure-mgmt-eventhub/azure/mgmt/eventhub/models/consumer_group_create_or_update_parameters.py
|
22ea6ee0888f32dfc1f858599060ca80abe0a49a
|
[
"MIT"
] |
permissive
|
CharaD7/azure-sdk-for-python
|
b11a08ac7d24a22a808a18203072b4c7bd264dfa
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
refs/heads/master
| 2023-05-12T12:34:26.172873
| 2016-10-26T21:35:20
| 2016-10-26T21:35:20
| 72,448,760
| 1
| 0
|
MIT
| 2023-05-04T17:15:01
| 2016-10-31T15:14:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConsumerGroupCreateOrUpdateParameters(Model):
    """Parameters supplied to the CreateOrUpdate Consumer Group operation.
    :param location: Location of the resource.
    :type location: str
    :param type: ARM type of the namespace.
    :type type: str
    :param name: Name of the consumer group.
    :type name: str
    :param created_at: Exact time the message was created.
    :type created_at: datetime
    :param event_hub_path: The path of the event hub.
    :type event_hub_path: str
    :param updated_at: The exact time the message has been updated.
    :type updated_at: datetime
    :param user_metadata: The user metadata.
    :type user_metadata: str
    """
    # msrest validation constraints; only `location` is mandatory.
    _validation = {
        'location': {'required': True},
    }
    # Maps attribute names to wire-format keys and msrest types; the
    # "properties.*" keys follow the nested ARM resource envelope.
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
        'event_hub_path': {'key': 'properties.eventHubPath', 'type': 'str'},
        'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
        'user_metadata': {'key': 'properties.userMetadata', 'type': 'str'},
    }
    def __init__(self, location, type=None, name=None, created_at=None, event_hub_path=None, updated_at=None, user_metadata=None):
        """Store the supplied fields; `location` is required, the rest optional.

        NOTE: this file is AutoRest-generated (see the file header) — edits
        here are documentation-only and will be lost on regeneration.
        """
        self.location = location
        self.type = type
        self.name = name
        self.created_at = created_at
        self.event_hub_path = event_hub_path
        self.updated_at = updated_at
        self.user_metadata = user_metadata
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
f5fb1d2d332a2c3099ec44d0d0d7fb97c7ebe551
|
cb435ce9b9cb3a5e7f7d5555b59239aaa1533cd9
|
/notebooks/omniglot/proto_nets.py
|
9076ca24f522c4242d4e99a6dd0ffe9ec74f7985
|
[] |
no_license
|
kuan-li/Keras-FewShotLearning
|
5107aba13b561dd7a1ec857318cf338de048c681
|
988a4ff6021db7ae778f880cff85b7d4978b2835
|
refs/heads/master
| 2021-02-22T22:46:10.884663
| 2020-03-05T16:26:05
| 2020-03-05T16:26:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,582
|
py
|
#%%
import logging
from pathlib import Path
from unittest.mock import patch
import imgaug.augmenters as iaa
import numpy as np
import pandas as pd
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras import Model
from tensorflow.keras.saving import load_model
from keras_fsl.datasets import omniglot
from keras_fsl.models import SiameseNets
from keras_fsl.sequences import (
DeterministicSequence,
ProtoNetsSequence,
)
from keras_fsl.utils import patch_len, default_workers
# prevent issue with multiprocessing and long sequences, see https://github.com/keras-team/keras/issues/13226
# The patch wraps fit_generator so workers/len handling is corrected; it is
# started here and never stopped for the lifetime of the notebook run.
patch_fit_generator = patch(
    'tensorflow.keras.Model.fit_generator',
    side_effect=default_workers(patch_len(Model.fit_generator)),
)
patch_fit_generator.start()
logging.getLogger('tensorflow').setLevel(logging.ERROR)
#%% Get data
train_set, test_set = omniglot.load_data()
#%% Update label columns to be able to mix alphabet during training
# (labels are prefixed with their alphabet so same-named characters from
# different alphabets stay distinct)
train_set = train_set.assign(label=lambda df: df.alphabet + '_' + df.label)
test_set = test_set.assign(label=lambda df: df.alphabet + '_' + df.label)
#%% Training ProtoNets
# 5-shot 5-way episodic training setup
k_shot = 5
n_way = 5
proto_nets = SiameseNets(
    branch_model='VinyalsNet',
    head_model={
        'name': 'ProtoNets',
        'init': {'k_shot': k_shot, 'n_way': n_way}
    },
)
# 30% of the training alphabets/characters are held out for validation
val_set = train_set.sample(frac=0.3, replace=False)
train_set = train_set.loc[lambda df: ~df.index.isin(val_set.index)]
callbacks = [TensorBoard(), ModelCheckpoint('logs/proto_nets/best_weights.h5')]
(Path('logs') / 'proto_nets').mkdir(parents=True, exist_ok=True)
# light affine augmentation applied to every training image
preprocessing = iaa.Sequential([
    iaa.Affine(
        translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)},
        rotate=(-10, 10),
        shear=(-0.8, 1.2),
    )
])
train_sequence = ProtoNetsSequence(
    train_set,
    n_way=n_way,
    preprocessing=preprocessing,
    batch_size=16,
    target_size=(28, 28, 3),
)
val_sequence = ProtoNetsSequence(val_set, batch_size=16, target_size=(28, 28, 3))
proto_nets.compile(optimizer='Adam', loss='categorical_crossentropy')
Model.fit_generator(  # to use patched fit_generator, see first cell
    proto_nets,
    train_sequence,
    validation_data=val_sequence,
    callbacks=callbacks,
    epochs=100,
    steps_per_epoch=1000,
    validation_steps=200,
    use_multiprocessing=True,
)
#%% Prediction
# reload the best checkpoint and split it into encoder + metric head
proto_nets = load_model('logs/proto_nets/best_weights.h5')
encoder = proto_nets.get_layer('branch_model')
head_model = proto_nets.get_layer('head_model')
test_sequence = DeterministicSequence(test_set, batch_size=16, target_size=(28, 28, 3))
embeddings = encoder.predict_generator(test_sequence, verbose=1)
k_shot = 5
n_way = 5
# support: k_shot examples for each of n_way randomly chosen test classes
support = (
    test_set
    .loc[lambda df: df.label.isin(df.label.drop_duplicates().sample(n_way))]
    .groupby('label')
    .apply(lambda group: group.sample(k_shot).drop('label', axis=1))
    .reset_index('label')
)
# query: every remaining example of the chosen classes
query = (
    test_set
    .loc[lambda df: df.label.isin(support.label.unique())]
    .loc[lambda df: ~df.index.isin(support.index)]
)
# score each query embedding against the tiled support embeddings and attach
# one probability column per class
predictions = (
    pd.concat([
        query,
        pd.DataFrame(head_model.predict([
            embeddings[query.index],
            *np.moveaxis(
                embeddings[np.tile(support.index, reps=len(query))].reshape((len(query.index), k_shot * n_way, -1)),
                1, 0,
            )
        ]), columns=support.label.unique(), index=query.index),
    ], axis=1)
)
confusion_matrix = (
    pd.crosstab(
        predictions.label,
        predictions.iloc[:, -n_way:].idxmax(axis=1),
        margins=True,
    )
)
|
[
"clementw@sicara.com"
] |
clementw@sicara.com
|
d4e3751b2d4796c72be497007fe4c7d8ca67e18e
|
6db97ab761d59452c05611354637dfb2ce693c96
|
/src/compas_fab/geometry/frame.py
|
60723945ebff9a82556936c9e69f14253f61e9df
|
[
"MIT"
] |
permissive
|
Mahdi-Soheyli/compas_fab
|
e885efbdd5531ae5f245bf02b2f1acce0a308680
|
0e7d426903a5d9a1bca947cd7a1251031c4c71b4
|
refs/heads/master
| 2020-05-02T16:53:13.265526
| 2019-03-20T13:37:37
| 2019-03-20T13:37:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
from compas.geometry import Frame
|
[
"casas@arch.ethz.ch"
] |
casas@arch.ethz.ch
|
26ca9926f70904dbde2689d810b066820a29a616
|
ddabf93201341cccfa05bb16e731543d6f3d428b
|
/recursion.py
|
4abdb12a45918e48435cb5e992fb5fe9dcfa5fec
|
[] |
no_license
|
saumyajit2000/recursion
|
d0e6d0e08dc87c1e4fa1827c0476e3aa6f7f2d34
|
176e23f6e75a56b0500a509ad6e01b06888da146
|
refs/heads/main
| 2023-08-10T21:44:54.417188
| 2021-09-28T10:11:10
| 2021-09-28T10:11:10
| 411,230,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,532
|
py
|
import random
import time
#FUNCTION FOR USING MERGE SORT------------
def mergeSort(numbers):
    """Sort `numbers` in place, ascending, using recursive merge sort.

    Stable: equal elements keep their relative order (left half wins ties).
    Lists of length 0 or 1 are left untouched.
    """
    if len(numbers) <= 1:
        return
    middle = len(numbers) // 2
    left_half = numbers[:middle]
    right_half = numbers[middle:]
    # Sort each half independently.
    mergeSort(left_half)
    mergeSort(right_half)
    # Merge the two sorted halves back into `numbers`.
    li = ri = wi = 0
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] <= right_half[ri]:
            numbers[wi] = left_half[li]
            li += 1
        else:
            numbers[wi] = right_half[ri]
            ri += 1
        wi += 1
    # Copy whichever half still has elements left.
    while li < len(left_half):
        numbers[wi] = left_half[li]
        li += 1
        wi += 1
    while ri < len(right_half):
        numbers[wi] = right_half[ri]
        ri += 1
        wi += 1
#FUNCTION FOR USING QUICK SORT------------
def partition(array, low, high):
    """Lomuto partition of array[low..high] around pivot array[high].

    Elements <= pivot are moved left of the returned index, which is the
    pivot's final sorted position.
    """
    pivot = array[high]
    boundary = low - 1  # last index of the <= pivot region
    for scan in range(low, high):
        if array[scan] <= pivot:
            boundary += 1
            array[boundary], array[scan] = array[scan], array[boundary]
    # Put the pivot just after the <= region.
    array[boundary + 1], array[high] = array[high], array[boundary + 1]
    return boundary + 1
def quicksort(array, low, high):
    """Sort array[low..high] in place, ascending, via recursive quicksort."""
    if low < high:
        split = partition(array, low, high)
        quicksort(array, low, split - 1)
        quicksort(array, split + 1, high)
# ---- bubble sort ---------------------------------------------------------
def bubble_sort(numbers):
    """Sort *numbers* in place (ascending) and return the same list.

    Improvements over the original: idiomatic tuple swap instead of a temp
    variable, the inner pass shrinks as the tail becomes sorted, and the
    loop exits early once a full pass makes no swaps (already-sorted input
    becomes O(n)).  Output is identical to the original implementation.
    """
    for end in range(len(numbers) - 1, 0, -1):
        swapped = False
        for j in range(end):
            if numbers[j] > numbers[j + 1]:
                numbers[j], numbers[j + 1] = numbers[j + 1], numbers[j]
                swapped = True
        if not swapped:
            break  # no swaps this pass: the list is sorted
    return numbers
# ---- insertion sort ------------------------------------------------------
def insertion_sort(numbers):
    """Sort *numbers* in place, ascending and stable, via insertion sort."""
    for idx in range(1, len(numbers)):
        current = numbers[idx]
        pos = idx
        # Shift larger elements right until `current`'s slot is found.
        while pos > 0 and numbers[pos - 1] > current:
            numbers[pos] = numbers[pos - 1]
            pos -= 1
        numbers[pos] = current
# ---------- CODE FOR TAKING VALUES --------
# Driver: build a list of random integers from user input, sort it, and
# report wall-clock time.  Only the insertion-sort branch is active; the
# other sorters are left commented out for manual benchmarking.
n=int(input("How many random numbers you want ? : "))
start_value=int(input("Enter the starting range : "))
end_value=int(input("Enter the ending range : "))
# NOTE(review): the timer starts before list generation, so the reported
# time includes building the random list, not just the sort.
start=time.time()
numbers=[]
for i in range(n):
    num=((random.randint(start_value,end_value)))
    numbers.append(num)
print("List of random numbers is : ",numbers)
#print("Unsorted data is : ",numbers)
# size=len(numbers)
# bubble_sort(numbers)
# print("Sorted data is -- " ,numbers)
# end=time.time()
# executiontime=(end)-(start)
# print("Execution time for BUBBLE SORT is : ",executiontime)
# size=len(numbers)
# mergeSort(numbers)
# # print("Sorted data using INSERTION SORT is -- " ,numbers)
# end=time.time()
# executiontime=(end)-(start)
# print("Execution time for MERGE SORT is : ",executiontime)
insertion_sort(numbers)
# print("Sorted data using INSERTION SORT is -- " ,numbers)
end=time.time()
executiontime=(end)-(start)
print("Execution time for Insertion Sort is : ",executiontime)
# quicksort(array, low, high)
# # print("Sorted data using INSERTION SORT is -- " ,numbers)
# end=time.time()
# executiontime=(end)-(start)
# print("Execution time for QUICK SORT is : ",executiontime)
|
[
"noreply@github.com"
] |
noreply@github.com
|
5846bc204c7e1842e8b5ea77991c70bcba7181e3
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/52_14.py
|
ac7dd979fdab752a1073fb88aae9f43db82f325a
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,219
|
py
|
Python – Test for Even values dictionary values lists
Given a dictionary with lists as values, map Boolean values depending upon all
values in List are Even or not.
> **Input** : {“Gfg” : [6, 8, 10], “is” : [8, 10, 12, 16], “Best” : [10, 16,
> 14, 6]}
> **Output** : {‘Gfg’: True, ‘is’: True, ‘Best’: True}
> **Explanation** : All lists have even numbers.
>
> **Input** : {“Gfg” : [6, 5, 10], “is” : [8, 10, 11, 16], “Best” : [10, 16,
> 14, 6]}
> **Output** : {‘Gfg’: False, ‘is’: False, ‘Best’: True}
> **Explanation** : Only “Best” has even numbers.
**Method #1 : Using loop**
This is brute way in which this task can be performed. In this, we iterate for
all the values and check if all list values are Even if yes, we assign key as
True else False.
## Python3
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Test for Even values dictionary values lists
# Using loop
# initializing dictionary
test_dict = {"Gfg" : [6, 7, 3],
"is" : [8, 10, 12, 16],
"Best" : [10, 16, 14, 6]}
# printing original dictionary
print("The original dictionary is : " + str(test_dict))
res = dict()
for sub in test_dict:
flag = 1
# checking for even elements
for ele in test_dict[sub]:
if ele % 2 != 0:
flag = 0
break
# adding True if all Even elements
res[sub] = True if flag else False
# printing result
print("The computed dictionary : " + str(res))
---
__
__
**Output**
The original dictionary is : {'Gfg': [6, 7, 3], 'is': [8, 10, 12, 16], 'Best': [10, 16, 14, 6]}
The computed dictionary : {'Gfg': False, 'is': True, 'Best': True}
**Method #2 : Using all() + dictionary comprehension**
This is yet another way in which this task can be performed. In this, we check
for all the elements using all() and dictionary comprehension is used to
remake the result.
## Python3
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Test for Even values dictionary values lists
# Using all() + dictionary comprehension
# initializing dictionary
test_dict = {"Gfg" : [6, 7, 3],
"is" : [8, 10, 12, 16],
"Best" : [10, 16, 14, 6]}
# printing original dictionary
print("The original dictionary is : " + str(test_dict))
# using all to check for all even elements
res = {sub : all(ele % 2 == 0 for ele in
test_dict[sub]) for sub in test_dict}
# printing result
print("The computed dictionary : " + str(res))
---
__
__
**Output**
The original dictionary is : {'Gfg': [6, 7, 3], 'is': [8, 10, 12, 16], 'Best': [10, 16, 14, 6]}
The computed dictionary : {'Gfg': False, 'is': True, 'Best': True}
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
b6b51b242a71db81c78ac1325b12e83d53de3eaa
|
8a5fca5068f454ee11e81e9c7fa6faee44b84ae9
|
/CarND-Capstone/ros/src/tl_detector/light_publisher.py
|
7ed3dde7d3c242a24bb63cbf43e9ee8b59923ae9
|
[] |
no_license
|
abes975/CarND-Capstone
|
7910d5e4e775cb6d6dd40a69a5e0ea3b280b5aec
|
f3079bbddc368015511d01578ff5bc1eb04be793
|
refs/heads/master
| 2021-07-11T18:08:50.233079
| 2017-10-10T19:56:31
| 2017-10-10T19:56:31
| 105,473,446
| 0
| 0
| null | 2017-10-01T20:49:44
| 2017-10-01T20:49:43
| null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
#!/usr/bin/env python
import rospy
import tf
import cv2
import time
from styx_msgs.msg import TrafficLightArray, TrafficLight
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped, Quaternion, TwistStamped
import numpy as np
import rospkg
import math
class TLPublisher(object):
    """ROS node that republishes one fixed, simulated traffic light at 50 Hz."""

    def __init__(self):
        # Register the node and the publisher, build a single hard-coded
        # light, then block in the publishing loop until shutdown.
        rospy.init_node('tl_publisher')
        self.traffic_light_pubs = rospy.Publisher('/vehicle/traffic_lights', TrafficLightArray, queue_size=1)
        # state=3 -- presumably a TrafficLight state constant; TODO confirm
        # against the styx_msgs/TrafficLight definition.
        light = self.create_light(20.991, 22.837, 1.524, 0.08, 3)
        lights = TrafficLightArray()
        lights.header = light.header
        lights.lights = [light]
        self.lights = lights
        self.loop()

    def loop(self):
        # Publish the (static) light array at 50 Hz until ROS shuts down.
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            self.traffic_light_pubs.publish(self.lights)
            rate.sleep()

    def create_light(self, x, y, z, yaw, state):
        """Build a stamped TrafficLight at (x, y, z) with the given state."""
        light = TrafficLight()
        light.header = Header()
        light.header.stamp = rospy.Time.now()
        light.header.frame_id = 'world'
        light.pose = self.create_pose(x, y, z, yaw)
        light.state = state
        return light

    def create_pose(self, x, y, z, yaw=0.):
        """Build a stamped pose in the 'world' frame; yaw is in degrees."""
        pose = PoseStamped()
        pose.header = Header()
        pose.header.stamp = rospy.Time.now()
        pose.header.frame_id = 'world'
        pose.pose.position.x = x
        pose.pose.position.y = y
        pose.pose.position.z = z
        # Degrees -> radians conversion happens here (math.pi * yaw / 180).
        q = tf.transformations.quaternion_from_euler(0., 0., math.pi * yaw/180.)
        pose.pose.orientation = Quaternion(*q)
        return pose
if __name__ == '__main__':
    # Instantiating TLPublisher starts the node and blocks in its loop.
    try:
        TLPublisher()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic publisher node.')
|
[
"sebastian.nietardt@googlemail.com"
] |
sebastian.nietardt@googlemail.com
|
ac33c346ad83106d32dfc516843c5835c52734e9
|
3ed70536d4d06b2ac43b64976ddc43a5d7025b31
|
/uri1091.py
|
4cb102241664ec3f00f7c77717e8df84b2c4c8f9
|
[] |
no_license
|
LuisHenrique01/Questoes_URI
|
7f1d397e3cd055349939184603eb86cb4bf43d65
|
35c8e77eb7cd9da96df4268b5d71f3ad87446c89
|
refs/heads/master
| 2020-07-22T08:12:12.700484
| 2020-04-12T17:39:29
| 2020-04-12T17:39:29
| 207,126,339
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
def main():
    """URI 1091: classify points relative to a dividing point.

    Reads cases until a 0 sentinel.  Each case gives the number of query
    points, the divider (div_x, div_y), then the points.  A point sharing
    either coordinate with the divider prints 'divisa' (border); otherwise
    its compass quadrant: NE, SE, NO (NW), or SO (SW).
    """
    while True:
        rep = int(input())
        if rep == 0: break
        div_x, div_y = list(map(int, input().split()))
        for _ in range(rep):
            ponto_x, ponto_y = list(map(int, input().split()))
            if ponto_x == div_x or ponto_y == div_y:
                print('divisa')
            elif ponto_x > div_x and ponto_y > div_y:
                print('NE')
            elif ponto_x > div_x and ponto_y < div_y:
                print('SE')
            elif ponto_x < div_x and ponto_y > div_y:
                print('NO')
            else:
                print('SO')
if __name__ == "__main__":
    main()
|
[
"luizbueno1201@gmail.com"
] |
luizbueno1201@gmail.com
|
76bb7ef6f7c01dc420ea9e55a37a40caa24021e6
|
e06a3edbee8b0a5dfb26c3212b14fcce2fdca75b
|
/coding_templates_and_data_files/machine_learning/6. dimensionality_reduction/1. principal_component_analysis.py
|
ae99f07995e6c7f9727accef8db01746ef773088
|
[] |
no_license
|
tokenflow/Machine-Learning-101
|
8a1f41b37df0d0b5ca96773ec0e17f60292b022e
|
89b98b9c4709d8c53d9b749d6d341b062df4a192
|
refs/heads/master
| 2020-03-27T05:27:27.793688
| 2018-08-29T15:13:32
| 2018-08-29T15:13:32
| 146,021,041
| 2
| 0
| null | 2018-08-24T17:29:57
| 2018-08-24T17:29:57
| null |
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
# Principal Component Analysis
# Reduce the 13-feature wine dataset to 2 principal components, fit a
# logistic-regression classifier on them, and plot the decision regions.
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('wine.csv')
X = dataset.iloc[:, :13].values
y = dataset.iloc[:, -1].values
# Spliting the dataset into the Training set & Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)
# Feature Scaling (scaler is fit on the training set only, then reused)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Applying PCA -- PCA is fit on the training set and applied to both sets
from sklearn.decomposition import PCA
# pca = PCA(n_components = None)
pca = PCA(n_components = 2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
# Used to identify percentages of IVs (variance explained per component)
explained_variance = pca.explained_variance_ratio_
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results: classify every point of a 0.01-step
# grid over PC1/PC2 and shade the resulting decision regions.
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green', 'blue'))(i), label = j, edgecolor='black')
plt.title('Logistic Regression (Training Set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
# Visualising the Test set results (same grid technique as above)
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green', 'blue'))(i), label = j, edgecolor='black')
plt.title('Logistic Regression (Test Set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
|
[
"rpartridge101@yahoo.co.uk"
] |
rpartridge101@yahoo.co.uk
|
acec6ef9375f55783a801f351a99bb2dc0a52676
|
c730f09a8d96e1ec84c2cabbc33e6489befae09a
|
/tutorial/polls/views.py
|
f569cc5da38c04ec14e942b4e9814ff763fc96d3
|
[] |
no_license
|
sosodoit/django
|
669a4b8abb27d1b4d062ac8063891dee3666108f
|
f0cdee32dd069b7c0ac7c417ac55aa4f063bdb1f
|
refs/heads/master
| 2023-06-01T01:26:13.786788
| 2021-06-30T08:28:39
| 2021-06-30T08:28:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
from polls.models import Question
from .forms import QuestionForm
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
def index(request):
    """Render the question list, newest (by pub_date) first."""
    question_list = \
        Question.objects.order_by('-pub_date')
    return render(
        request, 'polls/question_list.html',
        {'question_list': question_list}
    )
    # Dead experiments kept below (unreachable after the return above):
    # result = [ q.subject for q in question_list ]
    # return JsonResponse(result, safe=False)
    # return HttpResponse( str(result) )
def detail(request, question_id):
    """Render one question's detail page, or 404 if the id is unknown."""
    # question = \
    # Question.objects.get(pk=question_id)
    question = get_object_or_404(Question, id=question_id)
    return render(
        request, 'polls/question_detail.html',
        {'question': question}
    )
def answer_create(request, question_id):
    """Create an answer on the question from POST 'content', then redirect
    back to the question's detail page (404 if the question is unknown)."""
    question = get_object_or_404(Question, pk=question_id)
    question.answer_set.create(
        content=request.POST.get('content'),
        create_date=timezone.now())
    # BUG FIX: redirect() resolves a URL *pattern name*; the original passed
    # the literal path '/polls:detail/', which can never match a route.
    return redirect('polls:detail', question_id=question.id)
def question_create(request):
    """GET: show an empty QuestionForm.  POST: validate, stamp create_date,
    save, and redirect to the index; an invalid POST re-renders the form
    with its errors."""
    if request.method == 'POST':
        form = QuestionForm(request.POST)
        if form.is_valid():
            # commit=False so create_date can be set before the first save.
            question = form.save(commit=False)
            question.create_date = timezone.now()
            question.save()
            return redirect('polls:index')
    else:
        form = QuestionForm()
    context = {'form': form}
    return render(request, 'polls/question_form.html', context)
|
[
"sohuig@naver.com"
] |
sohuig@naver.com
|
a71c39e3394fc5cc6525d2128a4f4548fe0a677b
|
042bd40e554ac7fcd618c334ae98b4f43248a250
|
/interfaces/python/lib/ocean_dummy.py
|
41ebdac07d314bb378d87fb2fc951791b1c79acd
|
[
"Apache-2.0"
] |
permissive
|
kant/ocean-tensor-package
|
8a62df968335de2057ff095f0910e5ad5fcff8e1
|
fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d
|
refs/heads/master
| 2020-03-29T04:01:22.064480
| 2018-09-19T19:17:19
| 2018-09-19T19:17:19
| 149,511,923
| 0
| 0
|
Apache-2.0
| 2018-09-19T21:03:14
| 2018-09-19T21:03:14
| null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
# -------------------------------------------------------------------------
# Copyright 2018, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from pyOceanDummy_itf import *
from pyOceanDummy_cpu import *
# The GPU implementation is optional
try:
    from pyOceanDummy_gpu import *
except ImportError:
    # BUG FIX: a missing module raises ImportError (ModuleNotFoundError),
    # not ValueError -- the old handler let the intended fallback crash.
    pass
|
[
"evandenberg@us.ibm.com"
] |
evandenberg@us.ibm.com
|
7b379dbb43b66e2e49d8e764f6b4e5a1c4752c87
|
c14e3c9760b2c1a4f8aff459461913e78912811d
|
/workflow/src/gene_profile.py
|
8ec1381494dc6e75a93c384f3ff06b88310c3ba9
|
[] |
no_license
|
tianyabeef/metagenome
|
65f022b4a1b170338fdd9a820ba00dce669a697a
|
bc7d73527d0f6ba11faec38766b014ed4942b3d8
|
refs/heads/master
| 2020-04-04T04:03:30.423154
| 2016-12-08T03:48:19
| 2016-12-08T03:48:19
| 51,899,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,734
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
__author__ = "huangy"
__copyright__ = "Copyright 2016, The metagenome Project"
__version__ = "1.0.0-dev"
import os
import re
from ConfigParser import ConfigParser
from workflow.util.useful import mkdir
def gene_profile(config,sh_default_file,outpath,name):
    """Build the shell-command list for the gene-abundance profiling step.

    config: INI file whose [param] section's 'group' entry lists group files
        (whitespace/tab separated); per-group analysis dirs are created under
        the config's directory.
    sh_default_file, outpath, name: unused here -- presumably kept for the
        pipeline's common function signature; TODO confirm with callers.
    Returns the list of command strings (nothing is executed here).
    """
    commands = []
    work_dir = os.path.dirname(config)
    # Abundance computation plus cluster-scheduler job submissions.
    commands.append("/data_center_01/pipeline/huangy/metagenome/perlscript/06_geneabundance clean_reads_list gene_catalog.list gene_catalog.length")
    commands.append("nohup /data_center_03/USER/zhongwd/bin/qsge --queue all.q --resource vf=10G --maxjob 10 --jobprefix MA --lines 1 --getmem shell_alignment/match.sh &")
    commands.append("nohup /data_center_03/USER/zhongwd/bin/qsge --queue all.q --resource vf=10G --maxjob 10 --jobprefix AB --lines 2 --getmem shell_alignment/abun.sh &")
    commands.append("ls alignment/*/*abundance |/data_center_01/pipeline/huangy/metagenome/perlscript/02_profile - > gene.profile")
    commands.append("/data_center_01/pipeline/huangy/metagenome/perlscript/06_shannon gene.profile gene.alpha.div.tsv")
    commands.append("head -4 gene.profile | sed '1s/^/Gene ID/g' > example.gene.profile.tsv")
    commands.append("Rscript /data_center_01/pipeline/huangy/metagenome/Rscript/06_geneset.R")
    commands.append("#差异分析")
    # Per-group difference analysis: one output directory per group file.
    config_gene = ConfigParser()
    config_gene.read(config)
    group = re.split("\s+|\t",config_gene.get("param","group"))
    mkdir("%s/group/" % work_dir)
    for subgroup in group:
        subgroup = os.path.basename(subgroup)
        subgroup_split =os.path.splitext(subgroup)[0]
        mkdir("%s/group/%s/"%(work_dir,subgroup_split))
        commands.append("python /data_center_01/pipeline/huangy/metagenome/pyscript/convert_abundance_group.py gene.profile ../group/%s group/%s/gene.profile genus" % (subgroup,subgroup_split))
        commands.append("/data_center_01/pipeline/16S_ITS_pipeline_v3.0/script/03_otu_pca.py -i group/%s/gene.profile -g ../group/%s -o group/%s/09.pca --with_boxplot" % (subgroup_split,subgroup,subgroup_split))
        mkdir("%s/group/%s/11-14.beta_div/"%(work_dir,subgroup_split))
        mkdir("%s/group/%s/11-14.beta_div/gene/"%(work_dir,subgroup_split))
        commands.append("cd group/%s/11-14.beta_div/gene; perl /data_center_01/pipeline/huangy/metagenome/perlscript/02_Beta_diversity.pl -p ../../../../group/%s/gene.profile -g ../../../../../group/%s -m bray -r; cd -" %(subgroup_split,subgroup_split,subgroup))
        mkdir("%s/group/%s/15.LEfSe/" % (work_dir,subgroup_split))
        commands.append("/data_center_01/pipeline/16S_ITS_pipeline_v3.0/script/05_tax_diff.py -i group/%s/gene.profile -o group/%s/gene_diff/ -g ../group/%s -c 0.05"%(subgroup_split,subgroup_split,subgroup))
        commands.append("/data_center_01/pipeline/16S_ITS_pipeline_v3.0/script/05_diff_pca.py -i group/%s/gene_diff/profile.for_plot.txt -o group/%s/gene_diff/pca -g ../group/%s" %(subgroup_split,subgroup_split,subgroup))
        commands.append("/data_center_01/pipeline/16S_ITS_pipeline_v3.0/script/03_tax_heatmap.py -f group/%s/gene_diff/profile.for_plot.txt -o group/%s/gene_diff/heatmap -g ../group/%s -t 30" % (subgroup_split,subgroup_split,subgroup))
        commands.append(" /data_center_01/pipeline/16S_ITS_pipeline_v3.0/script/05_diff_boxplot.py -i group/%s/gene_diff/profile.for_plot.txt -o group/%s/gene_diff/boxplot -g ../group/%s -t 20"%(subgroup_split,subgroup_split,subgroup))
        #commands.append("/data_center_01/pipeline/16S_ITS_pipeline_v3.0/script/05_LEfSe.py -i group/%s/gene.profile -l /data_center_03/USER/huangy/soft/LEfSe_lzb -g ../group/%s -o group/%s/15.LEfSe/ --LDA 2" %(subgroup_split,subgroup,subgroup_split))
    return commands
|
[
"410378266@qq.com"
] |
410378266@qq.com
|
82dea43dea06c6bdb53b570bc5ff8fb63e66a0bc
|
d9f981d50d5476226ceffb9a4f75498ed3fd6127
|
/winequality/main.py
|
893ccfc9c451c39d03cc23efa0d4c65bce4e0224
|
[] |
no_license
|
bwriteLeaf/guochushiyan
|
509df750338ac988c8d050c790e273e02eb365f6
|
af9b3fd48874fcc18ea964c90d8a8994137a9523
|
refs/heads/master
| 2021-04-15T08:57:52.308169
| 2018-04-04T07:54:43
| 2018-04-04T07:54:43
| 126,176,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,644
|
py
|
# -*- coding: utf-8 -*-
import importlib,sys
import os
import time
from sklearn import metrics
import numpy as np
import pickle as pickle
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
#importlib.reload(sys)
#sys.setdefaultencoding('utf8')
# Multinomial Naive Bayes Classifier
def naive_bayes_classifier(train_x, train_y):
    """Fit and return a multinomial naive Bayes model (smoothing alpha=0.01)."""
    from sklearn.naive_bayes import MultinomialNB
    model = MultinomialNB(alpha=0.01)
    model.fit(train_x, train_y)
    return model
# KNN Classifier
def knn_classifier(train_x, train_y):
    """Fit and return a k-nearest-neighbours model with sklearn defaults."""
    from sklearn.neighbors import KNeighborsClassifier
    model = KNeighborsClassifier()
    model.fit(train_x, train_y)
    return model
# Logistic Regression Classifier
def logistic_regression_classifier(train_x, train_y):
    """Fit and return an L2-regularised logistic-regression model."""
    from sklearn.linear_model import LogisticRegression
    model = LogisticRegression(penalty='l2')
    model.fit(train_x, train_y)
    return model
# Random Forest Classifier
def random_forest_classifier(train_x, train_y):
    """Fit and return a random forest with 8 trees."""
    from sklearn.ensemble import RandomForestClassifier
    model = RandomForestClassifier(n_estimators=8)
    model.fit(train_x, train_y)
    return model
# Decision Tree Classifier
def decision_tree_classifier(train_x, train_y):
    """Fit and return a decision tree with sklearn defaults."""
    from sklearn import tree
    model = tree.DecisionTreeClassifier()
    model.fit(train_x, train_y)
    return model
# GBDT(Gradient Boosting Decision Tree) Classifier
def gradient_boosting_classifier(train_x, train_y):
    """Fit and return a gradient-boosting model with 200 boosting stages."""
    from sklearn.ensemble import GradientBoostingClassifier
    model = GradientBoostingClassifier(n_estimators=200)
    model.fit(train_x, train_y)
    return model
# SVM Classifier
def svm_classifier(train_x, train_y):
    """Fit and return an RBF-kernel SVM with probability estimates enabled."""
    from sklearn.svm import SVC
    model = SVC(kernel='rbf', probability=True)
    model.fit(train_x, train_y)
    return model
# SVM Classifier using cross validation
def svm_cross_validation(train_x, train_y):
    """Grid-search C and gamma for an RBF SVM, then refit with the best
    parameters and return the fitted model."""
    # BUG FIX: sklearn.grid_search was removed in scikit-learn 0.20;
    # GridSearchCV now lives in sklearn.model_selection.
    from sklearn.model_selection import GridSearchCV
    from sklearn.svm import SVC
    model = SVC(kernel='rbf', probability=True)
    param_grid = {'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000], 'gamma': [0.001, 0.0001]}
    grid_search = GridSearchCV(model, param_grid, n_jobs=1, verbose=1)
    grid_search.fit(train_x, train_y)
    best_parameters = grid_search.best_estimator_.get_params()
    for para, val in best_parameters.items():
        # BUG FIX: the original had a bare Python-2 style ``print`` statement
        # split across lines; under Python 3 it printed nothing at all.
        print(para, val)
    model = SVC(kernel='rbf', C=best_parameters['C'], gamma=best_parameters['gamma'], probability=True)
    model.fit(train_x, train_y)
    return model
def read_data(data_file):
    """Load a gzipped pickle of (train, val, test) pairs.

    Each pair is (samples, labels); the validation pair is ignored.
    Returns (train_x, train_y, test_x, test_y).
    """
    import gzip
    with gzip.open(data_file, "rb") as handle:
        train, val, test = pickle.load(handle)
    return train[0], train[1], test[0], test[1]
def trainTest(dataFile, attCnt, outFile, test_classifiers):
    """Train and evaluate the requested classifiers on a CSV dataset.

    dataFile: CSV with a header row, attCnt feature columns, and the class
        label in column attCnt.
    outFile: CSV report path; one 'name,precision,recall,accuracy' row per
        classifier (precision/recall only for binary problems).
    test_classifiers: iterable of keys into the classifier table below.
    """
    f = open(outFile, 'w')
    model_save_file = None  # set to a path to pickle the fitted models
    model_save = {}
    # test_classifiers = ['NB', 'KNN', 'LR', 'RF', 'DT','GBDT']
    classifiers = {'NB': naive_bayes_classifier,
                   'KNN': knn_classifier,
                   'LR': logistic_regression_classifier,
                   'RF': random_forest_classifier,
                   'DT': decision_tree_classifier,
                   'GBDT': gradient_boosting_classifier,
                   "SVM": svm_classifier
                   }
    print('reading training and testing data...')
    # BUG FIX: np.str / np.float were deprecated aliases of the builtins and
    # were removed in NumPy 1.24; use the builtins directly.
    tmp = np.loadtxt(dataFile, dtype=str, delimiter=",")
    X = tmp[1:, 0:attCnt].astype(float)  # feature block (header row skipped)
    y = tmp[1:, attCnt].astype(float)  # class-label column
    random_state = check_random_state(0)  # deterministic shuffle of the samples
    permutation = random_state.permutation(X.shape[0])
    X = X[permutation]
    y = y[permutation]
    X = X.reshape((X.shape[0], -1))
    train_x, test_x, train_y, test_y = train_test_split(
        X, y, test_size=0.2)
    scaler = StandardScaler()
    # train_x = scaler.fit_transform(train_x)  # standardize with training mean/var
    # test_x = scaler.transform(test_x)
    num_train, num_feat = train_x.shape
    num_test, num_feat = test_x.shape
    is_binary_class = (len(np.unique(train_y)) == 2)
    print('******************** Data Info *********************')
    print('#training data: %d, #testing_data: %d, dimension: %d' % (num_train, num_test, num_feat))
    print("name,precision,recall,accuracy", file=f)
    for classifier in test_classifiers:
        print('******************* %s ********************' % classifier)
        print('%s' % classifier, file=f, end='')
        start_time = time.time()
        model = classifiers[classifier](train_x, train_y)
        print('training took %fs!' % (time.time() - start_time))
        predict = model.predict(test_x)
        if model_save_file is not None:
            model_save[classifier] = model
        if is_binary_class:
            precision = metrics.precision_score(test_y, predict)
            recall = metrics.recall_score(test_y, predict)
            print('precision: %.2f%%, recall: %.2f%%' % (100 * precision, 100 * recall))
            print(',%.2f%%,%.2f%%' % (100 * precision, 100 * recall), file=f, end='')
        accuracy = metrics.accuracy_score(test_y, predict)
        print(',%.2f%%' % (100 * accuracy), file=f)
    # BUG FIX: the report file was never closed; flush it before returning.
    f.close()
    if model_save_file is not None:
        pickle.dump(model_save, open(model_save_file, 'wb'))
|
[
"sara_sara_ok@163.com"
] |
sara_sara_ok@163.com
|
73f1e58dd9fbcaf1a877626b83d1a46362b545e9
|
ced35fa4a418173e0fc2ba1b183a4e5cb438dc6e
|
/search/views.py
|
6949be1c62921beb12bb84ac2d1bca5336e3549f
|
[] |
no_license
|
Code-Institute-Submissions/the_appleshop
|
d8c8bb051112236d3060baed70a52e13ec0f30e4
|
72ee732e99033ef53b54c2cc8281a7f3d1ad9b5a
|
refs/heads/master
| 2022-11-14T08:26:19.501103
| 2020-06-30T16:32:57
| 2020-06-30T16:32:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from django.shortcuts import render
from products.models import Product
from reviews.models import Review
def product_search(request):
    """Filter products by name (case-insensitive contains) using GET 'q'."""
    searchterm = request.GET.get('q', "")
    products = Product.objects.filter(name__icontains=searchterm)
    # BUG FIX: the context passed the literal string 'searchterm' instead of
    # the variable, so the template could never echo the user's query.
    return render(request, 'products.html', {'products': products,
                                             'searchterm': searchterm})
def review_search(request):
    """Filter reviews by name (case-insensitive contains) using GET 'q'."""
    searchterm = request.GET.get('q', "")
    reviews = Review.objects.filter(name__icontains=searchterm)
    # BUG FIX: the context passed the literal string 'searchterm' instead of
    # the variable, so the template could never echo the user's query.
    return render(request, 'reviews.html', {'reviews': reviews,
                                            'searchterm': searchterm})
|
[
"greenion@gmx.de"
] |
greenion@gmx.de
|
f768ba6d7d5ec4c7ff3607da8f5e4e0bacf8bf8c
|
49a9f1020b0d8caf32d1c0bc2b5ebd46596e4722
|
/learning_users/basic_app/forms.py
|
d352b4d6bbb9b44f057cad1de33b6740fe6673ba
|
[] |
no_license
|
ahtrahdis7/Django-learning
|
05e126fb822b2fe9770fc31246ad4fe939e15edf
|
d36dc1b78c50f2eb77c58ae806e0c8db57033937
|
refs/heads/master
| 2020-12-30T05:23:04.798006
| 2020-05-06T08:57:05
| 2020-05-06T08:57:05
| 238,874,799
| 0
| 0
| null | 2020-07-21T04:36:24
| 2020-02-07T08:20:08
|
Python
|
UTF-8
|
Python
| false
| false
| 446
|
py
|
from django import forms
from django.contrib.auth.models import User
from basic_app.models import UserProfileInfo
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in User; password input is masked."""
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta():
        model = User
        fields = ('username','email', 'password')
class UserProfileInfoForm(forms.ModelForm):
    """Companion form for the extra profile fields (site link, picture)."""
    class Meta():
        model = UserProfileInfo
        fields = ('portfolio_site', 'profile_pic')
|
[
"b118055@iiit-bh.ac.in"
] |
b118055@iiit-bh.ac.in
|
ec288c5a2098fa65eae98cc0c0d11a54389af0ba
|
7b17e4a57df288fe5f3e7e81604eecacfd8ae3d1
|
/posts/migrations/0001_initial.py
|
5391fa2820d20c731a683eb38ed05bc46583cd83
|
[] |
no_license
|
samuelwongsy/forum
|
16f7468a4832cf41290cf22adcde61985d0bdc4e
|
9ac5e4b98c79b8df932dc804daeaffbbb0db3684
|
refs/heads/master
| 2022-11-17T17:01:03.127965
| 2020-07-13T15:15:00
| 2020-07-13T15:15:00
| 279,334,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
# Generated by Django 3.0.7 on 2020-07-12 13:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Post model.

    Posts are ordered newest-first and unique per (user, message); the
    group link is optional while the user link is required.
    """
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('groups', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('message', models.TextField()),
                ('message_html', models.TextField(editable=False)),
                ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='groups.Group')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created_at'],
                'unique_together': {('user', 'message')},
            },
        ),
    ]
|
[
"samuel@Samuels-MacBook-Pro.local"
] |
samuel@Samuels-MacBook-Pro.local
|
819b14de815119bce4a84a6fc1273bded53e6043
|
908b0e5b8a9dd8609d8196fdf4d8a16f74040a89
|
/projectEuler/p41.py
|
675cefa4699fffa071f068527bfc6cbdc7c8a341
|
[] |
no_license
|
poojithumeshrao/projectEuler
|
5b689de03295558a6c67b249a4ac1336f2507a21
|
c5ae6c985bb9199c72812e785aa02e5e3c238ad6
|
refs/heads/master
| 2022-02-22T13:58:17.849262
| 2022-02-08T10:06:37
| 2022-02-08T10:06:37
| 98,327,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
import pdb
def pan(a):
aa = list(str(a))
if '0' in aa:
return False
c = len(aa)
d = len(set(aa))
if c == d and int(max(aa)) == c:
return True
else:
return False
def isPrime(n) :
# Corner cases
if (n <= 1) :
return False
if (n <= 3) :
return True
# This is checked so that we can skip
# middle five numbers in below loop
if (n % 2 == 0 or n % 3 == 0) :
return False
i = 5
while(i * i <= n) :
if (n % i == 0 or n % (i + 2) == 0) :
return False
i = i + 6
return True
for i in range (999999999,2,-2):
#pdb.set_trace()
if i%5 != 0 and i%3 != 0 and i%7 != 0 and pan(i):
print(i)
if isPrime(i):
break
|
[
"noreply@github.com"
] |
noreply@github.com
|
4b0e7d05d72b190fc3957af9c61e79e11a21b644
|
abccdbf9b0849b47960c3c352870793405debfed
|
/0x07-python-test_driven_development/4-print_square.py
|
fab6205b3d5afa34eec331a57f3ea50045fc96f1
|
[] |
no_license
|
hunterxx0/holbertonschool-higher_level_programming
|
88b1b0f31b536c6940f2e64a6924a06ba9cbf193
|
44064cf0722cd20d93f58b64ab185d2898770d73
|
refs/heads/master
| 2022-12-20T12:14:15.877147
| 2020-09-24T21:25:54
| 2020-09-24T21:25:54
| 259,276,369
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#!/usr/bin/python3
"""
a square printing function: print_square()::
>>> def print_square(1):
#
"""
def print_square(size):
"""
Prints a square of size 'size'
"""
if type(size) is not int:
raise TypeError('size must be an integer')
elif size < 0:
raise ValueError('size must be >= 0')
else:
for x in range(size):
for y in range(size):
print("#", end="")
print()
|
[
"azouzimhamed@gmail.com"
] |
azouzimhamed@gmail.com
|
0db5b3deb80041a74fe00a76329d36249f0746ad
|
42dd79c614b775e6e8e782ea7ab332aef44251b9
|
/extra_apps/xadmin/views/website.py
|
02012eff0b7d66b2d7a36ed53d7a74ac75de61ae
|
[] |
no_license
|
Annihilater/imooc
|
114575638f251a0050a0240d5a25fc69ef07d9ea
|
547046cff32ce413b0a4e21714cb9ab9ce19bc49
|
refs/heads/master
| 2020-05-03T09:06:18.247371
| 2019-12-04T09:24:55
| 2019-12-04T09:24:55
| 178,545,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,949
|
py
|
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.decorators.cache import never_cache
from django.contrib.auth.views import LoginView as login
from django.contrib.auth.views import LogoutView as logout
from django.http import HttpResponse
from .base import BaseAdminView, filter_hook
from .dashboard import Dashboard
from xadmin.forms import AdminAuthenticationForm
from xadmin.models import UserSettings
from xadmin.layout import FormHelper
class IndexView(Dashboard):
    """Site home page: the main admin dashboard."""
    title = _("Main Dashboard")
    icon = "fa fa-dashboard"

    def get_page_id(self):
        """Return the fixed page id for the home dashboard."""
        return "home"
class UserSettingView(BaseAdminView):
    """POST endpoint persisting a single per-user key/value setting."""

    @never_cache
    def post(self, request):
        key = request.POST["key"]
        val = request.POST["value"]
        # Upsert: create the row on first write, overwrite afterwards.
        us, created = UserSettings.objects.get_or_create(user=self.user, key=key)
        us.value = val
        us.save()
        # Empty body: the caller only needs the 200 status.
        return HttpResponse("")
class LoginView(BaseAdminView):
    """Admin login page wrapping Django's auth LoginView with xadmin context."""
    title = _("Please Login")
    login_form = None  # optional override for the authentication form class
    login_template = None  # optional override for the template

    @filter_hook
    def update_params(self, defaults):
        # Extension hook: plugins may mutate the kwargs passed to the
        # underlying auth view.
        pass

    @never_cache
    def get(self, request, *args, **kwargs):
        context = self.get_context()
        # Render the form without crispy's <form> tag or media includes; the
        # xadmin template supplies those itself.
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False
        context.update(
            {
                "title": self.title,
                "helper": helper,
                "app_path": request.get_full_path(),
                REDIRECT_FIELD_NAME: request.get_full_path(),
            }
        )
        defaults = {
            "extra_context": context,
            # 'current_app': self.admin_site.name,
            "authentication_form": self.login_form or AdminAuthenticationForm,
            "template_name": self.login_template or "xadmin/views/login.html",
        }
        self.update_params(defaults)
        # return login(request, **defaults)
        # Delegate to Django's class-based auth LoginView.
        return login.as_view(**defaults)(request)

    @never_cache
    def post(self, request, *args, **kwargs):
        # The delegated auth view dispatches on request.method, so the POST
        # (credential submission) path reuses get().
        return self.get(request)
class LogoutView(BaseAdminView):
    """Log the current user out and render the logged-out page."""
    logout_template = None
    need_site_permission = False
    @filter_hook
    def update_params(self, defaults):
        # Hook point for plugins to tweak the auth-view kwargs before use.
        pass
    @never_cache
    def get(self, request, *args, **kwargs):
        defaults = {
            "extra_context": self.get_context(),
            "template_name": self.logout_template or "xadmin/views/logged_out.html",
        }
        # An explicitly configured template always wins.
        if self.logout_template is not None:
            defaults["template_name"] = self.logout_template
        self.update_params(defaults)
        # Delegate the session teardown to Django's LogoutView.
        return logout.as_view(**defaults)(request)
    @never_cache
    def post(self, request, *args, **kwargs):
        return self.get(request)
|
[
"yanmiexingkong@gmail.com"
] |
yanmiexingkong@gmail.com
|
4a1ea777a47726aed9d68c694b7608ad81f07ccc
|
148047ed643891aec025d895ae462bdf066a822d
|
/Practica 3 - Matplotlib y Scipy/Operaciones numericas/3.py
|
f9095d5ba6383dd12b6c90f613719bff36a87dee
|
[] |
no_license
|
santiperone/CursoPythonDSP
|
b87cde697e58308c3376b1b14aa6062fe9859160
|
ecf3094d0a618200ac87f2ea98d539460dbcf4d4
|
refs/heads/master
| 2022-12-01T02:43:34.588808
| 2020-08-13T04:02:58
| 2020-08-13T04:02:58
| 287,173,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 11:55:40 2020
@author: santi
"""
import numpy as np
import matplotlib.pyplot as plt
# Sample points to fit; y spans ~7 orders of magnitude over x in 1..10.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [9.53647, 151.323, 2223.38, 16441.1, 78212.4,
280053, 823687, 2.09733e+06, 4.78319e+06,
1.00003e+07]
# Smallest mean-squared error seen so far (sentinel starting value).
menor_error = 1000
# Try polynomial fits of degree 1..49, printing the MSE of each and keeping
# the best evaluation in `polinomio`.
# NOTE(review): with only 10 points, degrees >= 10 are ill-conditioned and
# numpy will emit RankWarning.  Also, after the loop `coef` holds the LAST
# degree tried (49), not the best one, yet it is what gets plotted below —
# confirm whether plotting the best fit (`polinomio`) was intended.
for i in range(1,50):
    coef = np.polyfit(x,y,i)
    print(np.mean((np.polyval(coef,x)-y)**2))
    if np.mean((np.polyval(coef,x)-y)**2) < menor_error:
        menor_error = np.mean((np.polyval(coef,x)-y)**2)
        polinomio = np.polyval(coef,x)  # best fit evaluated at the data x
print(coef)
# Evaluate the (last) fitted polynomial on a dense grid for a smooth curve.
x2 = np.linspace(1, 10,1000)
p2 = np.polyval(coef, x2)
plt.figure(1)
plt.plot(x, y, 'bo',label='Función original')
# plt.plot(x, polinomio, 'r', label='Mejor aproximacion')
plt.plot(x2, p2, 'g', label='a')
plt.xlabel('Variable independiente')
plt.ylabel('Variable dependiente')
# plt.title('Área con cumsum')
plt.legend()
plt.grid()
|
[
"sdperone97@gmail.com"
] |
sdperone97@gmail.com
|
a8c4360159626be4980ee48d7a6491db264ceafc
|
162e2588156cb2c0039c926c5c442363d9f77b00
|
/tests/integration_tests/data_steward/analytics/cdr_ops/report_runner_test.py
|
c00229bcb0b01b9d9828c4aa35f5c20ef5eb9760
|
[
"MIT"
] |
permissive
|
nishanthpp93/curation
|
38be687240b52decc25ffb7b655f25e9faa40e47
|
ac9f38b2f4580ae806121dd929293159132c7d2a
|
refs/heads/develop
| 2022-08-08T20:33:53.125216
| 2021-12-03T21:38:48
| 2021-12-03T21:38:48
| 155,608,471
| 1
| 0
|
MIT
| 2020-10-09T01:14:39
| 2018-10-31T18:54:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,081
|
py
|
import os
import unittest
from tempfile import NamedTemporaryFile
from pathlib import PurePath
from bs4 import BeautifulSoup as bs
from analytics.cdr_ops.report_runner import IPYNB_SUFFIX, HTML_SUFFIX, main
TEST_NOTEBOOK = """
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["parameters"]
project_id = ''
dataset_id = ''
table_name = ''
# -
print(
f'project_id={project_id}, dataset_id={dataset_id}, table_name={table_name}'
)
"""
class ReportRunnerTest(unittest.TestCase):
    """Integration test: run a notebook through report_runner.main and check
    that the parameter values show up in the rendered HTML output."""
    @classmethod
    def setUpClass(cls):
        print('**************************************************************')
        print(cls.__name__)
        print('**************************************************************')
    def setUp(self):
        # Write the fixture notebook to a temp .py file (jupytext format).
        self.temp_notebook_py_file = NamedTemporaryFile('w',
                                                        suffix='.py',
                                                        delete=True)
        self.temp_notebook_py_file.write(TEST_NOTEBOOK.strip())
        self.temp_notebook_py_file.flush()
        self.notebook_py_path = self.temp_notebook_py_file.name
        # Paths of the .ipynb and .html artifacts the runner will produce
        # next to the .py file.
        self.notebook_ipynb_path = PurePath(
            self.notebook_py_path).with_suffix(IPYNB_SUFFIX)
        self.notebook_html_path = PurePath(
            self.notebook_py_path).with_suffix(HTML_SUFFIX)
        # Parameters injected into the "parameters"-tagged cell.
        self.parameters = {
            'project_id': 'project_id',
            'dataset_id': 'dataset_id',
            'table_name': 'condition'
        }
    def tearDown(self):
        # This removes the python file automatically
        self.temp_notebook_py_file.close()
        # Remove the ipynb and html files
        os.remove(self.notebook_ipynb_path)
        os.remove(self.notebook_html_path)
    def test_main(self):
        # Running the notebook and saving to the HTML page.
        # NOTE(review): the third argument is presumably the output path and
        # is passed the .py path here — confirm against main()'s signature.
        main(self.notebook_py_path, self.parameters, self.notebook_py_path)
        # Testing the content of the HTML page
        with open(self.notebook_html_path, 'r') as f:
            soup = bs(f, parser="lxml", features="lxml")
            # Exactly one rendered-text output cell is expected.
            output_divs = soup.findAll('div', {"class": "jp-RenderedText"})
            output_div_count = len(output_divs)
            self.assertEqual(
                output_div_count, 1,
                f'Expected exactly 1 <div class="jp-RenderedText"> element, saw {output_div_count}'
            )
            output_pres = output_divs[0].findAll('pre')
            output_pres_count = len(output_pres)
            self.assertEqual(
                output_pres_count, 1,
                f'Expected exactly one <pre> element under <div class="jp-RenderedText">, saw {output_pres_count}'
            )
            # The printed line must echo every injected parameter.
            actual = output_pres[0].get_text().strip()
            expected = ', '.join(
                [f'{k}={v}' for k, v in self.parameters.items()])
            self.assertEqual(actual, expected)
|
[
"noreply@github.com"
] |
noreply@github.com
|
c3ae3404fc4a1db0ceb96d6442dd4d9e6ffa2731
|
1e2f0c7e72d74fab23e5718de6afcfb9883091b1
|
/KAproject/forms.py
|
4e10762b072f9d71f13e0b2ebddcab594a2b86db
|
[] |
no_license
|
nizovtsov/KAproject-InWorkPage
|
9ab990aa0759b5d5d3a13931ffa0c9dc4511cd71
|
49c594bebcbe586f0e26387797590dfcdc2accc7
|
refs/heads/master
| 2021-09-14T22:28:03.756872
| 2018-05-21T10:31:42
| 2018-05-21T10:31:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
from django import forms
class ContactForm(forms.Form):
    """Simple contact form collecting a single (validated) e-mail address."""
    email = forms.EmailField()
|
[
"war11ok@mail.ru"
] |
war11ok@mail.ru
|
17460a786f7a8b1672a87c662fbcf64ff1def6e0
|
91a1ca6fbabfd5846b76548ebd627adcdc907f0b
|
/gene_hints/citations/citations.py
|
d301f00660839ce55934a88fceea6a61e0a5ab9e
|
[
"BSD-3-Clause"
] |
permissive
|
broadinstitute/gene-hints
|
72600e470ded0ce06a4f198eb47f505901554952
|
026ebe3f144bc2b54e0db67041de2e35969a8a3b
|
refs/heads/main
| 2023-04-18T01:57:10.984574
| 2021-12-01T01:01:29
| 2021-12-01T01:01:29
| 314,423,954
| 9
| 0
|
BSD-3-Clause
| 2021-12-01T01:01:30
| 2020-11-20T02:22:52
|
Python
|
UTF-8
|
Python
| false
| false
| 8,390
|
py
|
"""Output TSV of gene citation counts over time, and associated metrics
This module downloads citation and genomic data from PubMed and other
bioinformatics databases, combines that data, computes statistics, and writes
it to organism-specific TSV files. The output gene citation hints files
are combined with gene view hints files. These can then help power various
applications, like the gene hints ideogram at https://broad.io/gene-hints.
"""
# Inspired by https://github.com/pkerpedjiev/gene-citation-counts/blob/master/README
import argparse
import csv
from datetime import datetime, timedelta, timezone
import os
import glob
import sys
# Enable importing local modules when directly calling as script
if __name__ == "__main__":
cur_dir = os.path.join(os.path.dirname(__file__))
sys.path.append(cur_dir + "/..")
from lib import read_organisms, is_cached, download_gzip
from enrich_citations import EnrichCitations
from pmids_by_date import pmids_by_date
def format_date(days_before=None):
    """Return a UTC date string in the YYYY/MM/DD format NCBI E-utils expects.

    If `days_before` is a truthy number of days, the date that many days in
    the past is returned; otherwise today's date is returned.
    """
    today = datetime.now(timezone.utc)
    target = today - timedelta(days=days_before) if days_before else today
    return target.strftime("%Y/%m/%d")
class Citations():
    """Downloads PubMed/NCBI citation and gene data and produces per-organism
    gene citation-count TSVs (the heavy lifting of the stats is delegated to
    EnrichCitations)."""
    def __init__(
        self,
        cache=0,
        days=180,
        cites_dir="./pubmed_citations/"
    ):
        # cache: 0 = no caching, 1 = cache downloads, 2 = also cache compute
        # (see the CLI help below); days: size of the analysis window.
        self.cites_dir = cites_dir
        self.data_dir = cites_dir + "data/"
        self.tmp_dir = self.data_dir + "tmp/"
        self.cache = cache
        self.days = days
    def split_ncbi_file_by_org(self, input_path, output_filename, organisms):
        """Split a multi-organism file from NCBI into organism-specific files
        Input file must be a TSV file with taxid as first column
        """
        all_cached = True
        # NOTE(review): output_paths_by_org is filled but only used for the
        # cache check; the write loop below rebuilds the same paths.
        output_paths_by_org = {}
        org_names_by_taxid = {}
        for org in organisms:
            taxid = org["taxid"]
            org = org["scientific_name"]
            org_names_by_taxid[taxid] = org
            output_path = self.data_dir + org + "/" + output_filename
            output_paths_by_org[org] = output_path
            if not is_cached(output_path, self.cache, 2):
                all_cached = False
        # Skip the (large) input scan entirely when every output is cached.
        if all_cached:
            print(
                f"All NCBI organism files for {input_path} are cached, " +
                "so not computing any."
            )
            return
        # Bucket input lines by organism, keyed on the taxid in column 1.
        with open(input_path, "r") as f:
            lines_by_org = {}
            for line in f:
                line = line.strip()
                taxid = line.split("\t")[0]
                if taxid in org_names_by_taxid:
                    org = org_names_by_taxid[taxid] # scientific name
                    if org in lines_by_org:
                        lines_by_org[org].append(line)
                    else:
                        lines_by_org[org] = [line]
        # Write one file per organism that actually had matching lines.
        for org in lines_by_org:
            lines = lines_by_org[org]
            output_path = self.data_dir + org + "/" + output_filename
            with open(output_path, "w") as f:
                f.write("\n".join(lines))
    def merge_daily_pmids(self, output_path, daily_pmid_dir, cache=0):
        """Aggregate per-day files into one file, to ease downstream processing
        """
        pmids = []
        if is_cached(output_path, cache, 2):
            return
        # Each daily TSV holds (year, PubMed ID) rows.
        for fp in glob.glob(daily_pmid_dir + "/*tsv"):
            with open(fp) as fd:
                reader = csv.reader(fd, delimiter="\t")
                for row in reader:
                    year = row[0] # TODO: Remove this column, use filename date
                    pmid = row[1] # PubMed ID, i.e. citation ID
                    pmids.append(year + "\t" + pmid)
        with open(output_path, "w") as f:
            lines = "\n".join(pmids)
            f.write(lines)
    def fetch_all_publications_over_time(self, path, prev_path, days):
        """Download IDs for articles published in the last n `days`
        """
        # Current window and the window immediately before it, for deltas.
        start_date = format_date(days) # E.g. 60 days ago
        end_date = format_date() # Today
        prev_start_date = format_date(days * 2) # E.g. 120 days ago
        prev_end_date = start_date # E.g. 60 days ago
        output_dir = self.tmp_dir + "timeframe"
        prev_output_dir= self.tmp_dir + "prev_timeframe"
        pmids_by_date(start_date, end_date, output_dir, self.cache)
        pmids_by_date(prev_start_date, prev_end_date, prev_output_dir, self.cache)
        print("Combine daily publication counts")
        self.merge_daily_pmids(path, output_dir)
        self.merge_daily_pmids(prev_path, prev_output_dir)
    def fetch_all_publications_per_organism(self, organisms):
        """Get IDs for articles published about our organisms of interest
        """
        # Ensure one data directory per organism exists.
        for org in organisms:
            dir = self.data_dir + org["scientific_name"]
            if not os.path.exists(dir):
                os.makedirs(dir)
        print("Download gene2pubmed")
        url = "https://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2pubmed.gz"
        output_name = "gene2pubmed"
        output_path = self.data_dir + output_name
        download_gzip(url, output_path, self.cache)
        print("Split gene2pubmed by organism")
        self.split_ncbi_file_by_org(output_path, output_name, organisms)
    def fetch_gene_info(self, organisms):
        # Download NCBI's gene_info table and split it per organism.
        print("Download gene_info")
        url = "https://ftp.ncbi.nlm.nih.gov/gene/DATA/gene_info.gz"
        output_name = "gene_info"
        output_path = self.data_dir + output_name
        download_gzip(url, output_path, self.cache)
        print("Split gene_info by organism")
        self.split_ncbi_file_by_org(output_path, output_name, organisms)
    def download_data(self, pmid_dates_path, prev_pmid_dates_path, days):
        """Download citation and genomic data, preparing it for enrichment
        """
        # Make tmp_dir, and thereby also the other dirs
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)
        # Download IDs for articles published in the last `days`
        self.fetch_all_publications_over_time(
            pmid_dates_path, prev_pmid_dates_path, days
        )
        organisms = read_organisms()
        # Download IDs for articles published about our organisms of interest.
        # We filter and join the "publications over time" and "publications per
        # organism" lists in `enrich_citations`.
        self.fetch_all_publications_per_organism(organisms)
        # Download more genomic information
        # TODO: Is data parsed from gene_info available in UCSC GTF files?
        self.fetch_gene_info(organisms)
    def run(self, sort_by="count"):
        """Output TSV of gene citation counts and related metrics over `days`
        """
        days = self.days
        pmid_dates_path = self.data_dir + "pmid_dates.tsv"
        prev_pmid_dates_path = self.data_dir + "prev_pmid_dates.tsv"
        # Download citation and genomic data
        self.download_data(pmid_dates_path, prev_pmid_dates_path, days)
        # Combine that downloaded data, compute statistics, and write to TSV
        EnrichCitations().run(
            pmid_dates_path, prev_pmid_dates_path, days, sort_by
        )
# Command-line handler
# Command-line entry point: parse options and run the citations pipeline.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--days",
        type=int,
        help="Number of days to analyze",
        default=180
    )
    parser.add_argument(
        "--sort-by",
        help="Metric by which to sort PubMed citations.  Count is citations.",
        choices=["count", "delta", "rank", "rank_delta"],
        default="count"
    )
    parser.add_argument(
        "--cache",
        help=(
            "Get fast but incomplete data.  Dev setting.  Levels:" +
            "0: Don't cache.  " +
            "1: Cache download but not compute.  " +
            "2: like debug=1, and cache intermediate compute.  " +
            "(default: %(default)i)"
        ),
        choices=[0, 1, 2],
        default=0
    )
    args = parser.parse_args()
    days = args.days
    sort_by = args.sort_by
    cache = args.cache
    # Note the constructor order is (cache, days).
    Citations(cache, days).run(sort_by)
|
[
"eweitz@broadinstitute.org"
] |
eweitz@broadinstitute.org
|
9d8b95b11ae0c4f34881e9d745ce9a40a7186e74
|
3497854f30c6dba6126d4456af4a5f86dbae144b
|
/testtakeMoney.py
|
34a335b76a62ba07b27e49946aa6024ced5ac9f0
|
[] |
no_license
|
HKRliduan/store
|
3965d8b2785d8329a2485628d2b279f051e57e25
|
42622717ae8cd9d13a1d19e3314643133d532c2e
|
refs/heads/master
| 2023-08-23T19:36:24.963717
| 2021-10-29T03:54:42
| 2021-10-29T03:54:42
| 405,851,500
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
from ddt import ddt
from ddt import data
from ddt import unpack
from unittest import TestCase
from Bank import bank_takeMoney
# Data-driven cases for TestBank: each row is
# (account_number, password, withdrawal_amount, expected_result_code).
# NOTE(review): the codes presumably mean 0=success, 1=bad account,
# 2=bad password, 3=invalid amount — confirm against Bank.bank_takeMoney.
da=[
[123456780,123456,1000,1],
[123456789,654321,1000,2],
[123456789,123456,-1,3],
[123456789,123456,0,0],
[123456789,123456,1,0],
[123456789,123456,4999,0],
[123456788,654321,5001,3],
[123456788,654321,5000,0],
]
@ddt
class TestBank(TestCase):
    """Data-driven tests for Bank.bank_takeMoney (one case per row of `da`)."""
    @data(*da)
    @unpack
    def testsave(self,a,b,c,d):
        # a=account number, b=password, c=amount to withdraw,
        # d=expected status code.
        result = bank_takeMoney(a,b,c)
        self.assertEqual(d,result)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e6909e12c554550d60bdfede5c39f7070b75de43
|
c0dd1a4ffe1b515e5b9630691d685eff0b82c837
|
/chino (3)6.py
|
78183fd85b01e555e5edc6f9a903811d9cf932cc
|
[] |
no_license
|
WATANA-be/mojisaiten
|
7e6231586d770431c30f6cff45b63055093899b5
|
fc02b3add7dc0887b0a4eb99f1fbdfd12a1cfb99
|
refs/heads/master
| 2022-12-31T11:37:14.080095
| 2020-10-26T14:47:39
| 2020-10-26T14:47:39
| 287,859,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
import tkinter as tk
import tkinter.filedialog as fd
import PIL.Image
import PIL.ImageTk
import random
# Modules used for machine learning
import sklearn.datasets
import sklearn.svm
import numpy
# Global label that displays the randomly chosen "score"; see dispLabel().
lbl = tk.Label(text='点数をここに表示します。')
# Convert an image file into a flat list of numbers
def imageToData(filename):
    """Load `filename`, downscale it to 8x8 grayscale, preview it in the GUI,
    and return the pixels as a flat float array scaled into the 0..16 range
    used by sklearn's digits dataset."""
    # Convert the image to 8x8 grayscale
    grayImage = PIL.Image.open(filename).convert('L')
    # NOTE(review): PIL.Image.ANTIALIAS was removed in Pillow 10 — confirm
    # the installed Pillow version.
    grayImage = grayImage.resize((8,8),PIL.Image.ANTIALIAS)
    # Display that image
    dispImage = PIL.ImageTk.PhotoImage(grayImage.resize((300,300)))
    imageLabel.configure(image = dispImage)
    # Keep a reference so Tk does not garbage-collect the photo.
    imageLabel.image = dispImage
    # Convert the pixels to a numeric list
    numImage = numpy.asarray(grayImage,dtype = float)
    numImage = numpy.floor(16 - 16 * (numImage / 256))
    numImage = numImage.flatten()
    return numImage
# Digit prediction
def predictDigits(data):
    """Train an SVM on sklearn's digits dataset and show the prediction for
    `data` (a flat 64-element pixel array) in the GUI label."""
    # Load the training data
    digits = sklearn.datasets.load_digits()
    # Train the model.
    # NOTE(review): the classifier is retrained from scratch on every call.
    clf = sklearn.svm.SVC(gamma = 0.001)
    clf.fit(digits.data, digits.target)
    # Show the prediction result
    n = clf.predict([data])
    textLabel.configure(text = 'この画像は'+str(n)+'です!')
def dispLabel(point=None):
    """Show a "score" in the global label `lbl`.

    The `point` argument is ignored: the displayed value is picked at random
    from a fixed list (the local assignment below shadows the parameter).
    It now defaults to None so the function can be used directly as a Tk
    button callback — the original required an argument, so
    ``command=dispLabel`` raised TypeError when the button was clicked.
    """
    point = ['0','10','20','30','40','50','60','70','80','99','100']
    lbl.configure(text=random.choice(point))
# Open a file dialog
def openFile():
    """Ask the user for an image file, then convert and classify it."""
    fpath = fd.askopenfilename()
    # An empty string means the dialog was cancelled.
    if fpath:
        # Convert the image file into a numeric list
        data = imageToData(fpath)
        # Predict the digit
        predictDigits(data)
# Build the application window.
root = tk.Tk()
root.geometry('600x500')
# Fix: 'size' is not a valid tk.Button option (it raised TclError at
# startup); the intended 40-point Helvetica font is expressed via the
# font tuple instead.
btn = tk.Button(root, text='ファイルを開く', command = openFile, font=('Helvetica', 40))
imageLabel = tk.Label()
btn.pack()
btn2 = tk.Button(root,text='採点します',command = dispLabel)
btn2.pack()
# Label that shows the prediction result
textLabel = tk.Label(text='手書きの数字を認識しますね')
textLabel.pack()
lbl.pack()
tk.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
45d38a28de6ab489b77524b1a769aaa6209882ac
|
06775c1881d6c687d89fdf3dddecab961aa54f3a
|
/Kmodel/K_model_RC.py
|
c031556763ece844e51eb0923849ded80ee7d679
|
[] |
no_license
|
Siddharth3-14/Dynamical-Modelling-of-NGC-6503
|
53bb19bdaea7c7d0ed28568b3ee7be3eb45995ce
|
a817dcb3087851e5c393d87bc06ca397ee3c34c1
|
refs/heads/master
| 2022-11-06T16:47:39.275088
| 2020-06-16T07:22:49
| 2020-06-16T07:22:49
| 272,639,377
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,403
|
py
|
import pymc3 as pm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as tt
# Central intensity normalisations for the bulge and disk profiles below.
# NOTE(review): units presumably surface-brightness-related — confirm
# against the photometric data these were fitted from.
I_0_bulge = 2010.018
I_0_disk = 1907.2165
def simpsons_integration(a,b,N,func):
    """Approximate the integral of `func` over [a, b] with the composite
    Simpson's rule using N subintervals (N should be even)."""
    step = (b-a)/N
    endpoints = func(a) + func(b)
    odd_sum = 0   # sum of f at odd-indexed interior nodes (weight 4)
    even_sum = 0  # sum of f at even-indexed interior nodes (weight 2)
    for k in range(1,N):
        node = a + k*step
        if k % 2:
            odd_sum = odd_sum + func(node)
        else:
            even_sum = even_sum + func(node)
    return step*(endpoints + 2*even_sum + 4*odd_sum)/3.0
def Bulge(R,rb = 0.104,nb = 2.900):
    """Bulge mass integrand: 4*pi*I_0_bulge*exp(-(R/rb)**(1/nb))*R**2."""
    profile = np.exp(- ((R/rb)**(1/nb) ))
    return 4*I_0_bulge*np.pi*profile*R*R
def Disc(R,R_d = 0.759, R_h = 0.473, alpha = 1.599):
    """Disk mass integrand: exponential disk with an inner cutoff term
    (R/R_h)**(-alpha), weighted by R."""
    attenuation = np.exp(-(R/R_d + (R/R_h)**(-alpha)))
    return 4*I_0_disk*np.pi*0.24*attenuation*R
# Rotational-curve fit: read the observed curve, subtract the gas
# contribution, and fit bulge/disk/halo mass-to-light parameters with MCMC.
modelling_data = pd.read_csv("../Data/modelling_data.csv",delimiter = '\t',header = 'infer')
Radial_distance = np.array(modelling_data["Radial Distance"])
V_obs = np.array(modelling_data["V_obs"])
V_gas = np.array(modelling_data["V_gas"])
V_disk = np.array(modelling_data["V_disk"])
# Work with squared velocities; remove the gas contribution from V_obs^2.
V_obs2 = (V_obs)*(V_obs)
V_gas2 = (V_gas)*(V_gas)
V_obs2 = V_obs2 - V_gas2
# Enclosed-mass-like terms M(R)/R for the bulge at each observed radius
# (integral from ~0 to R of the bulge integrand).
M_R_bulge = []
for i in Radial_distance:
    M_R_bulge.append(simpsons_integration(0.0001,i,5000,Bulge)/i)
M_R_bulge = np.array(M_R_bulge)
# Same for the disk.
M_R_disk = []
for i in Radial_distance:
    M_R_disk.append(simpsons_integration(0.0001,i,5000,Disc)/i)
M_R_disk = np.array(M_R_disk)
total_model = pm.Model()
with total_model:
    #priors
    sigma = pm.HalfNormal("sigma" , sigma = 0.4)
    gamma = pm.Gamma("gamma", alpha = 3, beta = 1)
    ah = pm.Gamma("ah", alpha = 3, beta = 1)
    Mh = pm.Gamma("Mh", alpha = 3, beta = 1)
    M_by_L_bulge = pm.Gamma("M_by_L_bulge", alpha = 3, beta = 1)
    # NOTE(review): variable is M_by_L_disk but the RV is named
    # "M_by_L_disc" — the summary/trace key uses the "disc" spelling.
    M_by_L_disk = pm.Gamma("M_by_L_disc", alpha = 3, beta = 1)
    # Squared-velocity contributions of each component.
    bulge_rot = M_by_L_bulge*M_R_bulge
    disk_rot = M_by_L_disk*M_R_disk
    halo_rot = (Mh*Radial_distance**(gamma - 1))/((ah**gamma)*(1 + ((Radial_distance/ah)**(gamma-1)) ))
    total_rot = bulge_rot + disk_rot + halo_rot
    #likelihood
    Y_obs = pm.Normal('Y_obs', mu = total_rot, sigma = sigma, observed = V_obs2)
    step = pm.Metropolis()
    trace = pm.sample(draws = 1000000, step = step, tune = 1000, cores = 2)
print(pm.summary(trace))
parameter_mean = pm.summary(trace)["mean"]
# NOTE(review): parameter_mean is indexed positionally below; this is
# fragile because it depends on the row order of pm.summary — indexing by
# variable name would be safer.  Velocities are the square roots of the
# fitted squared-velocity terms.
model_pred = (parameter_mean[4]*M_R_bulge + parameter_mean[5]*M_R_disk + (parameter_mean[3]*(Radial_distance**(parameter_mean[1] - 1)))/((parameter_mean[2]**parameter_mean[1])*(1 + ((Radial_distance/parameter_mean[2])**(parameter_mean[1]-1)) )))**0.5
bulge_pred = (parameter_mean[4]*M_R_bulge)**0.5
disk_pred = (parameter_mean[5]*M_R_disk)**0.5
halo_pred = ((parameter_mean[3]*(Radial_distance**(parameter_mean[1] - 1)))/((parameter_mean[2]**parameter_mean[1])*(1 + ((Radial_distance/parameter_mean[2])**(parameter_mean[1]-1)) )))**0.5
fig,axes = plt.subplots()
axes.scatter(Radial_distance,(V_obs2)**0.5,color = 'y' , label = 'Data')
plt.plot(Radial_distance,model_pred, color = 'k', label = 'Model Prediction')
plt.plot(Radial_distance,bulge_pred, color = 'r', label = 'bulge contribution')
plt.plot(Radial_distance,disk_pred, color = 'g', label = 'Disk contribution')
plt.plot(Radial_distance,halo_pred, color ='b', label = 'Halo contribution')
axes.set_ylabel("Rotational Velocity (Km/s) ")
axes.set_xlabel("Radial Distance (Kpc)")
plt.legend()
plt.show()
|
[
"sidd.ansh@gmail.com"
] |
sidd.ansh@gmail.com
|
44d7e761023d0c59d49f8e7dc69628d734013c37
|
7aa1857c48a0059a5abab8b69e0aed4bf3288dd3
|
/apicalls/admin.py
|
224c514c76189e31a0e81821b8fa2611040f75dd
|
[] |
no_license
|
SushekT/api-intern-younginnovation
|
3fd44d4fe2950d9fbf5a71cee479e0ecbf73e22f
|
f0ae8514befbf6b670e3315355adbc8c029870b5
|
refs/heads/master
| 2023-04-17T23:17:12.576471
| 2021-05-09T04:26:01
| 2021-05-09T04:26:01
| 365,662,099
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose every analytics model in the Django admin with the default
# ModelAdmin options.
admin.site.register(Year)
admin.site.register(Petroleum)
admin.site.register(Country)
admin.site.register(Main)
admin.site.register(AverageSale)
admin.site.register(TwoYearInterval)
admin.site.register(Leastyear)
|
[
"sushek@pandamotions.com"
] |
sushek@pandamotions.com
|
592f65c3845cec1b556e21772988fe41c2d61145
|
aca2258cf58e0d2c7e4939e73bcb82b6c135282c
|
/libs/Mailman/mailman/commands/tests/test_membership.py
|
6cf4802c6c8546a83b4d135e007b28482e0492be
|
[] |
no_license
|
masomel/py-import-analysis
|
cfe6749a1d7430b179559b9e0911b8c8df507be7
|
7edf8148e34b9f73ca6433ceb43a1770f4fa32c1
|
refs/heads/master
| 2021-03-16T10:00:24.205301
| 2019-08-01T20:32:34
| 2019-08-01T20:32:34
| 112,668,748
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
# Copyright (C) 2016-2017 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Test the Leave command."""
import unittest
from mailman.app.lifecycle import create_list
from mailman.commands.eml_membership import Leave
from mailman.email.message import Message
from mailman.interfaces.mailinglist import SubscriptionPolicy
from mailman.interfaces.usermanager import IUserManager
from mailman.runners.command import Results
from mailman.testing.helpers import set_preferred
from mailman.testing.layers import ConfigLayer
from zope.component import getUtility
class TestLeave(unittest.TestCase):
    """Tests for the email `leave` command handler."""
    layer = ConfigLayer
    def setUp(self):
        # Fresh list and command instance for each test.
        self._mlist = create_list('ant@example.com')
        self._command = Leave()
    def test_confirm_leave_not_a_member(self):
        self._mlist.unsubscription_policy = SubscriptionPolicy.confirm
        # Try to unsubscribe someone who is not a member.  Anne is a real
        # user, with a validated address, but she is not a member of the
        # mailing list.
        anne = getUtility(IUserManager).create_user('anne@example.com')
        set_preferred(anne)
        # Initiate an unsubscription.
        msg = Message()
        msg['From'] = 'anne@example.com'
        results = Results()
        self._command.process(self._mlist, msg, {}, (), results)
        # The last results line reports the failure to the sender.
        self.assertEqual(
            str(results).splitlines()[-1],
            'leave: anne@example.com is not a member of ant@example.com')
|
[
"msmelara@gmail.com"
] |
msmelara@gmail.com
|
c6654c8ede1e9d92c945f377a946208b70fb1d3d
|
51070872634750df2c0ce583c08c7bcf8860dec7
|
/2017/ewkzgjjModule.py
|
3cab202334913bbf10203440c42a59f55b1658dd
|
[] |
no_license
|
AndrewLevin/ZGamma
|
93eecc17c1d40ce662dcb321f5b0fb21998c4d16
|
c70380c4b38505421ba1673d09b0692176d18f31
|
refs/heads/master
| 2021-04-15T04:11:18.479840
| 2019-07-20T13:42:08
| 2019-07-20T13:42:08
| 126,224,421
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,033
|
py
|
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.tools import deltaR
from PhysicsTools.NanoAODTools.postprocessing.tools import deltaPhi
class ewkzgjjProducer(Module):
    def __init__(self):
        # Stateless producer: all selection logic lives in analyze().
        pass
    def beginJob(self):
        # No job-level setup required by this module.
        pass
    def endJob(self):
        # No job-level teardown required by this module.
        pass
    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        """Book the flat output branches on the wrapped output tree.

        The second argument of branch() is the ROOT/nanoAOD-tools branch
        type code — presumably "F" float, "I"/"i" signed/unsigned int and
        "l" 64-bit int; confirm against the framework's branch() docs.
        """
        self.out = wrappedOutputTree
        # Event identification.
        self.out.branch("lepton_pdg_id", "i");
        self.out.branch("run", "i");
        self.out.branch("lumi", "i");
        self.out.branch("gen_weight", "F");
        self.out.branch("event", "l");
        # Selected photon kinematics.
        self.out.branch("photon_pt", "F");
        self.out.branch("photon_eta", "F");
        self.out.branch("photon_phi", "F");
        # Leading and sub-leading lepton kinematics, isolation and ID.
        self.out.branch("lepton1_pt", "F");
        self.out.branch("lepton1_eta", "F");
        self.out.branch("lepton1_phi", "F");
        self.out.branch("lepton1_iso", "F");
        self.out.branch("lepton1_id", "F");
        self.out.branch("lepton2_pt", "F");
        self.out.branch("lepton2_eta", "F");
        self.out.branch("lepton2_phi", "F");
        self.out.branch("lepton2_iso", "F");
        self.out.branch("lepton2_id", "F");
        # Dijet and Z(+gamma) system observables.
        self.out.branch("mjj", "F");
        self.out.branch("detajj", "F");
        self.out.branch("zep", "F");
        self.out.branch("mzg", "F");
        self.out.branch("mll", "F");
        # Photon selection category and generator-matching flag.
        self.out.branch("photon_selection", "I");
        self.out.branch("photon_gen_matching", "I");
    def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        # No per-file teardown required by this module.
        pass
def analyze(self, event):
"""process event, return True (go to next module) or False (fail, go to next event)"""
electrons = Collection(event, "Electron")
muons = Collection(event, "Muon")
jets = Collection(event, "Jet")
photons = Collection(event, "Photon")
if hasattr(event,'nGenPart'):
genparts = Collection(event, "GenPart")
tight_muons = []
loose_but_not_tight_muons = []
tight_electrons = []
loose_but_not_tight_electrons = []
tight_photons = []
tight_jets = []
for i in range(0,len(muons)):
if muons[i].pt < 20:
continue
if abs(muons[i].eta) > 2.4:
continue
if muons[i].pfRelIso04_all < 0.4:
tight_muons.append(i)
for i in range (0,len(electrons)):
if electrons[i].pt < 20:
continue
if abs(electrons[i].eta) > 2.5:
continue
if (abs(electrons[i].eta + electrons[i].deltaEtaSC) < 1.479 and abs(electrons[i].dz) < 0.1 and abs(electrons[i].dxy) < 0.05) or (abs(electrons[i].eta + electrons[i].deltaEtaSC) > 1.479 and abs(electrons[i].dz) < 0.2 and abs(electrons[i].dxy) < 0.1):
if electrons[i].cutBased >= 3:
tight_electrons.append(i)
elif electrons[i].cutBased >= 1:
loose_but_not_tight_electrons.append(i)
for i in range (0,len(photons)):
if photons[i].pt < 25:
continue
if not ((abs(photons[i].eta) < 1.4442) or (1.566 < abs(photons[i].eta) and abs(photons[i].eta) < 2.5) ):
continue
if not (photons[i].cutBasedBitmap & (1 << 1) == (1 << 1)):
continue
# if photons[i].pixelSeed:
if not photons[i].electronVeto:
continue
pass_lepton_dr_cut = True
for j in range(0,len(tight_muons)):
if deltaR(muons[tight_muons[j]].eta,muons[tight_muons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
for j in range(0,len(tight_electrons)):
if deltaR(electrons[tight_electrons[j]].eta,electrons[tight_electrons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
for j in range(0,len(loose_but_not_tight_muons)):
if deltaR(muons[loose_but_not_tight_muons[j]].eta,muons[loose_but_not_tight_muons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
for j in range(0,len(loose_but_not_tight_electrons)):
if deltaR(electrons[loose_but_not_tight_electrons[j]].eta,electrons[loose_but_not_tight_electrons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
if not pass_lepton_dr_cut:
continue
tight_photons.append(i)
for i in range (0,len(photons)):
if photons[i].pt < 25:
continue
if not ((abs(photons[i].eta) < 1.4442) or (1.566 < abs(photons[i].eta) and abs(photons[i].eta) < 2.5) ):
continue
mask1 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 9) | (1 << 11) | (1 << 13)
mask2 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 9) | (1 << 11)
mask3 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 9) | (1 << 13)
mask4 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 11) | (1 << 13)
mask5 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 9) | (1 << 11) | (1 << 13) #invert the medium photon ID with the sigma_ietaieta cut removed
bitmap = photons[i].vidNestedWPBitmap & mask1
#after adding the photons that pass the full ID, add the photons that pass the inverted ID
if (bitmap == mask1):
continue
if not((bitmap == mask1) or (bitmap == mask2) or (bitmap == mask3) or (bitmap == mask4) or (bitmap == mask5)):
continue
# if photons[i].pixelSeed:
if not photons[i].electronVeto:
continue
pass_lepton_dr_cut = True
for j in range(0,len(tight_muons)):
if deltaR(muons[tight_muons[j]].eta,muons[tight_muons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
for j in range(0,len(tight_electrons)):
if deltaR(electrons[tight_electrons[j]].eta,electrons[tight_electrons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
for j in range(0,len(loose_but_not_tight_muons)):
if deltaR(muons[loose_but_not_tight_muons[j]].eta,muons[loose_but_not_tight_muons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
for j in range(0,len(loose_but_not_tight_electrons)):
if deltaR(electrons[loose_but_not_tight_electrons[j]].eta,electrons[loose_but_not_tight_electrons[j]].phi,photons[i].eta,photons[i].phi) < 0.7:
pass_lepton_dr_cut = False
if not pass_lepton_dr_cut:
continue
tight_photons.append(i)
for i in range(0,len(jets)):
if jets[i].pt < 30:
continue
if abs(jets[i].eta) > 4.7:
continue
pass_photon_dr_cut = True
for j in range(0,len(tight_photons)):
if deltaR(photons[tight_photons[j]].eta,photons[tight_photons[j]].phi,jets[i].eta,jets[i].phi) < 0.5:
pass_photon_dr_cut = False
if not pass_photon_dr_cut:
continue
pass_lepton_dr_cut = True
for j in range(0,len(tight_muons)):
if deltaR(muons[tight_muons[j]].eta,muons[tight_muons[j]].phi,jets[i].eta,jets[i].phi) < 0.4:
pass_lepton_dr_cut = False
for j in range(0,len(tight_electrons)):
if deltaR(electrons[tight_electrons[j]].eta,electrons[tight_electrons[j]].phi,jets[i].eta,jets[i].phi) < 0.4:
pass_lepton_dr_cut = False
for j in range(0,len(loose_but_not_tight_muons)):
if deltaR(muons[loose_but_not_tight_muons[j]].eta,muons[loose_but_not_tight_muons[j]].phi,jets[i].eta,jets[i].phi) < 0.4:
pass_lepton_dr_cut = False
for j in range(0,len(loose_but_not_tight_electrons)):
if deltaR(electrons[loose_but_not_tight_electrons[j]].eta,electrons[loose_but_not_tight_electrons[j]].phi,jets[i].eta,jets[i].phi) < 0.4:
pass_lepton_dr_cut = False
if not pass_lepton_dr_cut:
continue
tight_jets.append(i)
if (len(tight_muons) == 2) and (len(loose_but_not_tight_electrons)+ len(tight_electrons)+len(loose_but_not_tight_muons) == 0):
if len(tight_muons) == 2:
i1 = tight_muons[0]
i2 = tight_muons[1]
elif len(loose_but_not_tight_muons) == 2:
i1 = loose_but_not_tight_muons[0]
i2 = loose_but_not_tight_muons[1]
else:
i1 = tight_muons[0]
i2 = loose_but_not_tight_muons[0]
if muons[i1].charge == muons[i2].charge:
return False
if hasattr(event,'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8'):
if not event.HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8:
return False
else:
if not event.HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass8:
return False
if muons[i1].pt < 20:
return False
if muons[i2].pt < 20:
return False
if abs(muons[i1].eta) > 2.4:
return False
if abs(muons[i2].eta) > 2.4:
return False
if ((muons[i1].p4() + muons[i2].p4()).M() > 110) or ((muons[i1].p4() + muons[i2].p4()).M() < 70) :
return False
# print "selected meng lu muon event: " + str(event.event) + " " + str(event.luminosityBlock) + " " + str(event.run)
if (len(tight_electrons) == 2) and (len(loose_but_not_tight_muons)+ len(tight_muons)+len(loose_but_not_tight_electrons) == 0):
i1 = tight_electrons[0]
i2 = tight_electrons[1]
if electrons[i1].charge == electrons[i2].charge:
return False
if not event.HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ and not event.HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL:
return False
if electrons[i1].pt < 20:
return False
if electrons[i2].pt < 20:
return False
if abs(electrons[i1].eta) > 2.5:
return False
if abs(electrons[i2].eta) > 2.5:
return False
if ((electrons[i1].p4() + electrons[i2].p4()).M() > 110) or ((electrons[i1].p4() + electrons[i2].p4()).M() < 70) :
return False
# print "selected meng lu electron event: " + str(event.event) + " " + str(event.luminosityBlock) + " " + str(event.run)
if len(tight_jets) < 2:
return False
if len(tight_photons) == 0:
return False
if jets[tight_jets[0]].pt < 30:
return False
if jets[tight_jets[1]].pt < 30:
return False
if abs(jets[tight_jets[0]].eta) > 4.7:
return False
if abs(jets[tight_jets[1]].eta) > 4.7:
return False
if (jets[tight_jets[0]].p4() + jets[tight_jets[1]].p4()).M() < 150:
return False
#if abs(jets[0].p4().Eta() - jets[1].p4().Eta()) < 2.5:
# return False
if photons[tight_photons[0]].pt < 25:
return False
if not ((abs(photons[tight_photons[0]].eta) < 1.4442) or (1.566 < abs(photons[tight_photons[0]].eta) and abs(photons[tight_photons[0]].eta) < 2.5) ):
return False
#if not abs(photons[tight_photons[0]].eta) < 1.4442:
# return False
# if not (photons[tight_photons[0]].cutBasedBitmap & (1 << 1) == (1 << 1)):
# return False
# if photons[tight_photons[0]].pixelSeed:
if not photons[tight_photons[0]].electronVeto:
return False
if len(loose_but_not_tight_muons) + len(loose_but_not_tight_electrons) + len(tight_electrons) + len(tight_muons) > 2:
return False
if len(tight_muons) == 2:
i1 = tight_muons[0]
i2 = tight_muons[1]
if hasattr(event,'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8'):
if not event.HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8:
return False
else:
if not event.HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass8:
return False
if muons[i1].pt < 20:
return False
if muons[i2].pt < 20:
return False
if abs(muons[i1].eta) > 2.4:
return False
if abs(muons[i2].eta) > 2.4:
return False
# if muons[i1].pfRelIso04_all > 0.15:
# return False
# if muons[i2].pfRelIso04_all > 0.15:
# return False
# if not muons[i1].tightId:
# return False
# if not muons[i2].tightId:
# return False
if muons[i1].charge == muons[i2].charge:
return False
if ((muons[i1].p4() + muons[i2].p4()).M() > 110) or ((muons[i1].p4() + muons[i2].p4()).M() < 70) :
return False
self.out.fillBranch("zep",abs((muons[i1].p4() + muons[i2].p4() + photons[tight_photons[0]].p4()).Eta() - (jets[tight_jets[0]].eta + jets[tight_jets[1]].eta)/2))
self.out.fillBranch("mzg",(muons[i1].p4() + muons[i2].p4() + photons[tight_photons[0]].p4()).M())
self.out.fillBranch("mll",(muons[i1].p4() + muons[i2].p4()).M())
self.out.fillBranch("lepton_pdg_id",13)
self.out.fillBranch("lepton1_pt",muons[i1].pt)
self.out.fillBranch("lepton1_eta",muons[i1].eta)
self.out.fillBranch("lepton1_phi",muons[i1].phi)
self.out.fillBranch("lepton1_iso",muons[i1].pfRelIso04_all)
self.out.fillBranch("lepton1_id",bool(muons[i1].tightId))
self.out.fillBranch("lepton2_pt",muons[i2].pt)
self.out.fillBranch("lepton2_eta",muons[i2].eta)
self.out.fillBranch("lepton2_phi",muons[i2].phi)
self.out.fillBranch("lepton2_iso",muons[i2].pfRelIso04_all)
self.out.fillBranch("lepton2_id",bool(muons[i2].tightId))
elif len(tight_electrons) == 2:
i1 = tight_electrons[0]
i2 = tight_electrons[1]
if not event.HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ and not event.HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL:
return False
if electrons[i1].cutBased < 3:
return False
if electrons[i2].cutBased < 3:
return False
if electrons[i1].pt < 25:
return False
if electrons[i2].pt < 25:
return False
if abs(electrons[i1].eta) > 2.5:
return False
if abs(electrons[i2].eta) > 2.5:
return False
if electrons[i1].charge == electrons[i2].charge:
return False
if ((electrons[i1].p4() + electrons[i2].p4()).M() > 110) or ((electrons[i1].p4() + electrons[i2].p4()).M() < 70) :
return False
self.out.fillBranch("lepton_pdg_id",11)
self.out.fillBranch("lepton_pdg_id",11)
self.out.fillBranch("lepton1_pt",electrons[i1].pt)
self.out.fillBranch("lepton1_eta",electrons[i1].eta)
self.out.fillBranch("lepton1_phi",electrons[i1].phi)
self.out.fillBranch("lepton1_iso",0)
self.out.fillBranch("lepton1_id",0)
self.out.fillBranch("lepton2_pt",electrons[i2].pt)
self.out.fillBranch("lepton2_eta",electrons[i2].eta)
self.out.fillBranch("lepton2_phi",electrons[i2].phi)
self.out.fillBranch("lepton2_iso",0)
self.out.fillBranch("lepton2_id",0)
self.out.fillBranch("zep",abs((electrons[i1].p4() + electrons[i2].p4() + photons[tight_photons[0]].p4()).Eta() - (jets[tight_jets[0]].eta + jets[tight_jets[1]].eta)/2))
self.out.fillBranch("mzg",(electrons[i1].p4() + electrons[i2].p4() + photons[tight_photons[0]].p4()).M())
self.out.fillBranch("mll",(electrons[i1].p4() + electrons[i2].p4()).M())
else:
return False
print "selected event: " + str(event.event) + " " + str(event.luminosityBlock) + " " + str(event.run)
mask1 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 9) | (1 << 11) | (1 << 13)
mask2 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 9) | (1 << 11)
mask3 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 9) | (1 << 13)
mask4 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 11) | (1 << 13)
mask5 = (1 << 1) | (1 << 3) | (1 << 5) | (1 << 9) | (1 << 11) | (1 << 13) #invert the medium photon ID with the sigma_ietaieta cut removed
bitmap = photons[tight_photons[0]].vidNestedWPBitmap & mask1
if (bitmap == mask1):
self.out.fillBranch("photon_selection",2)
elif (bitmap == mask5):
self.out.fillBranch("photon_selection",1)
elif (bitmap == mask2) or (bitmap == mask3) or (bitmap == mask4):
self.out.fillBranch("photon_selection",0)
else:
assert(0)
isprompt_mask = (1 << 0) #isPrompt
isdirectprompttaudecayproduct_mask = (1 << 5) #isDirectPromptTauDecayProduct
photon_gen_matching=0
if hasattr(event,'nGenPart'):
for i in range(0,len(genparts)):
if genparts[i].pt > 5 and genparts[i].status == 1 and abs(genparts[i].pdgId) == 13 and ((genparts[i].statusFlags & isprompt_mask == isprompt_mask) or (genparts[i].statusFlags & isdirectprompttaudecayproduct_mask == isdirectprompttaudecayproduct_mask)) and deltaR(photons[tight_photons[0]].eta,photons[tight_photons[0]].phi,genparts[i].eta,genparts[i].phi) < 0.3:
photon_gen_matching += 1 #m -> g
if genparts[i].pt > 5 and genparts[i].status == 1 and abs(genparts[i].pdgId) == 11 and ((genparts[i].statusFlags & isprompt_mask == isprompt_mask) or (genparts[i].statusFlags & isdirectprompttaudecayproduct_mask == isdirectprompttaudecayproduct_mask)) and deltaR(photons[tight_photons[0]].eta,photons[tight_photons[0]].phi,genparts[i].eta,genparts[i].phi) < 0.3:
photon_gen_matching += 2 #e -> g
if genparts[i].pt > 5 and genparts[i].status == 1 and genparts[i].pdgId == 22 and ((genparts[i].statusFlags & isprompt_mask == isprompt_mask) or (genparts[i].statusFlags & isdirectprompttaudecayproduct_mask == isdirectprompttaudecayproduct_mask)) and deltaR(photons[tight_photons[0]].eta,photons[tight_photons[0]].phi,genparts[i].eta,genparts[i].phi) < 0.3:
if genparts[i].genPartIdxMother >= 0 and (abs(genparts[genparts[i].genPartIdxMother].pdgId) == 11 or abs(genparts[genparts[i].genPartIdxMother].pdgId) == 13 or abs(genparts[genparts[i].genPartIdxMother].pdgId) == 15):
photon_gen_matching += 8 #fsr photon
else:
photon_gen_matching += 4 #non-fsr photon
self.out.fillBranch("photon_gen_matching",photon_gen_matching)
self.out.fillBranch("photon_pt",photons[tight_photons[0]].pt)
self.out.fillBranch("photon_eta",photons[tight_photons[0]].eta)
self.out.fillBranch("photon_phi",photons[tight_photons[0]].phi)
self.out.fillBranch("mjj",(jets[tight_jets[0]].p4() + jets[tight_jets[1]].p4()).M())
self.out.fillBranch("detajj",abs(jets[tight_jets[0]].eta - jets[tight_jets[1]].eta))
self.out.fillBranch("event",event.event)
self.out.fillBranch("lumi",event.luminosityBlock)
self.out.fillBranch("run",event.run)
if hasattr(event,'Generator_weight'):
self.out.fillBranch("gen_weight",event.Generator_weight)
else:
self.out.fillBranch("gen_weight",0)
return True
# define modules using the syntax 'name = lambda : constructor' to avoid having them loaded when not needed
ewkzgjjModule = lambda : ewkzgjjProducer()
|
[
"amlevin@cern.ch"
] |
amlevin@cern.ch
|
8355f29de4f46b28b142c9e461c978618c11156f
|
fbf97497019a2cf2d5a0d5f60dd7e4a81f1d8bd0
|
/backend/customauth/migrations/0007_auto_20190530_1156.py
|
21e655a3fdbb02aa7ebc858da51e3b44b71cee14
|
[] |
no_license
|
totop716/ugg
|
3f1bc60f4476fd98de369f7ae722ae2b52dec560
|
56ebf281b5950918ef8e85102335f52b0c76fecb
|
refs/heads/master
| 2022-12-10T14:19:35.885870
| 2020-03-23T11:26:29
| 2020-03-23T11:26:29
| 249,412,202
| 0
| 0
| null | 2022-12-08T03:53:19
| 2020-03-23T11:28:17
|
CSS
|
UTF-8
|
Python
| false
| false
| 486
|
py
|
# Generated by Django 2.1.7 on 2019-05-30 11:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-05-30 11:56).

    Renames ``MyUser.po_box`` to ``po_box_unit_number`` and removes the
    now-redundant ``unit_number`` field.
    """

    # Must run after the previous customauth migration.
    dependencies = [
        ('customauth', '0006_auto_20190530_1153'),
    ]

    operations = [
        # Keep the rename BEFORE the removal: the new name absorbs the
        # old po_box column while unit_number is dropped separately.
        migrations.RenameField(
            model_name='myuser',
            old_name='po_box',
            new_name='po_box_unit_number',
        ),
        migrations.RemoveField(
            model_name='myuser',
            name='unit_number',
        ),
    ]
|
[
"totop716@gmail.com"
] |
totop716@gmail.com
|
948a1c43794b17ea0bc5ea2626374c0b268aca32
|
ebaed06f16af7a6bef21495287acebb5af76e3aa
|
/web-crawling/delight_crawler.py
|
125fcc9cc4c2f7dfa097e0eb8bab194ba2e4aa98
|
[] |
no_license
|
cherry979988/info-diff
|
50a4245720283c6d8057cdb096befb8b1c2d3154
|
de3f51187aab235a58512172f7134984bd68fcd1
|
refs/heads/master
| 2020-05-24T21:24:16.148099
| 2019-08-02T18:55:56
| 2019-08-02T18:55:56
| 187,473,982
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,284
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 19 15:16:38 2018
@author: xzzz
"""
import requests
import time
import xlwt
import re
from bs4 import BeautifulSoup
import gc
import read_bid
# Workbook accumulating one row per "attitude" (like) record scraped from weibo.
book_wr = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheet = book_wr.add_sheet('test', cell_overwrite_ok=True)

# Header row. Data columns are fixed: 0 = user name, 1 = user id, 2 = timestamp.
row = 0
sheet.write(row, 0, '用户名')
sheet.write(row, 1, '用户名id')
sheet.write(row, 2, '时间')
row += 1
book_wr.save(r'attitude.xls')

#pre_url = 'https://weibo.cn/attitude/G41yqqTyi?rl=1&page='
headers = {
    'cookie':'SCF=AuilS8C_l_Q4hcWzAntCupw6ySHu1JGSv06YfW1Q01YbXFuKIDQ-AbyRk_q7hteQzIb07nK0gJtceZM9F-G_aB4.; _T_WM=3e357b698836f1d3b5c126c4bf9138fc; SUB=_2A252S4HwDeRhGeBL61cS8S_Nyj-IHXVVty-4rDV6PUJbkdBeLW6kkW1NR06S8mnW27rUMebk6dSAlGJF-6SmrL7N; SUHB=0d4-eYtAjaj-SH',
    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# Resume offset: pages below this value are skipped for the FIRST blog id only;
# the counter is reset to 1 after each blog so later ids start from page 1.
count = 61270
#bid = ['G41pGh8LI','G4bQKjSVz','G41oXiQIi','G46KS1owP','G42h8v9mg','G47a33K52','G423pbcaN','G41pS2uDe','G41BfE77C','G49eM0vPN','G4aLIbEzr','G41BppoWc','G41QC4W6p','G3XA1fBTI','G4uBpzOVh','G3WFTtTyN','G49rF0OTB']
#'G4uy99S0t','G426NybQw','G41LbxlOz','G46YHfcM8','G430Nj8J1','G40sQ64zZ','G3WLZseST','G46rojy34','G46zx2DB4','G42kw2N8c',
bid = read_bid.get_bid('content2.xls', 248, 294)

for blog_name in bid:
    pre_url = 'https://weibo.cn/attitude/' + blog_name + '?rl=1&page='

    # Fetch page 1 once just to learn how many pages of likes exist
    # (weibo exposes the page count in a hidden <input name="mp">).
    url = pre_url + str(1)
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'html.parser')
    page_tag = soup.find('input', attrs={'name':'mp', 'type':'hidden'})
    if page_tag is None:
        page_number = 1
    else:
        page_number = int(page_tag.attrs['value'])
    del url, r, soup, page_tag
    gc.collect()
    time.sleep(8)

    while count <= page_number:
        url = pre_url + str(count)
        r = requests.get(url, headers=headers)
        soup = BeautifulSoup(r.content, 'html.parser')
        mark_tag = soup.find('div', attrs={'id':'attitude'})
        # If weibo refuses to serve the data, back off for 80 seconds and
        # re-request the same page until the content comes back.
        while mark_tag is None:
            del r, soup, mark_tag
            gc.collect()
            time.sleep(80)
            r = requests.get(url, headers=headers)
            soup = BeautifulSoup(r.content, 'html.parser')
            mark_tag = soup.find('div', attrs={'id':'attitude'})
        d_list = mark_tag.find_all_next('div', attrs={'class':'c'})
        for entry in d_list:
            sheet.write(row, 0, entry.a.text)
            result = re.search(r'/(\d+)$', entry.a.attrs['href'])
            if result is not None:
                sheet.write(row, 1, result.group(1))
            # BUGFIX: the timestamp now always lands in column 2. Previously,
            # when the user-id regex did not match, the timestamp was written
            # into column 1, silently misaligning those spreadsheet rows.
            sheet.write(row, 2, entry.span.text)
            row += 1
        gc.collect()
        book_wr.save(r'attitude.xls')
        print('tweet_id '+blog_name+' '+'page '+str(count)+' has done')
        del url, r, soup, mark_tag, d_list
        count += 1
        time.sleep(3)
    count = 1
|
[
"yeqy15@mails.tsinghua.edu.cn"
] |
yeqy15@mails.tsinghua.edu.cn
|
845f54b066f9fb8c81d3520f9e1664a0ffc92505
|
364d173ccc8f916b54e2a0adf98ed5d46446a100
|
/Flatbuffer/canv_flt/sequence_details.py
|
e7f8639a375f21f482eb9b1584fa8e7236b807f9
|
[] |
no_license
|
tanveer941/miscellaneous_supporting_app
|
117e73357f4ca11e2e7cf95621daebab162bf33e
|
25c79341573860abf97a803f3e28395159898314
|
refs/heads/master
| 2020-03-27T12:23:54.290534
| 2019-09-11T04:21:39
| 2019-09-11T04:21:39
| 146,544,214
| 0
| 0
| null | 2019-10-29T21:26:35
| 2018-08-29T04:17:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace:
import flatbuffers
class sequence_details(object):
    """Generated flatbuffers table accessor for `sequence_details`.

    Produced by the FlatBuffers compiler — do not edit by hand.
    Exposes two optional string fields: FolderName and FileName.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAssequence_details(cls, buf, offset):
        # Read the root table offset stored at `offset` and wrap the table.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = sequence_details()
        x.Init(buf, n + offset)
        return x

    # sequence_details
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # sequence_details
    def FolderName(self):
        # Vtable slot 4; returns None when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # sequence_details
    def FileName(self):
        # Vtable slot 6; returns None when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
# Generated builder helpers: start the 2-field table, append the string
# fields by their pre-created string offsets, then finish the object.
def sequence_detailsStart(builder): builder.StartObject(2)
def sequence_detailsAddFolderName(builder, FolderName): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(FolderName), 0)
def sequence_detailsAddFileName(builder, FileName): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(FileName), 0)
def sequence_detailsEnd(builder): return builder.EndObject()
|
[
"mohammed.tanveer@continental-corporation.com"
] |
mohammed.tanveer@continental-corporation.com
|
a4ca9efbd49fe56401b0c9f2e47ed03de5d2e30e
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/tree-big-3865.py
|
b81595c3efd5b844c317e6d5fbd8ebfbb6201a00
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,291
|
py
|
# Binary-search trees
class TreeNode(object):
    """One node of an int binary-search tree (ChocoPy-style annotations)."""
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None

    def insert(self:"TreeNode", x:int) -> bool:
        """Insert x below this node; return True iff the tree grew."""
        if x == self.value:
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode(x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode(x)
        return True

    def contains(self:"TreeNode", x:int) -> bool:
        """Return True iff x is stored in this subtree (iterative walk)."""
        node:"TreeNode" = self
        while node is not None:
            if x < node.value:
                node = node.left
            elif x > node.value:
                node = node.right
            else:
                return True
        return False
class TreeNode2(object):
    """Generated 2-field clone of TreeNode (synthetic benchmark padding).

    Only value/left/right participate in the BST; the *2 fields are never
    read, and the extra arguments of insert2/contains2 are ignored —
    those methods delegate to the same single-key logic.
    """
    value:int = 0
    value2:int = 0
    left:"TreeNode2" = None
    left2:"TreeNode2" = None
    right:"TreeNode2" = None
    right2:"TreeNode2" = None
    def insert(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
        # x2 is benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
        # x2 is benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode3(object):
    """Generated 3-field clone of TreeNode (synthetic benchmark padding).

    Only value/left/right participate in the BST; the *2/*3 fields are
    never read, and the extra arguments of insert2/3 and contains2/3 are
    ignored — those methods delegate to the same single-key logic.
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None
    def insert(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode4(object):
    """Generated 4-field clone of TreeNode (synthetic benchmark padding).

    Only value/left/right participate in the BST; the *2..*4 fields are
    never read, and the extra arguments of insert2..4 / contains2..4 are
    ignored — those methods delegate to the same single-key logic.
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    left:"TreeNode4" = None
    left2:"TreeNode4" = None
    left3:"TreeNode4" = None
    left4:"TreeNode4" = None
    right:"TreeNode4" = None
    right2:"TreeNode4" = None
    right3:"TreeNode4" = None
    right4:"TreeNode4" = None
    def insert(self:"TreeNode4", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode4", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode5(object):
    """Generated 5-field clone of TreeNode (synthetic benchmark padding).

    Only value/left/right participate in the BST; the *2..*5 fields are
    never read, and the extra arguments of insert2..5 / contains2..5 are
    ignored — those methods delegate to the same single-key logic.
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    value5:int = 0
    left:"TreeNode5" = None
    left2:"TreeNode5" = None
    left3:"TreeNode5" = None
    left4:"TreeNode5" = None
    left5:"TreeNode5" = None
    right:"TreeNode5" = None
    right2:"TreeNode5" = None
    right3:"TreeNode5" = None
    right4:"TreeNode5" = None
    right5:"TreeNode5" = None
    def insert(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # BUGFIX: the x4 annotation was the literal generator placeholder
    # "$Type" (a syntax error); it is `int` like every sibling signature.
    def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class Tree(object):
    """Int binary-search tree that tracks how many distinct keys it holds."""
    root:TreeNode = None
    size:int = 0

    def insert(self:"Tree", x:int) -> object:
        """Insert x; bump size only when the tree actually grew."""
        if self.root is not None:
            if self.root.insert(x):
                self.size = self.size + 1
        else:
            # First insertion: the new node becomes the root.
            self.root = makeNode(x)
            self.size = 1

    def contains(self:"Tree", x:int) -> bool:
        """Membership test; an empty tree contains nothing."""
        if self.root is not None:
            return self.root.contains(x)
        return False
class Tree2(object):
    """Generated 2-field clone of Tree (synthetic benchmark padding).

    Only root/size are used; root2/size2 are never read, and the extra
    arguments of insert2/contains2 are ignored.
    """
    root:TreeNode2 = None
    root2:TreeNode2 = None
    size:int = 0
    size2:int = 0
    def insert(self:"Tree2", x:int) -> object:
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree2", x:int, x2:int) -> object:
        # x2 is benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree2", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree2", x:int, x2:int) -> bool:
        # x2 is benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree3(object):
    """Generated 3-field clone of Tree (synthetic benchmark padding).

    Only root/size are used; the *2/*3 fields are never read, and the
    extra arguments of insert2/3 and contains2/3 are ignored.
    """
    root:TreeNode3 = None
    root2:TreeNode3 = None
    root3:TreeNode3 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    def insert(self:"Tree3", x:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree3", x:int, x2:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree3", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree3", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree4(object):
    """Generated 4-field clone of Tree (synthetic benchmark padding).

    Only root/size are used; the *2..*4 fields are never read, and the
    extra arguments of insert2..4 / contains2..4 are ignored.
    """
    root:TreeNode4 = None
    root2:TreeNode4 = None
    root3:TreeNode4 = None
    root4:TreeNode4 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    def insert(self:"Tree4", x:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree4", x:int, x2:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree4", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree4", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree5(object):
    """Generated 5-field clone of Tree (synthetic benchmark padding).

    Only root/size are used; the *2..*5 fields are never read, and the
    extra arguments of insert2..5 / contains2..5 are ignored.
    """
    root:TreeNode5 = None
    root2:TreeNode5 = None
    root3:TreeNode5 = None
    root4:TreeNode5 = None
    root5:TreeNode5 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    size5:int = 0
    def insert(self:"Tree5", x:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree5", x:int, x2:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
        # Extra args are benchmark padding; behaves exactly like insert(x).
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree5", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree5", x:int, x2:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Extra args are benchmark padding; behaves exactly like contains(x).
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    """Allocate a leaf TreeNode holding x (left/right stay None)."""
    node:TreeNode = None
    node = TreeNode()
    node.value = x
    return node
def makeNode2(x: int, x2: int) -> TreeNode2:
    """Allocate a TreeNode2 leaf holding x; x2 and b2 are unused padding."""
    b:TreeNode2 = None
    b2:TreeNode2 = None
    b = TreeNode2()
    b.value = x
    return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
    """Allocate a TreeNode3 leaf holding x; extra args/locals are unused padding."""
    b:TreeNode3 = None
    b2:TreeNode3 = None
    b3:TreeNode3 = None
    b = TreeNode3()
    b.value = x
    return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
    """Allocate a TreeNode4 leaf holding x; extra args/locals are unused padding."""
    b:TreeNode4 = None
    b2:TreeNode4 = None
    b3:TreeNode4 = None
    b4:TreeNode4 = None
    b = TreeNode4()
    b.value = x
    return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    """Allocate a TreeNode5 leaf holding x; extra args/locals are unused padding."""
    b:TreeNode5 = None
    b2:TreeNode5 = None
    b3:TreeNode5 = None
    b4:TreeNode5 = None
    b5:TreeNode5 = None
    b = TreeNode5()
    b.value = x
    return b
# Input parameters (the *2..*5 copies are unused benchmark padding).
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data (again, only the un-suffixed variables are actually used).
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch: insert n keys from a multiplicative congruential sequence
# (k -> k*37813 mod 37831), plus most loop indices (those not divisible
# by c), then print the tree size and which probe values are present.
t = Tree()
while i < n:
    t.insert(k)
    k = (k * 37813) % 37831
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
ed9a687c9e312704b2f96bd948983e7c9446acf6
|
73252cbe7b5e636a9b02c981af8489205a48052e
|
/polls/urls.py
|
7304f2dc9325c18cbb8ca7976aab796a467d6338
|
[] |
no_license
|
siddharthisaiah/pollsapi
|
6328168cd62f69408924f1a4377f6e69d56fa94b
|
f778d40e31909431cbfa02c1bbde5df7b529c433
|
refs/heads/master
| 2021-06-29T23:03:48.207782
| 2020-02-11T20:30:16
| 2020-02-11T20:30:16
| 238,857,342
| 0
| 0
| null | 2021-06-10T22:34:00
| 2020-02-07T06:21:46
|
Python
|
UTF-8
|
Python
| false
| false
| 855
|
py
|
from django.urls import path
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views
from .views import polls_list, polls_detail
from .apiviews import ChoiceList, CreateVote #, PollList, PollDetail
from .apiviews import PollViewSet, UserCreate, LoginView
# Router generates the list/detail routes for polls (replaces the
# commented-out manual path() entries below).
router = DefaultRouter()
router.register('polls', PollViewSet, basename='polls')

urlpatterns = [
    # path('polls/', PollViewSet, name='polls_list'),
    # path('polls/<int:pk>/', PollViewSet, name='polls_detail'),
    path('polls/<int:pk>/choices/', ChoiceList.as_view(), name='polls_choices'),
    path('polls/<int:pk>/choices/<int:choice_pk>/vote/',
         CreateVote.as_view(), name='create_vote'),
    path('users/', UserCreate.as_view(), name='user_create'),
    # DRF token auth endpoint: POST username/password, get an auth token.
    path('login/', views.obtain_auth_token, name='login')
]
# Append the router-generated polls routes to the explicit ones.
urlpatterns += router.urls
|
[
"siddharthisaiah@gmail.com"
] |
siddharthisaiah@gmail.com
|
8c756bd265bcaa35b4e1bd739faefabb429ca07b
|
8371e46c6e96aea5da8bed8b0dcd34a57b144fb1
|
/Python/lab10-unitconvert23.py
|
981fdce8b1fa323fba0165c38258bf2eaea79fd8
|
[] |
no_license
|
PDXChloe/PDXcodeguild
|
4bfcd31072bfd17cb7959f71adfd867ff9e5d9ac
|
23ca3dc245bf51be932790d03e3333d89c462180
|
refs/heads/master
| 2021-08-24T08:13:36.027155
| 2017-12-08T20:30:14
| 2017-12-08T20:30:14
| 105,682,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
#Version 2 + 3
# Interactive distance converter: asks for a distance and a unit, then
# prints the equivalent number of meters.
print("")  # blank lines for visual spacing before the prompts
print("")
# Conversion factors: meters per one unit of each supported distance unit.
meter_dic = {"ft": 0.3048, "mi": 1609.34, "m": 1, "km": 1000, "yd": 0.9144, "in": 0.0254}
distance = input("What is the distance? \n")
final_unit = input("What are the units (ft, mi, m, km, yd, in)?\n")
units = meter_dic.get(final_unit)
if units is None:
    # Unsupported unit: report it instead of crashing on None arithmetic.
    print("Unknown unit: " + final_unit)
else:
    # float() (not int()) so fractional distances like "2.5" work;
    # integer inputs produce the same numeric result as before.
    meters = float(distance) * units
    print(str(distance) + final_unit + " is " + str(meters) + " meters.")
|
[
"chloe_elliott@me.com"
] |
chloe_elliott@me.com
|
f6b360d0925b08bb26d5b5b7ca588703ea9ab596
|
73de523bde0c9e8398c63a924b44aadc46d11202
|
/test/test_worm_properties.py
|
ffd5d7b3cfcfae272dabbafe279dda6ef75a2c62
|
[
"MIT"
] |
permissive
|
Feyd-Aran/isilon_sdk_python
|
1c2fae306c1a95a99024dd13dc0fc3b120f9c1de
|
24e85a5577d15ac3db06862d07d5a261658c67b7
|
refs/heads/v8.0.0
| 2020-09-23T00:16:36.684270
| 2019-12-02T13:45:12
| 2019-12-02T13:45:12
| 225,351,700
| 0
| 0
|
MIT
| 2019-12-02T10:51:54
| 2019-12-02T10:51:53
| null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0
from isi_sdk_8_0.models.worm_properties import WormProperties # noqa: E501
from isi_sdk_8_0.rest import ApiException
class TestWormProperties(unittest.TestCase):
    """Unit test stubs for the WormProperties model (generated scaffold)."""

    def setUp(self):
        # No fixtures required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testWormProperties(self):
        """Smoke-test stub for constructing a WormProperties instance."""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_0.models.worm_properties.WormProperties()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"Brandon.Krueger@isilon.com"
] |
Brandon.Krueger@isilon.com
|
d4b409d45e6630900d2c234c83fa37e1dee5197d
|
de1a4823b44484a16e84229ef6d4c2218cebc240
|
/miscellanous_code.py
|
3a8a3e2cce20abc23d1198a336fee7524f3a433f
|
[
"Apache-2.0"
] |
permissive
|
jaehobang/Eva
|
b5baca7f91e3c6c6d439573430485afdfa3c05e8
|
e7f649990b8bca3bc29b3832c0ecf32efb402647
|
refs/heads/master
| 2020-07-11T13:05:42.284943
| 2020-04-14T21:17:38
| 2020-04-14T21:17:38
| 204,546,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
#### draw multiple figures with plt #####
import numpy as np
import matplotlib.pyplot as plt
# Demo: fill a rows x columns grid of subplots with random images.
# Each image is h x w pixels; the figure canvas is size x size inches.
w=10
h=10
size = 20
fig=plt.figure(figsize=(size, size))
columns = 4
rows = 5
for i in range(1, columns*rows +1):
    # Random integer image with values in [0, 10) as placeholder content.
    img = np.random.randint(10, size=(h,w))
    # Subplot positions are 1-based, hence range(1, rows*columns + 1).
    fig.add_subplot(rows, columns, i)
    plt.imshow(img)
plt.show()
#### drawing patches
# img: image to draw the patches on
# patches: list of rectangles to draw; interpretation depends on `format`:
#   'cv' -> (x, y, width, height)   (OpenCV-style boxes)
#   'ml' -> (x1, y1, x2, y2)        (corner coordinates)
def draw_patches(img, patches, format='ml'):
    """Return a copy of `img` with `patches` drawn as red rectangles.

    The input image is never modified. `patches` of None, or an unknown
    `format`, yield an unmodified copy (same as the original behavior).
    """
    import cv2
    new_img = np.copy(img)
    color = (0, 0, 255)  # red in OpenCV's BGR channel order
    if patches is None:
        return new_img
    for p in patches:
        top_left = (int(p[0]), int(p[1]))
        if format == 'cv':
            # width/height encoding -> derive the opposite corner.
            bottom_right = (int(p[0] + p[2]), int(p[1] + p[3]))
        elif format == 'ml':
            bottom_right = (int(p[2]), int(p[3]))
        else:
            continue
        cv2.rectangle(new_img, top_left, bottom_right, color, 2)
    return new_img
|
[
"jaehob@andrew.cmu.edu"
] |
jaehob@andrew.cmu.edu
|
3e9cadeb3061c9ecf3ad2073260050420e43eb21
|
a8e435a42361d1ea69f97fd0b77271d94e840e7f
|
/buildoutbuilder.managers/buildoutbuilder/managers/errors.py
|
c745f48a66e5ad60da0ffab00bba8ef202f2cc2f
|
[] |
no_license
|
xkenneth/buildoutbuilder
|
2aab4930ef7d0b297470f52ca4e51369e9987088
|
0c4195f7996e7be8e3edf1ee38863df6caf6aed0
|
refs/heads/master
| 2016-09-06T21:29:30.630016
| 2008-08-19T08:31:19
| 2008-08-19T08:31:19
| 26,232
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
class BuildoutBuilderException(Exception):
    # Base class for buildout-builder errors. Deliberately has NO
    # docstring: __doc__ is used as the error message, and each concrete
    # subclass supplies it via its own docstring.

    def __init__(self, value):
        # The subclass docstring doubles as the human-readable message.
        self.message = self.__doc__
        self.parameter = value

    def __str__(self):
        # repr() both parts so the offending value is unambiguous in logs.
        return f"{self.message!r}{self.parameter!r}"


class PartIsNotRecipe(BuildoutBuilderException):
    """Part does not have a recipe."""


class MissingPart(BuildoutBuilderException):
    """A part is missing."""
|
[
"xkenneth@gmail.com"
] |
xkenneth@gmail.com
|
0f23d8577d052f85a57c7570956f039776aea0c6
|
551dbe47bdeaee1a04c2e4757f7212e73f64790d
|
/costproject/costproject/settings.py
|
b3ce07ebc36e4cf9f1dc07049c4da64c5bd8ba12
|
[] |
no_license
|
ushanthaster/costproject
|
896acd96d85a30c1ed8829382a828f5c22f23d68
|
30ae89cbd432b0debc905c033ff2296e773d752a
|
refs/heads/master
| 2021-01-11T14:01:57.122610
| 2017-06-20T20:51:31
| 2017-06-20T20:51:31
| 94,934,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,683
|
py
|
"""
Django settings for costproject project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6)km=-p%dba6)#1f9fg866fooevxbhd$f^gk96q#%0yic%4#mo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'costapp'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'costproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'costproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'costproject.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'us/central'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
[
"evanhowell@consultant.com"
] |
evanhowell@consultant.com
|
9ea146ac2a09a9f0a3a98d4b6e5b4c9b4bb1bbf7
|
c0534d3104b97572fd8d74c3e6becbf1699b9954
|
/Arrays Left Rotation.py
|
9f45eb4f5a040cc7cc134ddebbc0368cce2f3fd3
|
[] |
no_license
|
VipinJain1/VIP-Algorithms-in-Python
|
a948ac422dcc5f7b33d598f4a19af77bee320a34
|
03b4734aa847f9cc25fa5b3f2393fdee18c6ac81
|
refs/heads/master
| 2020-05-30T07:00:43.402065
| 2019-06-19T00:52:16
| 2019-06-19T00:52:16
| 189,591,268
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,639
|
py
|
'''
A left rotation operation on an array shifts each of the array's elements unit to the left. For example, if left rotations are performed on array , then the array would become .
Given an array of integers and a number, , perform left rotations on the array. Return the updated array to be printed as a single line of space-separated integers.
Function Description
Complete the function rotLeft in the editor below. It should return the resulting array of integers.
rotLeft has the following parameter(s):
An array of integers .
An integer , the number of rotations.
Input Format
The first line contains two space-separated integers and , the size of and the number of left rotations you must perform.
The second line contains space-separated integers .
Constraints
Output Format
Print a single line of space-separated integers denoting the final state of the array after performing left rotations.
Sample Input
5 4
1 2 3 4 5
Sample Output
5 1 2 3 4
'''
#Solution:
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
    """Return `a` rotated left by `d` positions.

    A left rotation shifts each element one unit to the left, with the
    first element wrapping around to the end.

    Args:
        a (list): the values to rotate.
        d (int): number of left rotations; values larger than len(a)
            wrap around (generalizes the original, which assumed
            0 <= d <= len(a)).

    Returns:
        list: a new rotated list; `a` is not modified.
    """
    if not a:
        # Nothing to rotate; also avoids `d % 0` below.
        return []
    # Slicing performs the whole rotation in one O(n) pass: the first
    # d elements move to the back.
    d %= len(a)
    return a[d:] + a[:d]
if __name__ == '__main__':
    # HackerRank harness: read "n d" and the array from stdin, then write
    # the rotated array to the file named by the OUTPUT_PATH env var.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nd = input().split()
    n = int(nd[0])  # array size -- read but not otherwise used
    d = int(nd[1])  # number of left rotations
    a = list(map(int, input().rstrip().split()))
    result = rotLeft(a, d)
    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
|
[
"vipjain@gmail.com"
] |
vipjain@gmail.com
|
5d0da2fb05da5122abf58245cb1e127d006acbc0
|
4e0e96aa663683642bf1b6dd19885a245c5d4129
|
/flowcharts/migrations/0008_auto_20191213_2017.py
|
c1007c055a7255f7a5a274c89055b4b26adf3aa9
|
[] |
no_license
|
atleastzero/XYearDjango
|
fe12707f0cd4d8c272452cc2b004360e3554645d
|
5d25ed268fd6fdba49f7fe8ac0ba08031876e235
|
refs/heads/master
| 2021-09-24T19:13:56.990118
| 2019-12-14T03:52:11
| 2019-12-14T03:52:11
| 222,778,871
| 0
| 0
| null | 2021-09-22T18:07:23
| 2019-11-19T20:00:53
|
Python
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
# Generated by Django 3.0 on 2019-12-13 20:17
from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated migration: redefines Term.start_date / Term.end_date
    # as DateFields (only the verbose_name on each field changes).

    dependencies = [
        ('flowcharts', '0007_auto_20191213_0625'),
    ]

    operations = [
        migrations.AlterField(
            model_name='term',
            name='end_date',
            field=models.DateField(verbose_name='end_date'),
        ),
        migrations.AlterField(
            model_name='term',
            name='start_date',
            field=models.DateField(verbose_name='start_date'),
        ),
    ]
|
[
"megan.obryan52@gmail.com"
] |
megan.obryan52@gmail.com
|
b150f199a4268e8ab72d5c9a9ce49b2d6abe73d4
|
698cb8d24879fe75669af6f2667c3f88660a0a1e
|
/deepModel/s11b_ALS_CONCAT.py
|
29627c713b760a2dcfce82233dba73e25b24c24f
|
[] |
no_license
|
HuichuanLI/Recommand-Algorithme
|
c83c5d34d75eebd127e2aef7abc8b7152fc54f96
|
302e14a3f7e5d72ded73b72a538596b6dc1233ff
|
refs/heads/master
| 2023-05-11T03:01:30.940242
| 2023-04-30T08:03:19
| 2023-04-30T08:03:19
| 187,097,782
| 71
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,293
|
py
|
import numpy as np
from data_set import filepaths as fp
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as F
import torch
from sklearn.metrics import precision_score, recall_score, accuracy_score
from basic_sim import dataloader
class ALS_MLP(nn.Module):
    """Embedding + MLP recommender.

    A user embedding and an item embedding are concatenated and scored
    by a three-layer MLP; a sigmoid turns the final score into a
    match probability.
    """

    def __init__(self, n_users, n_items, dim):
        """
        :param n_users: number of users
        :param n_items: number of items
        :param dim: embedding dimension
        """
        super(ALS_MLP, self).__init__()
        # Randomly initialised embedding tables; max_norm=1 caps each
        # vector's length at 1.
        self.users = nn.Embedding(n_users, dim, max_norm=1)
        self.items = nn.Embedding(n_items, dim, max_norm=1)
        # The first layer takes dim * 2 inputs because the user and item
        # vectors are concatenated before entering the MLP.
        self.denseLayer1 = self.dense_layer(dim * 2, dim)
        self.denseLayer2 = self.dense_layer(dim, dim // 2)
        # The last layer maps down to a single score, activated by a
        # sigmoid to become the model output.
        self.denseLayer3 = self.dense_layer(dim // 2, 1)
        self.sigmoid = nn.Sigmoid()

    def dense_layer(self, in_features, out_features):
        # One MLP unit: linear transform followed by a Tanh activation.
        return nn.Sequential(
            nn.Linear(in_features, out_features),
            nn.Tanh()
        )

    def forward(self, u, v, isTrain=True):
        """
        :param u: user index ids, shape [batch_size]
        :param v: item index ids, shape [batch_size]
        :param isTrain: apply dropout when True (training mode)
        :return: predicted match probability, shape [batch_size]
        """
        # Embedding lookups: [batch_size, dim] each.
        user_vec = self.users(u)
        item_vec = self.items(v)
        # Concatenate to [batch_size, dim * 2] and run through the MLP.
        x = torch.cat([user_vec, item_vec], dim=1)
        x = self.denseLayer1(x)          # [batch_size, dim]
        x = self.denseLayer2(x)          # [batch_size, dim // 2]
        # Dropout only during training, to reduce overfitting.
        if isTrain:
            x = F.dropout(x)
        x = self.denseLayer3(x)          # [batch_size, 1]
        # Drop the trailing singleton dimension -> [batch_size].
        x = torch.squeeze(x)
        return self.sigmoid(x)
def doEva(net, d):
    """Evaluate `net` on dataset `d` (rows of [user, item, rating]).

    Scores every (user, item) pair, thresholds at 0.5, and returns
    (precision, recall, accuracy) against the 0/1 ratings.
    """
    samples = torch.LongTensor(d)
    users, items, ratings = samples[:, 0], samples[:, 1], samples[:, 2]
    # Inference only -- no gradients, dropout disabled via isTrain=False.
    with torch.no_grad():
        scores = net(users, items, False)
    predictions = np.array([1 if s >= 0.5 else 0 for s in scores])
    truth = ratings.detach().numpy()
    precision = precision_score(truth, predictions)
    recall = recall_score(truth, predictions)
    accuracy = accuracy_score(truth, predictions)
    return precision, recall, accuracy
def train(epochs=10, batchSize=1024, lr=0.001, dim=128, eva_per_epochs=1):
    '''
    Train the ALS_MLP recommender on the MovieLens-100K ratings.

    :param epochs: number of training epochs
    :param batchSize: samples per mini-batch
    :param lr: learning rate
    :param dim: user/item embedding dimension
    :param eva_per_epochs: run evaluation every this many epochs
    '''
    # Load data (90/10 train/test split).
    user_set, item_set, train_set, test_set = \
        dataloader.readRecData(fp.Ml_100K.RATING, test_ratio=0.1)
    # Initialise the ALS model.
    net = ALS_MLP(len(user_set), len(item_set), dim)
    # Optimiser and loss: AdamW with weight decay, binary cross-entropy
    # on the sigmoid outputs.
    optimizer = torch.optim.AdamW(net.parameters(), lr=lr, weight_decay=0.2)
    criterion = torch.nn.BCELoss()
    for e in range(epochs):
        all_lose = 0.0
        # Read the training data one shuffled mini-batch at a time.
        for u, i, r in DataLoader(train_set, batch_size=batchSize, shuffle=True):
            optimizer.zero_grad()
            r = torch.FloatTensor(r.detach().numpy())
            result = net(u, i)
            loss = criterion(result, r)
            # .item() extracts a plain float; accumulating the loss
            # *tensor* (as before) would keep every batch's autograd
            # graph alive for the whole epoch -- a memory leak.
            all_lose += loss.item()
            loss.backward()
            optimizer.step()
        print('epoch {}, avg_loss = {:.4f}'.format(e, all_lose / (len(train_set) // batchSize)))
        # Periodic evaluation on both splits.
        if e % eva_per_epochs == 0:
            p, r, acc = doEva(net, train_set)
            print('train: Precision {:.4f} | Recall {:.4f} | accuracy {:.4f}'.format(p, r, acc))
            p, r, acc = doEva(net, test_set)
            print('test: Precision {:.4f} | Recall {:.4f} | accuracy {:.4f}'.format(p, r, acc))
if __name__ == '__main__':
    # Script entry point: train with the default hyper-parameters.
    train()
|
[
"lhc14124908@163.com"
] |
lhc14124908@163.com
|
0f9b4a02fec70df429500e3582e6a107c7546ee6
|
3a6199c490047fc444be8f94580655b10e6139aa
|
/deg_pln/deg_pln/planner/views.py
|
af66d8304f754117067d3e2444f4c26291e2cd71
|
[
"MIT"
] |
permissive
|
XheonNovus/DegreePlanner
|
cbcc13c3a48e1af8e7a5a323d91ccdb36d159553
|
e8ea17d4a9811767264cded7930642a7b333088f
|
refs/heads/master
| 2020-03-09T00:32:27.640137
| 2018-02-26T02:08:15
| 2018-02-26T02:08:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import loader
from django.urls import reverse
from .models import Class
from django.views import generic
# Create your views here.
class IndexView(generic.ListView):
    """Landing page: lists every Class in the planner's catalog."""
    template_name = 'planner/index.html'
    context_object_name = 'classes'

    def get_queryset(self):
        # The index is unfiltered -- show the whole catalog.
        return Class.objects.all()
class EligibleView(generic.ListView):
    # Lists classes the student is currently eligible to take.
    # NOTE(review): get_queryset has side effects -- it promotes classes
    # to eligible and saves them to the database on every page load.
    template_name = 'planner/eligible.html'
    context_object_name = 'eligible_classes'

    def get_queryset(self):
        elig_class = Class.objects.filter(elig=False)  # all classes not yet marked eligible
        elig_class = elig_class.filter(taken=False)  # ...and not yet taken
        for stuff in elig_class:  # check each candidate's prerequisite
            req1 = stuff.pre_req1  # code of the first (only handled) prerequisite
            if req1 == '':
                # No prerequisite at all -> immediately eligible.
                stuff.elig = True
                stuff.save()
            else:
                #req2 = stuff.pre_req2
                #req3 = stuff.pre_req3
                c1 = Class.objects.get(code = req1)  # look up the prerequisite class by its code
                #c2 = Class.objects.get(name = req2)
                #c3 = Class.objects.get(name = req3)
                if c1.taken == True:  # prerequisite satisfied -> promote to eligible
                    stuff.elig = True
                    stuff.save()  # persist the update
        return Class.objects.filter(elig=True)
class TakenView(generic.ListView):
    """Lists only the classes the student has already taken."""
    template_name = 'planner/taken.html'
    context_object_name = 'taken_classes'

    def get_queryset(self):
        # Only classes flagged as taken.
        return Class.objects.filter(taken=True)
|
[
"parshad.anil@gmail.com"
] |
parshad.anil@gmail.com
|
8e0dea7c0f27c1eca36d8902f9f557d058dadf2f
|
5cad23c80ce4e22603a14d21eac48fa76517595c
|
/pickle2csv.py
|
3ae65a5e781efef6d6b38547d12bfa3412421a3d
|
[] |
no_license
|
cramraj8/adrc-clustering
|
4f8e63b62be4bbe986679894487d89423fb26a05
|
d033452b9cadd40833e49c297077244383239370
|
refs/heads/master
| 2021-06-13T11:13:14.003846
| 2017-04-06T17:36:46
| 2017-04-06T17:36:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
import csv
import pickle
import json

# Convert a pickled list of dicts (one per record, all sharing the same
# keys) into a tab-delimited file: a header row, then one row per record.
# Context managers guarantee both files are closed even on error (the
# original never closed either handle).
with open("WordFluencyMultiTest.pickle", "rb") as src:
    data = pickle.load(src)

# newline='' is required by the csv module so the writer controls line
# endings itself (avoids blank lines on Windows).
with open("WordFluencyMultiTest.csv", "w", newline='') as fh:
    w = csv.writer(fh, delimiter='\t')
    # Header from the first record's keys; assumes every record has the
    # same keys in the same order -- TODO confirm upstream guarantees this.
    w.writerow(data[0].keys())
    for record in data:
        w.writerow(record.values())
|
[
"mohammed@c-ac-19-162.wireless.emory.edu"
] |
mohammed@c-ac-19-162.wireless.emory.edu
|
463cdc82d5cd7cd1180bc5eaf2219bb87377ff45
|
c9ad6ad969de505b3c8471c6f46dfd782a0fb498
|
/0x07-python-test_driven_development/2-matrix_divided.py
|
e9a56cfcebadaa9b6d3ac60f351c118538368d68
|
[] |
no_license
|
enterpreneur369/holbertonschool-higher_level_programming
|
002fd5a19b40c8b1db06b34c4344e307f24c17ac
|
dd7d3f14bf3bacb41e2116d732ced78998a4afcc
|
refs/heads/master
| 2022-06-20T00:57:27.736122
| 2020-05-06T14:26:10
| 2020-05-06T14:26:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,261
|
py
|
#!/usr/bin/python3
""" Module 2-matrix_mul
Module that contains the function matrix_mul
"""
def matrix_mul(m_a, m_b):
    """Multiply two matrices and return the product matrix.

    Arguments:
        m_a (list of lists of int/float): left matrix, shape (n x m)
        m_b (list of lists of int/float): right matrix, shape (m x p)

    Returns:
        list of lists: the (n x p) product of m_a and m_b

    Raises:
        TypeError: if an argument is not a list of lists of numbers, or
            rows within a matrix have differing sizes
        ValueError: if a matrix is empty or the shapes cannot be
            multiplied

    Fixes over the original: the product loop now actually iterates j
    (the original reused a stale j from a validation loop and appended
    inside the k loop); the empty-m_b message no longer says "m_a";
    m_b's element-type error no longer raises the m_a message; and
    emptiness is checked before indexing row 0.
    """
    if not isinstance(m_a, list):
        raise TypeError("m_a must be a list")
    if not isinstance(m_b, list):
        raise TypeError("m_b must be a list")
    if not all(isinstance(row, list) for row in m_a):
        raise TypeError("m_a must be a list of lists")
    if not all(isinstance(row, list) for row in m_b):
        raise TypeError("m_b must be a list of lists")
    if m_a == [] or m_a == [[]]:
        raise ValueError("m_a can't be empty")
    if m_b == [] or m_b == [[]]:
        raise ValueError("m_b can't be empty")
    for row in m_a:
        for x in row:
            if not isinstance(x, (int, float)):
                raise TypeError("m_a should contain only integers or floats")
    for row in m_b:
        for x in row:
            if not isinstance(x, (int, float)):
                raise TypeError("m_b should contain only integers or floats")
    if any(len(row) != len(m_a[0]) for row in m_a):
        raise TypeError("each row of m_a must be of the same size")
    if any(len(row) != len(m_b[0]) for row in m_b):
        raise TypeError("each row of m_b must be of the same size")
    # (n x m) @ (m x p): m_a's column count must equal m_b's row count.
    if len(m_a[0]) != len(m_b):
        raise ValueError("m_a and m_b can't be multiplied")
    # res[i][j] = sum over k of m_a[i][k] * m_b[k][j]
    return [[sum(m_a[i][k] * m_b[k][j] for k in range(len(m_b)))
             for j in range(len(m_b[0]))]
            for i in range(len(m_a))]
|
[
"jose.calderon@holbertonschool.com"
] |
jose.calderon@holbertonschool.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.