Dataset schema (one record per file; the viewer's min/max stats in parentheses):

    blob_id               string (40 chars)
    directory_id          string (40 chars)
    path                  string (3-281 chars)
    content_id            string (40 chars)
    detected_licenses     list (0-57 items)
    license_type          string (2 classes)
    repo_name             string (6-116 chars)
    snapshot_id           string (40 chars)
    revision_id           string (40 chars)
    branch_name           string (313 classes)
    visit_date            timestamp[us]
    revision_date         timestamp[us]
    committer_date        timestamp[us]
    github_id             int64 (18.2k-668M)
    star_events_count     int64 (0-102k)
    fork_events_count     int64 (0-38.2k)
    gha_license_id        string (17 classes)
    gha_event_created_at  timestamp[us]
    gha_created_at        timestamp[us]
    gha_language          string (107 classes)
    src_encoding          string (20 classes)
    language              string (1 class)
    is_vendor             bool (2 classes)
    is_generated          bool (2 classes)
    length_bytes          int64 (4-6.02M)
    extension             string (78 classes)
    content               string (2-6.02M chars)
    authors               list (1 item)
    author                string (0-175 chars)

repo_name: paulloy/msp4-brazen-mma | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /products/widgets.py | extension: py | length_bytes: 362 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 55d9f6399b04b0ce86ed81f5a3b8c384235039dd | directory_id: 02be54b8a9ab6813274ae18feb428d0d1405b0b0 | content_id: 85945a02257c7670ba7e55d443f81a90674423f4
snapshot_id: 8ee98e2f2b12a4a24b14d30c1456233b02457dcd | revision_id: 33a430d6c81bb44525469bcee1beaee7c92b3f58
visit_date: 2023-03-27T13:29:12.825997 | revision_date: 2021-03-31T03:30:46 | committer_date: 2021-03-31T03:30:46
github_id: 341317874 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
from django.forms.widgets import ClearableFileInput
from django.utils.translation import gettext_lazy as _


class CustomClearableFileInput(ClearableFileInput):
    # Wrap the labels in gettext_lazy so they are translatable; the bare
    # parentheses in the original, e.g. ('Remove'), were plain strings and
    # left the imported _ unused.
    clear_checkbox_label = _('Remove')
    initial_text = _('Current Image')
    input_text = _('')
    template_name = ('products/custom_widget_templates'
                     '/custom_clearable_file_input.html')
[ "paulloy020896@gmail.com" ]
paulloy020896@gmail.com
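A minimal sketch of wiring the widget into a form, assuming a hypothetical Product model with an image field (the form and model here are illustrative, not part of the source file):

    from django import forms

    from .models import Product  # hypothetical model with an ImageField
    from .widgets import CustomClearableFileInput


    class ProductForm(forms.ModelForm):
        class Meta:
            model = Product
            fields = ['image']
            # Render the image field with the custom widget so the labels
            # and template defined above replace Django's defaults.
            widgets = {'image': CustomClearableFileInput()}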

repo_name: glebkuznetsov/Protocols | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /protocols/Plate_Cells_Trough/Plate_Cells_Trough_2017-10-25.ot1.py | extension: py | length_bytes: 1618 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 88919fdc89c292d3a1afd47cf1b67122a3121bdf | directory_id: 4957dd5d3583552f1ebc06f8125c10753383e81c | content_id: 70480af1f758312124889a18e41a44ccae577152
snapshot_id: 4072eb4bb1bf18e9324fa3bada953b0655db2f4d | revision_id: 0e3e9b03f1616c695f9e8e843dc5f9e3a59d1619
visit_date: 2020-03-27T05:43:11.285730 | revision_date: 2018-05-23T21:33:58 | committer_date: 2018-05-23T21:33:58
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
from opentrons import containers, instruments

trough = containers.load('trough-12row', 'A1')
trash = containers.load('trash-box', 'B1')
tiprack = containers.load('tiprack-200ul', 'C1')

plate1 = containers.load('96-deep-well', 'D1')
plate2 = containers.load('96-deep-well', 'E1')
plate3 = containers.load('96-deep-well', 'A2')
plate4 = containers.load('96-deep-well', 'B2')
plate5 = containers.load('96-deep-well', 'C2')
plate6 = containers.load('96-deep-well', 'D2')
plate7 = containers.load('96-deep-well', 'E2')

all_plates = [plate1, plate2, plate3, plate4, plate5, plate6, plate7]

p50multi = instruments.Pipette(
    axis='a',
    name='p50',
    max_volume=50,
    min_volume=5,
    channels=8,
    tip_racks=[tiprack],
    trash_container=trash)

media = trough.wells(0, length=6)
cells = trough.wells('A8')


def run_custom_protocol(number_of_plates: int=7):
    plates = all_plates[0:number_of_plates]

    tube_vol = 20000
    media_vol = 80
    media_vol_per_plate = 96 * media_vol
    media_tube = 0
    cell_vol = 20

    p50multi.pick_up_tip()
    for plate in plates:
        tube_vol = tube_vol - 7680
        if tube_vol <= media_vol_per_plate:
            tube_vol = 20000
            media_tube += 1
        p50multi.distribute(
            media_vol,
            media[media_tube],
            plate.rows(),
            new_tip='never')
    p50multi.drop_tip()

    p50multi.pick_up_tip()
    p50multi.mix(5, p50multi.max_volume)
    for plate in plates:
        p50multi.distribute(
            cell_vol,
            cells,
            plate.rows(),
            new_tip='never')
    p50multi.drop_tip()
[ "laura@opentrons.com" ]
laura@opentrons.com
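The protocol exposes a single entry point whose only argument is the plate count; a sketch of a smaller run (the count is illustrative):

    # Distribute media and cells to only the first three of the seven plates
    # loaded above; tips, trough and trash stay as declared.
    run_custom_protocol(number_of_plates=3)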

repo_name: harshitahluwalia7895/Thread_and_Processes | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /answer4.py | extension: py | length_bytes: 232 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 6346b5baf6d9b590f6b07edcd2f1fef680b4fe6f | directory_id: 32263e1e50b90ce7049661f85f515ae8b3f11120 | content_id: 1ee21b24c5a4b68915a24f9f4b3a7c5b16233a2a
snapshot_id: 7e926f211c75d5d700be28b29da77fb5e1b48182 | revision_id: 3e7ad17b7ea2056b124e5bfd082f2ab39ce09552
visit_date: 2020-03-19T06:44:48.014423 | revision_date: 2018-06-04T16:18:27 | committer_date: 2018-06-04T16:18:27
github_id: 136051164 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
from threading import Thread  # explicit import instead of the wildcard
import math


class abc(Thread):
    def run(self):
        # Executes on the new thread once start() is called.
        m = int(input('Enter the Number to be Factorial: '))
        print('The factorial of {} is'.format(m), math.factorial(m))


t = abc()
t.start()
[ "harshit.ahluwalia7895889924@gmail.com" ]
harshit.ahluwalia7895889924@gmail.com
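For non-interactive use, the same factorial job can be handed to a plain Thread with a target function; a sketch (the function name and argument are illustrative):

    import math
    from threading import Thread


    def print_factorial(m):
        print('The factorial of {} is'.format(m), math.factorial(m))


    # Run the computation on a worker thread and block until it finishes.
    t = Thread(target=print_factorial, args=(6,))
    t.start()
    t.join()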

repo_name: fortesinformatica/SIGA | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /src/webjob/log_history_manager.py | extension: py | length_bytes: 815 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: d510b163bd7b37d3020df5d1bb6e1dca90b0d1ce | directory_id: a879480e8ecff79be624df9d6f67d49c90448672 | content_id: 9f707cf56282b0a3f5e56c856d3672d1066cd71b
snapshot_id: 49299696e5de14a1e4fa8e31ef5d8529e2711ded | revision_id: 2b2473c7880becf4f47ea36925f09660379e5eb8
visit_date: 2022-07-27T16:56:10.499679 | revision_date: 2019-06-17T13:22:18 | committer_date: 2019-06-17T13:22:18
github_id: 192342983 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: 2021-03-25T22:41:08 | gha_created_at: 2019-06-17T12:30:18 | gha_language: Python

content:
""" LogHistoryManager ===== Prover 1. Gerenciamento da quantidade de log mantidos no historico 2. Remoção dos log mais antigos Como usar Adicione este script como um WebJob Triggered no Azure App Service "siga-api" com a seguinte expressão CRON: 0 0 0 * * * """ import os import sys sys.path.append(os.getenv("APP_ROOT")) sys.path.append(os.getenv("APP_SITEPACKAGES")) days_ago = int(os.getenv("PERIOD_LOG_IN_DAYS")) log_directory = os.getenv("APP_LOG") if os.path.exists(log_directory): files = os.listdir(log_directory) files.sort() number_of_files = len(files) if number_of_files > days_ago: excess = number_of_files - days_ago to_remove = files[:excess] for file in to_remove: os.remove(log_directory + file) print(files)
[ "ronaldox2@gmail.com" ]
ronaldox2@gmail.com

repo_name: 18-500-b9/pool-simulator | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /src/pool/colors.py | extension: py | length_bytes: 405 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 29faaabb15279e3864e8a6f022d909da552617c5 | directory_id: edab5b4487eb5619d717acd64a988930ba069b4a | content_id: 8304ae2b4540b5142d4580d8f6ef6aa2acb40fe2
snapshot_id: e7a9313a567834797dfe5e0796796c202bc86650 | revision_id: e2efd5a9b8d70cebf8cb670fc8a587851a909e14
visit_date: 2020-04-21T03:04:41.149379 | revision_date: 2019-03-21T13:55:38 | committer_date: 2019-03-21T13:55:38
github_id: 169274104 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
""" RGB color values for PyGame """ AQUA = (0, 255, 255) BLACK = (0, 0, 0) BLUE = (0, 0, 255) FUCHSIA = (255, 0, 255) GRAY = (128, 128, 128) GREEN = (0, 128, 0) LIME = (0, 255, 0) MAROON = (128, 0, 0) NAVY_BLUE = (0, 0, 128) OLIVE = (128, 128, 0) PURPLE = (128, 0, 128) RED = (255, 0, 0) SILVER = (192, 192, 192) TEAL = (0, 128, 128) WHITE = (255, 255, 255) YELLOW = (255, 255, 0) ORANGE = (255, 165, 0)
[ "samuelkim523@gmail.com" ]
samuelkim523@gmail.com
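A sketch of how such a palette is typically consumed in PyGame (the import path is assumed from the repo layout src/pool/colors.py; the window and shapes are illustrative):

    import pygame

    from pool.colors import BLACK, GREEN, WHITE  # assumed import path

    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    screen.fill(GREEN)                                 # table felt
    pygame.draw.circle(screen, WHITE, (320, 240), 10)  # cue ball
    pygame.draw.circle(screen, BLACK, (360, 240), 10)  # eight ball
    pygame.display.flip()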

repo_name: jeremi420/aplikacje-internetowe-21686-185ic | branch_name: refs/heads/main | license_type: no_license | detected_licenses: []
path: /lab9_i_10/backend/todos/migrations/0001_initial.py | extension: py | length_bytes: 605 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: ecc82156cf415769d849cd147b8a00a30b5f94b0 | directory_id: 28cb13c23d37ea87d57eae173d85692df8cf4a87 | content_id: a8df615cf957eda7058e18ec8573d20d050894da
snapshot_id: e25ad1912a53242be5e7e7d46d6192bc59de9578 | revision_id: a08c937fadcbe9986b4bb2c5f0a3a768ee6d7e96
visit_date: 2023-03-07T10:18:24.601142 | revision_date: 2021-02-16T23:44:40 | committer_date: 2021-02-16T23:44:40
github_id: 311286151 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
# Generated by Django 3.1.5 on 2021-01-17 19:46

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('description', models.TextField()),
                ('completed', models.BooleanField(default=False)),
            ],
        ),
    ]
[ "jerwier420@gmail.com" ]
jerwier420@gmail.com

repo_name: Li-Rui-QI/practice | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /venv/Scripts/easy_install-script.py | extension: py | length_bytes: 440 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 13dd0603c64d79f959ef65d4b02c4e97751e6004 | directory_id: 19a6255cd379a3bd446f022e408e6cc46f32746b | content_id: c8194e216a42d8c1f63e1cc8dc562e7f78aea851
snapshot_id: 6a78826d254f618e5b40ef85670d6f00aa3ea7e1 | revision_id: 3830e4b3f6c328f486c5c2f82f31f9038b0acc55
visit_date: 2022-11-19T23:56:34.633858 | revision_date: 2020-07-22T18:02:11 | committer_date: 2020-07-22T18:02:11
github_id: 279687293 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
#!C:\liruiqi\college3\practice\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
[ "li995186428@gmail.com" ]
li995186428@gmail.com

repo_name: MairisLV/Final_Project | branch_name: refs/heads/main | license_type: no_license | detected_licenses: []
path: /Final_App/basic_calculation.py | extension: py | length_bytes: 488 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: c8ad214213403b0a248e5896f4ef5208e2cff18b | directory_id: bb0ba7523f3ea5adf18acb5fe82bdfdc7423c9c9 | content_id: c96bae878c1babe3fdb118535ca1a27c3ef8899b
snapshot_id: a899ea363301198088da7b8c90d9cca3c12b03aa | revision_id: d94c6753650665764e518cd5196404206e90ee18
visit_date: 2023-04-28T07:21:12.423801 | revision_date: 2021-05-11T20:09:12 | committer_date: 2021-05-11T20:09:12
github_id: 366501594 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
class Calculate_Loan():

    def __init__(self, loan_amount, term, interest_rate):
        self.loan_amount = loan_amount
        self.term = term
        self.interest_rate = interest_rate

    def interest(self):
        # Compound interest: S = K(1+i)^n, so the interest alone is S - K.
        # The original looped over every period, recomputing and overwriting
        # the total each time; only the final iteration (period = term)
        # mattered, so computing it once for n = term is equivalent.
        total = float(self.loan_amount) * (1 + float(self.interest_rate)) ** int(self.term)
        return total - float(self.loan_amount)
[ "mairis.baumanis@gmail.com" ]
mairis.baumanis@gmail.com
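A quick worked check of the formula (numbers are illustrative): a loan of 1000 at 5% per period over 2 periods grows to 1000 * 1.05**2 = 1102.5, so the interest alone is 102.5.

    loan = Calculate_Loan(loan_amount=1000, term=2, interest_rate=0.05)
    print(loan.interest())  # ~102.5, i.e. S - K = K(1 + i)**n - K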

repo_name: akvelon/Bitcoin-Transaction-Optimization | branch_name: refs/heads/master | license_type: permissive | detected_licenses: [ "MIT", "Apache-2.0" ]
path: /predictor-service/settings.py | extension: py | length_bytes: 949 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 3dc58c5a41c2665912c4b9f79f021b245d79b4cb | directory_id: c76585f0e1a8cc35018bb11c6c01733fa2226df3 | content_id: b65c225a235d823d45160bfc596d9bff87c25056
snapshot_id: 9e8099b3ae64612bbaf95603e470763b22da9c5e | revision_id: d5a9eb4322370b81721023daf8dd0eef0d3189a8
visit_date: 2023-06-08T18:28:03.464518 | revision_date: 2023-05-29T16:10:41 | committer_date: 2023-05-29T16:10:41
github_id: 173724819 | star_events_count: 8 | fork_events_count: 1 | gha_license_id: Apache-2.0 | gha_event_created_at: 2022-11-21T22:18:54 | gha_created_at: 2019-03-04T10:35:27 | gha_language: Jupyter Notebook

content:
""" Copyright 2019 Akvelon Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json class Settings: # Settings keys MODEL_PATH = 'model_path' SCALER_PATH = 'scaler_path' # Other constants CONFIG_PATH = 'config/config.json' def __init__(self): with open(Settings.CONFIG_PATH) as f: self.settings = json.load(f) def __getitem__(self, key): return self.settings.get(key, '')
[ "sergey.kubasov@akvelon.com" ]
sergey.kubasov@akvelon.com
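A sketch of the config file and lookups the class expects; the file contents are illustrative, and config/config.json must exist relative to the working directory:

    # config/config.json (illustrative):
    #   {"model_path": "models/fee_model.h5", "scaler_path": "models/scaler.pkl"}

    settings = Settings()
    model_path = settings[Settings.MODEL_PATH]  # 'models/fee_model.h5'
    missing = settings['unknown_key']           # '' -- __getitem__ falls back to ''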

repo_name: luke-fs/udacity_meme_generator | branch_name: refs/heads/main | license_type: no_license | detected_licenses: []
path: /QuoteEngine/TextIngestor.py | extension: py | length_bytes: 872 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 7d32ccc1b8c31ca35bf202a233e4f3f95ec0f4cc | directory_id: 23239bbcca2fe6dc2b14688063b7f75abdc0e29d | content_id: 46fbd256e017acec8e388625f83aafe6be85321f
snapshot_id: 1d110aedbfbb203f9a071bc28daa51b576a64fc6 | revision_id: 30c9cc5c9ea8fa5cb76a78dc66a651e2ef231712
visit_date: 2023-03-30T04:27:14.174334 | revision_date: 2021-04-04T14:41:10 | committer_date: 2021-04-04T14:41:10
github_id: 353123922 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-04-04T14:41:10 | gha_created_at: 2021-03-30T19:51:19 | gha_language: HTML

content:
from typing import List

from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel


class TextIngestor(IngestorInterface):
    """The TXT Ingestor Class, to ingest TXT quotes

    Args:
        IngestorInterface: This class inherits from Interface
    """

    allowed_extensions = ['txt']

    @classmethod
    def parse(cls, path: str) -> List[QuoteModel]:
        if not cls.can_ingest(path):
            raise Exception('cannot ingest exception')

        quotes = []
        # A context manager replaces the original manual open()/close() pair;
        # the unused subprocess and os imports were dropped.
        with open(path, 'r') as file_ref:
            for line in file_ref.readlines():
                line = line.strip('\n\r').strip()
                if len(line) > 0:
                    # Each non-empty line is expected as 'body - author'.
                    parsed = line.split(' - ')
                    new_quote = QuoteModel(parsed[0], parsed[1])
                    quotes.append(new_quote)

        return quotes
[ "luke.strauer1@googlemail.com" ]
luke.strauer1@googlemail.com
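A sketch of feeding the ingestor a quotes file (file name and contents are illustrative; QuoteModel is assumed to expose the body and author passed to it):

    # quotes.txt, one 'body - author' pair per line:
    #   Chase the mailman - Skittle
    #   Bark like no one is listening - Rex

    quotes = TextIngestor.parse('quotes.txt')
    for quote in quotes:
        print(quote.body, '-', quote.author)  # attribute names assumed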

repo_name: amluto/capnpc-python-cpp | branch_name: refs/heads/master | license_type: permissive | detected_licenses: [ "BSD-2-Clause" ]
path: /benchmark/addressbook.proto.py | extension: py | length_bytes: 1342 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 293fa724238f069bbc6baeb29ccc88ff5522b35d | directory_id: 68aa8afaae7429ea8e1760e4483ddc4cde90fe45 | content_id: 7311a99f5cdc479b9b5d120dfae92406b1e32d0e
snapshot_id: 0329e9eb99f9283a023d0bc9c911fec2696e88a6 | revision_id: 2fe61781f512d4f7ae0bdc5c0ece8672b24274f5
visit_date: 2021-01-18T10:01:48.233946 | revision_date: 2013-08-28T21:46:53 | committer_date: 2013-08-28T21:46:53
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
from __future__ import print_function
import addressbook_pb2 as addressbook
import os

# Shadow print with a no-op so the benchmark loop is not dominated by I/O.
print = lambda *x: x


def writeAddressBook(fd):
    addressBook = addressbook.AddressBook()

    alice = addressBook.person.add()
    alice.id = 123
    alice.name = 'Alice'
    alice.email = 'alice@example.com'
    alicePhones = [alice.phone.add()]
    alicePhones[0].number = "555-1212"
    alicePhones[0].type = addressbook.Person.MOBILE

    bob = addressBook.person.add()
    bob.id = 456
    bob.name = 'Bob'
    bob.email = 'bob@example.com'
    bobPhones = [bob.phone.add(), bob.phone.add()]
    bobPhones[0].number = "555-4567"
    bobPhones[0].type = addressbook.Person.HOME
    bobPhones[1].number = "555-7654"
    bobPhones[1].type = addressbook.Person.WORK

    message_string = addressBook.SerializeToString()
    fd.write(message_string)


def printAddressBook(fd):
    addressBook = addressbook.AddressBook()
    addressBook.ParseFromString(fd.read())

    for person in addressBook.person:
        print(person.name, ':', person.email)
        for phone in person.phone:
            print(phone.type, ':', phone.number)
        print()


if __name__ == '__main__':
    for i in range(10000):
        f = open('example', 'w')
        writeAddressBook(f)

        f = open('example', 'r')
        printAddressBook(f)

        os.remove('example')
[ "github@jparyani.com" ]
github@jparyani.com

repo_name: pdoming/NeroProducer | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /Bambu/bambuToNero.py | extension: py | length_bytes: 5400 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 09c0fefdd010970f39b250148bf0b0160b5f65a1 | directory_id: a00fdfc743262d3d9253bab1f2e8b10f99f013ee | content_id: 88f058034181c1d5bdb4ff97c5bcf43358b2fc8b
snapshot_id: 2a97101002c626d7f23f3c80e1abfaacc5c81968 | revision_id: 8082361fa0a05c83cc6c6aacb1bdd5de24f65115
visit_date: 2021-01-15T17:35:58.814592 | revision_date: 2015-07-25T16:50:39 | committer_date: 2015-07-25T16:50:39
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
from MitAna.TreeMod.bambu import mithep, analysis
import os

mitdata = os.environ['MIT_DATA']

from MitPhysics.Mods.GoodPVFilterMod import goodPVFilterMod
from MitPhysics.Mods.JetCorrectionMod import jetCorrectionMod
from MitPhysics.Mods.JetIdMod import jetIdMod
from MitPhysics.Mods.MetCorrectionMod import metCorrectionMod
from MitPhysics.Mods.PFTauIdMod import pfTauIdMod

pfTauIdMod.AddCutDiscriminator(mithep.PFTau.kDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits, 5., False)

from MitPhysics.Mods.ElectronIdMod import electronIdMod
from MitPhysics.Mods.MuonIdMod import muonIdMod
from MitPhysics.Mods.PhotonIdMod import photonIdMod
from MitPhysics.Mods.SeparatePileUpMod import separatePileUpMod

generatorMod = mithep.GeneratorMod(
    IsData = False,
    CopyArrays = False,
    MCMETName = "GenMet"
)

electronTightId = electronIdMod.clone('ElectronTightId',
    IsFilterMode = False,
    InputName = electronIdMod.GetOutputName(),
    OutputName = 'TightElectronId',
    IdType = mithep.ElectronTools.kPhys14Tight,
    IsoType = mithep.ElectronTools.kPhys14TightIso
)

muonTightId = muonIdMod.clone('MuonTightId',
    IsFilterMode = False,
    InputName = muonIdMod.GetOutputName(),
    OutputName = 'TightMuonId',
    IdType = mithep.MuonTools.kMuonPOG2012CutBasedIdTight,
    IsoType = mithep.MuonTools.kPFIsoBetaPUCorrected
)

muonTightIdMask = mithep.MaskCollectionMod('TightMuons',
    InputName = muonIdMod.GetOutputName(),
    MaskName = muonTightId.GetOutputName(),
    OutputName = 'TightMuons'
)

fatJetCorrectionMod = mithep.JetCorrectionMod('FatJetCorrection',
    InputName = 'AKt8PFJetsCHS',
    CorrectedJetsName = 'CorrectedFatJets',
    RhoAlgo = mithep.PileupEnergyDensity.kFixedGridFastjetAll
)

if analysis.isRealData:
    fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L1FastJet_AK8PFchs.txt")
    fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L2Relative_AK8PFchs.txt")
    fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L3Absolute_AK8PFchs.txt")
    fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L2L3Residual_AK8PFchs.txt")
else:
    fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L1FastJet_AK8PFchs.txt")
    fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L2Relative_AK8PFchs.txt")
    fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L3Absolute_AK8PFchs.txt")

fatJetIdMod = jetIdMod.clone('FatJetId',
    InputName = fatJetCorrectionMod.GetOutputName(),
    OutputName = 'GoodFatJets',
    MVATrainingSet = mithep.JetIDMVA.nMVATypes
)

photonMediumId = photonIdMod.clone('PhotonMediumId',
    IsFilterMode = False,
    InputName = photonIdMod.GetOutputName(),
    OutputName = 'PhotonMediumId',
    IdType = mithep.PhotonTools.kPhys14Medium,
    IsoType = mithep.PhotonTools.kPhys14Medium
)

photonTightId = photonMediumId.clone('PhotonTightId',
    OutputName = 'PhotonTightId',
    IdType = mithep.PhotonTools.kPhys14Tight,
    IsoType = mithep.PhotonTools.kPhys14Tight
)

head = 'HEAD'
tag = 'BAMBU_041'

fillers = []
fillers.append(mithep.nero.EventFiller(
    RhoAlgo = mithep.PileupEnergyDensity.kFixedGridFastjetAll
))
fillers.append(mithep.nero.VertexFiller(
    VerticesName = goodPVFilterMod.GetOutputName()
))
fillers.append(mithep.nero.JetsFiller(
    JetsName = jetIdMod.GetOutputName(),
    VerticesName = goodPVFilterMod.GetOutputName(),
    JetIDMVA = jetIdMod.GetJetIDMVA()
))
fillers.append(mithep.nero.TausFiller(
    TausName = pfTauIdMod.GetOutputName()
))
fillers.append(mithep.nero.LeptonsFiller(
    ElectronsName = electronIdMod.GetOutputName(),
    MuonsName = muonIdMod.GetOutputName(),
    ElectronIdsName = electronTightId.GetOutputName(),
    MuonIdsName = muonTightId.GetOutputName(),
    VerticesName = goodPVFilterMod.GetOutputName(),
    PFCandsName = mithep.Names.gkPFCandidatesBrn,
    NoPUPFCandsName = separatePileUpMod.GetPFNoPileUpName(),
    PUPFCandsName = separatePileUpMod.GetPFPileUpName()
))
fillers.append(mithep.nero.FatJetsFiller(
    FatJetsName = fatJetIdMod.GetOutputName()
))
fillers.append(mithep.nero.MetFiller(
    MetName = metCorrectionMod.GetOutputName(),
    MuonsName = muonTightIdMask.GetOutputName(),
    GenMetName = generatorMod.GetMCMETName()
))
fillers.append(mithep.nero.PhotonsFiller(
    PhotonsName = photonIdMod.GetOutputName(),
    MediumIdName = photonMediumId.GetOutputName(),
    TightIdName = photonTightId.GetOutputName(),
    VerticesName = goodPVFilterMod.GetOutputName()
))
fillers.append(mithep.nero.MonteCarloFiller())
fillers.append(mithep.nero.TriggerFiller())
fillers.append(mithep.nero.AllFiller())

neroMod = mithep.NeroMod(
    Info = 'Nero',
    Head = head,
    Tag = tag,
    FileName = 'nero.root',
    PrintLevel = 0
)
for filler in fillers:
    neroMod.AddFiller(filler)

sequence = goodPVFilterMod
if not analysis.isRealData:
    sequence *= generatorMod

sequence *= separatePileUpMod * \
    jetCorrectionMod * \
    jetIdMod * \
    metCorrectionMod * \
    pfTauIdMod * \
    electronIdMod * \
    muonIdMod * \
    photonIdMod * \
    electronTightId * \
    muonTightId * \
    muonTightIdMask * \
    fatJetCorrectionMod * \
    fatJetIdMod * \
    photonMediumId * \
    photonTightId * \
    neroMod

analysis.SetAllowNoHLTTree(True)
analysis.setSequence(sequence)
[ "yiiyama@mit.edu" ]
yiiyama@mit.edu

repo_name: LuckySalmon/SolarSystemSimulation | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /Planet Class.py | extension: py | length_bytes: 2133 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: f75b7cea7eec79e082a31dcc921f3ddc5b829510 | directory_id: 4774250f47b717f78f5be54b152bfc0fd46e49b3 | content_id: f476aedd5a763a55a0d0799dcaaaf07717eb9f63
snapshot_id: dfb6641aff8127a87260565eed4a1994cf46d494 | revision_id: 7c3917674ad2d5b70c2d768e7e6a98a2a140a9f7
visit_date: 2022-08-12T18:23:53.437959 | revision_date: 2020-05-02T23:21:43 | committer_date: 2020-05-02T23:21:43
github_id: 260792816 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
import random, math

# SBC is the Stefan Boltzmann constant, used in calculating luminosity.
SBC = 5.670367 * (10 ** -8)
# g is the gravitational constant
g = 6.674 * (10 ** -11)


# Name can be changed later.
# Velocity will later be changed to only allow stable orbits.
# Habitable will later be changed to depend on the star's habitable zone.
class Planet:
    def __init__(self, sun, name, distance):
        self.name = name
        #self.distanceFromStar = float(random.random()*10)  # measured in AU
        self.distanceFromStar = distance
        self.velocity = float(random.random()*10)  # measured in years
        self.mass = float(random.random()*10)      # measured in Earths
        self.radius = float(random.random()*10)    # measured in Earths
        self.volume = ((4/3)*math.pi*(self.radius**3))
        self.density = self.mass/self.volume
        # Use the star passed in as `sun`: the original referenced an
        # undefined global `star` here and dropped `self.` on the True branch.
        if sun.habitableZoneInner < self.distanceFromStar < sun.habitableZoneOuter:
            self.isHabitable = True
        else:
            self.isHabitable = False
        self.gravity = "placeholder"  # input the gravity calculation
        self.sun = sun


class Star:
    def __init__(self):
        self.name = "Sun"
        self.radius = float(random.randint(10000, 7000000))  # measured in km
        self.temperature = random.randint(2000, 27000)       # measured in kelvin
        if self.temperature <= 3500:
            self.color = "Red"
        elif 3500 < self.temperature <= 5000:
            self.color = "Orange"
        elif 5000 < self.temperature <= 8000:
            self.color = "Yellow"
        elif 8000 < self.temperature <= 17500:
            self.color = "Yellow"
        else:
            self.color = "Blue"
        # luminosity is used in calculating the habitable zone of a star
        self.surfaceArea = 4*math.pi*self.radius ** 2
        self.luminosity = SBC*self.surfaceArea*(self.temperature ** 4)  # THIS IS INCORRECT
        self.habitableZoneInner = math.sqrt(self.luminosity)*0.95
        self.habitableZoneOuter = math.sqrt(self.luminosity)*1.37
[ "LuckySalmon@users.noreply.github.com" ]
LuckySalmon@users.noreply.github.com
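A sketch of instantiating the two classes together (the name and distance are illustrative; note the file's own comment that the luminosity and habitable-zone math is still incorrect, so isHabitable is not physically meaningful):

    star = Star()
    earth = Planet(star, 'Earth', distance=1.0)  # distance in AU
    print(star.color, star.temperature)
    print(earth.name, earth.isHabitable)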

repo_name: tmsincomb/augpathlib | branch_name: refs/heads/master | license_type: permissive | detected_licenses: [ "MIT" ]
path: /augpathlib/remotes.py | extension: py | length_bytes: 33457 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: c383a0ab8b68f0818c9f72c933f6d128dad4b8a6 | directory_id: 3de707e3e7f3fcbf46700e1bf8d6c394a71410a2 | content_id: bc67fd0ba4611b6910d60347b5944246e289d464
snapshot_id: 984f1c8418e7e8eaa5675a3c209cbd745cdee3e7 | revision_id: ed9c0edff540741fca866780a3d043a3b7644f08
visit_date: 2022-11-17T12:03:13.852433 | revision_date: 2020-06-29T10:10:23 | committer_date: 2020-06-29T10:10:23
github_id: 276260552 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2020-07-01T02:47:31 | gha_created_at: 2020-07-01T02:47:30 | gha_language: null

content:
import os import sys import atexit import pathlib import warnings import subprocess from augpathlib import exceptions as exc from augpathlib.meta import PathMeta from augpathlib import caches, LocalPath from augpathlib.utils import _bind_sysid_, StatResult, cypher_command_lookup, log if os.name != 'nt': # pexpect on windows does not support pxssh # because it is missing spawn from pexpect import pxssh class RemotePath: """ Remote data about a remote object. """ _cache_class = None _debug = False # ugh this is such a bad implementation, let the remote paths exists # and init, and then just check if they exist, a path is not an object # that always dereferences ... what the heck was I thinking when I did this ... # we use a PurePath becuase we still want to key off this being local path # but we don't want any of the local file system operations to work by accident # so for example self.stat should return the remote value not the local value # which is what would happen if we used a PosixPath as the base class # need a way to pass the session information in independent of the actual path # abstractly having remote.data(global_id_for_local, self) # should be more than enough, the path object shouldn't need # to know that it has a remote id, the remote manager should # know that @classmethod def _new(cls, local_class, cache_class): """ when constructing a new remote using _new you MUST call init afterward to bind the remote api """ # FIXME 1:1ness issue from local -> cache # probably best to force the type of the cache # to switch if there are multiple remote mappings # since there can be only 1 local file with the same # path, a composite cache or a multi-remote cache # seems a bit saner, or require explicit switching of # the active remote if one-at-a-time semantics are desired newcls = type(cls.__name__, (cls,), dict(_local_class=local_class, _cache_class=cache_class)) local_class._remote_class = newcls local_class._cache_class = cache_class cache_class._remote_class = newcls cache_class._local_class = local_class newcls.weighAnchor() cache_class.weighAnchor() return newcls @classmethod def init(cls, identifier): """ initialize the api from an identifier and bind the root """ if not hasattr(cls, '_api'): cls._api = cls._api_class(identifier) cls.root = cls._api.root else: raise ValueError(f'{cls} already bound an api to {cls._api}') @classmethod def anchorToCache(cls, cache_anchor, init=True): # FIXME need to check for anchor after init and init after anchor if not hasattr(cls, '_cache_anchor'): if init: if not hasattr(cls, '_api'): cls.init(cache_anchor.id) if hasattr(cls, 'root') and cls.root != cache_anchor.id: raise ValueError('root and anchor ids do not match! ' f'{cls.root} != {cache_anchor.id}') cls._cache_anchor = cache_anchor return cls._cache_anchor else: raise ValueError(f'already anchored to {cls._cache_anchor}') @classmethod def anchorTo(cls, path, create=False): """ You already know the rock you want and you want the anchor stuck to it. """ # FIXME should we fail on create=True and exists? if isinstance(path, caches.CachePath): # FIXME the non-existence problem rears its head again return cls.anchorToCache(path) elif isinstance(path, LocalPath): # FIXME the non-existence problem rears its head again if path.cache: return cls.anchorToCache(path.cache) else: root = cls.root if isinstance(cls.root, cls) else cls(cls.root) if path.name != root.name: # unlike git you cannot clone to a folder with a different # name (for now ... 
maybe can figure out how in the future) raise ValueError('Path name and root name do not match.' f'{path.name} != {cls.root.name}') if create: return cls.dropAnchor(path.parent) # existing folder dealt with in dropAnchor else: raise ValueError(f'not creating {path} since create=False') else: raise TypeError(f"Don't know how to anchor to a {type(path)} {path}") @classmethod def _get_local_root_path(cls, parent_path=None): if parent_path is None: parent_path = cls._local_class.cwd() else: parent_path = cls._local_class(parent_path) root = cls(cls.root) # FIXME formalize the use of root path = parent_path / root.name return root, path @classmethod def smartAnchor(cls, parent_path=None): # work around the suspect logic # in the implementation below try: return cls.dropAnchor(parent_path=parent_path) except exc.RemoteAlreadyAnchoredError as e: root, path = cls._get_local_root_path(parent_path) if cls._cache_anchor == path.cache: return cls._cache_anchor else: raise e # possibly check if the anchor is the same? except exc.CacheExistsError as e: root, path = cls._get_local_root_path(parent_path) cls._cache_anchor = path.cache return cls._cache_anchor except exc.DirectoryNotEmptyError as e: root, path = cls._get_local_root_path(parent_path) if path.cache: cls._cache_anchor = path.cache return cls._cache_anchor else: raise e @classmethod def dropAnchor(cls, parent_path=None): """ If a _cache_anchor does not exist then create it, otherwise raise an error. If a local anchor already exists do not use this method. You know that the ship (path) is more or less in the right place but you don't know for sure exactly which rock the anchor will catch on (you don't know the name of the remote). """ if not hasattr(cls, '_cache_anchor'): root, path = cls._get_local_root_path(parent_path) if not path.exists(): if root.is_file(): raise NotImplementedError( 'Have not implemented mapping for individual files yet.') elif root.is_dir(): path.mkdir() else: raise NotImplementedError(f'What\'s a {root}?!') elif list(path.children): raise exc.DirectoryNotEmptyError(f'has children {path}') cls._cache_anchor = path.cache_init(root.id, anchor=True) # we explicitly do not handle the possible CacheExistsError here # so that there is a path where anchoring can fail loudly # we may not need that at the end of the day, but we will see return cls._cache_anchor else: raise exc.RemoteAlreadyAnchoredError(f'{cls} already anchored to ' f'{cls._cache_anchor}') @classmethod def weighAnchor(cls): # TODO determine whether the current behavior is correct # calling this will not cause the cache class to weigh anchor # but there is a small chance that it should # TODO is _abstract_class needed here? or do we not need it # because remote paths don't have the crazy hierarchy that # pathlib derived paths do? and will this change when we fix # everything ... if hasattr(cls, '_cache_anchor'): delattr(cls, '_cache_anchor') @classmethod def setup(cls, local_class, cache_class): """ call this once to bind everything together """ cn = self.__class__.__name__ warnings.warn(f'{cn}.setup is deprecated please switch to RemotePath._new', DeprecationWarning, stacklevel=2) cache_class.setup(local_class, cls) def bootstrap(self, recursive=False, only=tuple(), skip=tuple(), sparse=tuple()): #self.cache.remote = self # duh # if you forget to tell the cache you exist of course it will go to # the internet to look for you, it isn't quite smart enough and # we're trying not to throw dicts around willy nilly here ... 
return self.cache.bootstrap(self.meta, recursive=recursive, only=only, skip=skip, sparse=sparse) def __init__(self, thing_with_id, cache=None): if isinstance(thing_with_id, str): id = thing_with_id elif isinstance(thing_with_id, PathMeta): id = thing_with_id.id elif isinstance(thing_with_id, RemotePath): id = thing_with_id.id else: raise TypeError(f'Don\'t know how to initialize a remote from {thing_with_id}') self._id = id if cache is not None: self._cache = cache self.cache._remote = self self._errors = [] @property def id(self): return self._id @property def errors(self): raise NotImplementedError @property def cache(self): if hasattr(self, '_cache_anchor') and self._cache_anchor is not None: return self._cache else: # cache is not real class NullCache: @property def local(self, remote=self): raise TypeError(f'No cache for {remote}') @property def _are_we_there_yet(self, remote=self): # this is useless since these classes are ephemoral if hasattr(remote, '_cache_anchor') and remote._cache_anchor is not None: remote.cache_init() def __rtruediv__(self, other): return None def __truediv__(self, other): return None return NullCache() def cache_init(self, parents=False): try: return self._cache_anchor / self except FileNotFoundError: if parents: #parent, *rest = self.parent.cache_init(parents=parents) #return (self.cache_init(), parent, *rest) parent = self.parent parent_cache = parent.cache_init(parents=parents) parent_cache.local.cache_init(parent.meta) # FIXME hrm we shouldn't have to do this # and it isn't working anyway ... the xattrs don't seem to be getting set return self.cache_init() else: raise @property def _cache(self): """ To catch a bad call to set ... """ if hasattr(self, '_c_cache'): return self._c_cache @_cache.setter def _cache(self, cache): if not isinstance(cache, caches.CachePath): raise TypeError(f'cache is a {type(cache)} not a CachePath!') #elif cache.meta is None: # useful for certain debugging situations #raise ValueError(f'cache has no meta {cache}') self._c_cache = cache def _cache_setter(self, cache, update_meta=True): cache._remote = self # FIXME in principle # setting cache needs to come before update_meta # in the event that self.meta is missing file_id # if meta updater fails we unset self._c_cache self._cache = cache if update_meta: try: cache._meta_updater(self.meta) except BaseException as e: self._c_cache = None delattr(self, '_c_cache') raise e @property def parent_id(self): """ BEWARE if self.parent hits the network so will this. In the event that it does, overwrite this method. """ return self.parent.id def _parent_changed(self, cache): return self.parent_id != cache.parent.id def _on_cache_move_error(self, error, cache): """ called after a failure to move a cached file to a new location """ raise error def update_cache(self, cache=None, fetch=True): """ Update a cache object using the metadata attached to this remote. This is different form _cache_setter in that it runs update_meta by default, handles many more edge cases, and checks for consistency. _cache_setter is usually invoked internally by a CachePath method that wants to register itself with a remote as an implementaiton detail. 
""" if cache is not None and self.cache is not None: # TODO see if there are any exceptions to this behavior raise TypeError('cannot accept cache kwarg when self.cache not None') elif cache is None: cache = self.cache parent_changed = self._parent_changed(cache) if self.cache is None: # HACK test if cache is not None before it may have been reassigned if cache.name != self.name: msg = ('Cannot update the name and content of a file at the ' 'same time.\nAre you sure you have passed the right ' f'cache object?\n{cache.name} != {self.name}') raise ValueError(msg) elif parent_changed: msg = ('Cannot update the parent and content of a file at the ' 'same time.\nAre you sure you have passed the right ' f'cache object?\n{cache.parent.id} != {self.parent_id}') raise ValueError(msg) log.debug(f'maybe updating cache for {self.name}') file_is_different = cache._meta_updater(self.meta, fetch=fetch) # update the cache first # FIXME this may be out of order ... # then move to the new name if relevant # prevents moving partial metadata onto existing files if cache.name != self.name or parent_changed: # this is localy correct # the issue is that move is now smarter # and will detect if a parent path has changed try: cache.move(remote=self) except exc.WhyDidntThisGetMovedBeforeError as e: # AAAAAAAAAAAAAAAAAAAAAAAAAAAAA # deal with the sadness that is non-unique filenames # I am 99.999999999999999% certain that users do not # expect this behavior ... log.error(e) self._on_cache_move_error(e, cache) return file_is_different @property def local(self): return self.cache.local # FIXME there are use cases for bypassing the cache ... @property def local_direct(self): # kind of uninstrumeted ??? return self._local_class(self.as_path()) @property def anchor(self): """ the semantics of anchor for remote paths are a bit different RemotePath code expects this function to return a RemotePath NOT a string as is the case for core pathlib. """ raise NotImplementedError @property def _meta(self): # catch stragglers raise NotImplementedError def refresh(self): """ Refresh the local in memory metadata for this remote. Implement actual functionality in your subclass. """ raise NotImplementedError # could be fetch or pull, but there are really multiple pulls as we know # clear the cached value for _meta if hasattr(self, '_meta'): delattr(self, '_meta') @property def data(self): raise NotImplementedError self.cache.id for chunk in chunks: yield chunk @property def meta(self): # on blackfynn this is the package id or object id # this will error if there is no implementaiton if self.id raise NotImplementedError #return PathMeta(id=self.id) def _meta_setter(self, value): raise NotImplementedError @property def annotations(self): # these are models etc in blackfynn yield from [] raise NotImplementedError def as_path(self): """ returns the relative path construction for the child so that local can make use of it """ return pathlib.PurePath(*self.parts) def _parts_relative_to(self, remote, cache_parent=None): parent_names = [] # FIXME massive inefficient due to retreading subpaths :/ # have a look at how pathlib implements parents parent = self.parent if parent != remote: parent_names.append(parent.name) # FIXME can this go stale? if so how? 
#log.debug(cache_parent) if cache_parent is not None and parent.id == cache_parent.id: for c_parent in cache_parent.parents: if c_parent is None: continue elif c_parent.name == remote.name: # FIXME trick to avoid calling id parent_names.append(c_parent.name) # since be compare one earlier we add here break else: parent_names.append(c_parent.name) else: for parent in parent.parents: if parent == remote: break elif parent is None: continue # value error incoming else: parent_names.append(parent.name) else: self._errors += ['file-deleted'] msg = f'{remote} is not one of {self}\'s parents' log.error(msg) #raise ValueError() args = (*reversed(parent_names), self.name) elif self == parent: args = ('',) else: args = self.name, return args @property def parts(self): if self == self.anchor: return tuple() if not hasattr(self, '_parts'): if self.cache: cache_parent = self.cache.parent else: cache_parent = None self._parts = tuple(self._parts_relative_to(self.anchor, cache_parent)) return self._parts @property def parent(self): """ The atomic parent operation as understood by the remote. """ raise NotImplementedError @property def parents(self): parent = self.parent while parent: yield parent parent = parent.parent @property def children(self): # uniform interface for retrieving remote hierarchies decoupled from meta raise NotImplementedError @property def rchildren(self): # uniform interface for retrieving remote hierarchies decoupled from meta yield from self._rchildren() def _rchildren(self, create_cache=True, sparse=False): raise NotImplementedError def children_pull(self, existing): # uniform interface for asking the remote to # update children using its own implementation raise NotImplementedError def iterdir(self): # I'm guessing most remotes don't support this raise NotImplementedError def glob(self, pattern): raise NotImplementedError def rglob(self, pattern): raise NotImplementedError def __eq__(self, other): return self.id == other.id def __ne__(self, other): return not self == other def __repr__(self): return f'{self.__class__.__name__}({self.id!r})' class SshRemote(RemotePath, pathlib.PurePath): """ Testing. To be used with ssh-agent. StuFiS The stupid file sync. 
""" _cache_class = None # set when calling __new__ encoding = 'utf-8' _meta = None # override RemotePath dragnet _meta_maker = LocalPath._meta_maker sysid = None _bind_sysid = classmethod(_bind_sysid_) @classmethod def _new(cls, local_class, cache_class): newcls = super()._new(local_class, cache_class) # must run before we can get the sysid, which is a bit odd # given that we don't actually sandbox the filesystem newcls._bind_flavours() return newcls @classmethod def _bind_flavours(cls, pos_helpers=tuple(), win_helpers=tuple()): pos, win = cls._get_flavours() if pos is None: pos = type(f'{cls.__name__}Posix', (*pos_helpers, cls, pathlib.PurePosixPath), {}) if win is None: win = type(f'{cls.__name__}Windows', (*win_helpers, cls, pathlib.PureWindowsPath), {}) cls.__abstractpath = cls cls.__posixpath = pos cls.__windowspath = win @classmethod def _get_flavours(cls): pos, win = None, None for subcls in cls.__subclasses__(): # direct only if subcls._flavour is pathlib._posix_flavour: pos = subcls elif subcls._flavour is pathlib._windows_flavour: win = subcls else: raise TypeError(f'unknown flavour for {cls} {cls._flavour}') return pos, win def __new__(cls, *args, **kwargs): if not hasattr(cls, '_flavour'): cls = cls.__windowspath if os.name == 'nt' else cls.__posixpath if isinstance(args[0], str) and args[0].startswith(cls.host + ':'): # FIXME not great but allows less verbose where possible ... # also possibly an opportunity to check if hostnames match? # ugh unix everything is a stream of bytes is annoying here _, *args = (args[0].split(':', 1), *args[1:]) _self = pathlib.PurePath.__new__(cls, *args) # no kwargs since the only kwargs are for init _self.remote_platform = _self._remote_platform return _self # TODO this isn't quite working yet due to bootstrapping issues as usual # it also isn't working because we want access to all paths in many cases # the root remains and the calculation against anchor remains for any # relative path that is provided, and the default behavior for absolute # paths protects us from sillyness if _self.id != cls.root: #_cache_anchor.id: self = _self.relative_to(_self.anchor) else: self = pathlib.PurePath.__new__(cls, '.') # FIXME make sure this is interpreted correctly ... self._errors = [] return self @classmethod def init(cls, host_path): """ should only be invoked after _new has bound local and cache classes """ if not hasattr(cls, '_anchor'): cls.root = host_path # I think this is right ... 
host, path = host_path.split(':', 1) if not hasattr(cls, '_flavour'): cls = cls.__windowspath if os.name == 'nt' else cls.__posixpath cls._anchor = pathlib.PurePath.__new__(cls, path) session = pxssh.pxssh(options=dict(IdentityAgent=os.environ.get('SSH_AUTH_SOCK'))) session.login(host, ssh_config=LocalPath('~/.ssh/config').expanduser().as_posix()) cls._rows = 200 cls._cols = 200 session.setwinsize(cls._rows, cls._cols) # prevent linewraps of long commands session.prompt() atexit.register(lambda:(session.sendeof(), session.close())) cls.host = host cls.session = session cls._uid, *cls._gids = [int(i) for i in (cls._ssh('echo $(id -u) $(id -G)') .decode().split(' '))] else: raise ValueError(f'{cls} already bound an remote to {cls._anchor}') @classmethod def anchorToCache(cls, cache_anchor, init=True): anchor = super().anchorToCache(cache_anchor=cache_anchor, init=init) # _cache_anchor has to be bound for _bind_sysid to work # that binding happens after init so we do this here cls._bind_sysid() return anchor def __init__(self, thing_with_id, cache=None): if isinstance(thing_with_id, pathlib.PurePath): thing_with_id = thing_with_id.as_posix() super().__init__(thing_with_id, cache=cache) @property def anchor(self): return self._anchor #return self._cache_anchor.remote # FIXME warning on relative paths ... # also ... might be convenient to allow # setting non-/ anchors, but perhaps for another day #return self.__class__('/', host=self.host) @property def id(self): return f'{self.host}:{self.rpath}' #return self.host + ':' + self.as_posix() # FIXME relative to anchor? @property def cache_key(self): """ since some systems have compound ids ... """ raise NotImplementedError @property def rpath(self): # FIXME relative paths when the anchor is set differently # the anchor will have to be stored as well since there coulde # be many possible anchors per host, thus, if an anchor relative # identifier is supplied then we need to construct the full path # conveniently in this case if self is a fully rooted path then # it will overwrite the anchor path # TODO make sure that the common path is the anchor ... return (self.anchor / self).as_posix() def _parts_relative_to(self, remote, cache_parent=None): if remote == self.anchor: # have to build from self.anchor._parts because it is the only # place the keeps the original parts remote = pathlib.PurePath(*self.anchor._parts) return self.relative_to(remote).parts def refresh(self): # TODO probably not the best idea ... raise NotImplementedError('This baby goes to the network every single time!') def access(self, mode): """ types are 'read', 'write', and 'execute' """ try: st = self.stat() except (PermissionError, FileNotFoundError) as e: return False r, w, x = 0x124, 0x92, 0x49 read = ((r & st.st_mode) >> 2) & (mode == 'read' or mode == os.R_OK) * x write = ((w & st.st_mode) >> 1) & (mode == 'write' or mode == os.W_OK) * x execute = (x & st.st_mode) & (mode == 'execute' or mode == os.X_OK) * x current = read + write + execute u, g, e = 0x40, 0x8, 0x1 return (u & current and st.st_uid == self._uid or g & current and st.st_gid in self._gids or e & current) def open(self, mode='wt', buffering=-1, encoding=None, errors=None, newline=None): if mode not in ('wb', 'wt'): raise TypeError('only w[bt] mode is supported') # TODO ... 
#breakpoint() return class Hrm: session = self.session def write(self, value): self.session #cmd = ['ssh', self.host, f'"cat - > {self.rpath}"'] #self.session #p = subprocess.Popen() @property def data(self): cmd = ['scp', self.id, '/dev/stdout'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) while True: data = p.stdout.read(4096) # TODO hinting if not data: break yield data p.communicate() # reuse meta from local # def meta (make it easier to search for this) meta = LocalPath.meta # magic #def _ssh(self, remote_cmd): @classmethod def _ssh(cls, remote_cmd): #print(remote_cmd) if len(remote_cmd) > cls._cols: raise exc.CommandTooLongError n_bytes = cls.session.sendline(remote_cmd) cls.session.prompt() raw = cls.session.before out = raw[n_bytes + 1:].strip() # strip once here since we always will #print(raw) #print(out) return out @property def _remote_platform(self): remote_cmd = "uname -a | awk '{ print tolower($1) }'" return self._ssh(remote_cmd).decode(self.encoding) @property def cypher_command(self): # this one is a little backwards, because we can control # whatever cypher we want, unlike in other cases return cypher_command_lookup[self._cache_class.cypher] def checksum(self): remote_cmd = (f'{self.cypher_command} {self.rpath} | ' 'awk \'{ print $1 }\';') hex_ = self._ssh(remote_cmd).decode(self.encoding) log.debug(hex_) return bytes.fromhex(hex_) def _stat_cmd(self, stat_format=StatResult.stat_format, path=None): # TODO use _stat_format_darwin for cases where gstat is missing cmd = 'gstat' if self.remote_platform == 'darwin' else 'stat' if path is None: path = self.rpath if path == '': _path = path else: _path = f' "{path}"' return f'{cmd} -c {stat_format}{_path}' def stat(self): remote_cmd = self._stat_cmd() out = self._ssh(remote_cmd) try: return StatResult(out) except ValueError as e: if out.endswith(b'Permission denied'): raise PermissionError(out.decode()) elif out.endswith(b'No such file or directory'): raise FileNotFoundError(out.decode()) else: log.error(remote_cmd) raise ValueError(out) from e def exists(self): try: st = self.stat() return bool(st) # FIXME except FileNotFoundError: # FIXME there will be more types here ... pass @property def __parent(self): # no longer needed since we inherit from path directly # because the identifiers are paths if we move # file.ext to another folder, we treat it as if it were another file # at least for this SshRemote path, if we move a file on our end # the we had best update our cache # if someone else moves the file on the remote, well, then # that file simply vanishes since we weren't notified about it # if there is a remote transaction log we can replay if there isn't # we have to assume the file was deleted or check all the names and # hashes of new files to see if it has moved (and not been changed) # a move and change without a sync will be bad for us # If you have an unanchored path then resolve() # always operates under the assumption that the # current working directory which I think is incorrect # as soon as you start passing unresolved paths around # the remote system doesn't know what context you are in # so we need to fail loudly # basically force people to manually resolve their paths return self.__class__(self.cache.parent) # FIXME not right ... 
def is_dir(self): remote_cmd = self._stat_cmd(stat_format="%F") out = self._ssh(remote_cmd) return out == b'directory' def is_file(self): remote_cmd = self._stat_cmd(stat_format="%F") out = self._ssh(remote_cmd) return out == b'regular file' @property def children(self): # this is amusingly bad, also children_recursive ... drop the maxdepth #("find ~/files/blackfynn_local/SPARC\ Consortium -maxdepth 1 " #"-exec stat -c \"'%n' %o %s %W %X %Y %Z %g %u %f\" {} \;") # chechsums when listing children? maybe ... #\"'%n' %o %s %W %X %Y %Z %g %u %f\" if self.is_dir(): # no children if it is a file sadly remote_cmd = (f"cd {self.rpath};" f"{self._stat_cmd(path='')} {{.,}}*;" "echo '----';" f"{self.cypher_command} {{.,}}*;" # FIXME fails on directories destroying alignment 'cd "${OLDPWD}"') out = self._ssh(remote_cmd) stats, checks = out.split(b'\r\n----\r\n') #print(stats) stats = {sr.name:sr for s in stats.split(b'\r\n') for sr in (StatResult(s),)} checks = {fn:bytes.fromhex(cs) for l in checks.split(b'\r\n') if not b'Is a directory' in l for cs, fn in (l.decode(self.encoding).split(' ', 1),)} return stats, checks # TODO def _mkdir_child(self, child_name): raise NotImplementedError('implement in subclass and/or fix instantiation/existence issues') def __repr__(self): return f'{self.__class__.__name__}({self.rpath!r}, host={self.host!r})' SshRemote._bind_flavours()
[ "tgbugs@gmail.com" ]
tgbugs@gmail.com

repo_name: rachmadaniHaryono/flask-reverse-proxy-fix | branch_name: refs/heads/master | license_type: permissive | detected_licenses: [ "OGL-UK-3.0" ]
path: /tests/test_flask_reverse_proxy_middleware_path_prefix.py | extension: py | length_bytes: 1797 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: a7d0d5f5718864db29eb82f729063d29482ad9a9 | directory_id: 812521a7dc172551572b36ee1ecca4bd76e6f8b2 | content_id: d9a7c1e9df33f5f4002294c8a974b22147f4b0d9
snapshot_id: 8b1128646431706218fe52007c3282ab6996221a | revision_id: a6a9b6e91d9e2152a1f34109249905ab609377a5
visit_date: 2023-01-23T08:18:00.977832 | revision_date: 2023-01-21T02:33:38 | committer_date: 2023-01-21T02:33:38
github_id: 239060743 | star_events_count: 1 | fork_events_count: 3 | gha_license_id: null | gha_event_created_at: 2020-02-08T03:04:11 | gha_created_at: 2020-02-08T03:04:10 | gha_language: null

content:
import unittest
from http import HTTPStatus

from flask_reverse_proxy_fix.middleware import ReverseProxyPrefixFix

from app import create_app_with_middleware, create_app_without_middleware


class FlaskReverseProxyMiddlewarePathPrefixTestCase(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        self.app_context.pop()

    def test_with_prefix(self):
        self.app = create_app_with_middleware()
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()

        self.app.config['REVERSE_PROXY_PATH'] = '/foo'
        ReverseProxyPrefixFix(self.app)

        expected_url = 'http://localhost:9000/test/sample'

        response = self.client.get(
            '/sample',
            base_url='http://localhost:9000'
        )
        json_response = response.get_json()

        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertIn('links', json_response.keys())
        self.assertIn('self', json_response['links'].keys())
        self.assertEqual(expected_url, json_response['links']['self'])

    def test_without_prefix(self):
        self.app = create_app_without_middleware()
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()

        expected_url = 'http://localhost:9000/sample'

        response = self.client.get(
            '/sample',
            base_url='http://localhost:9000'
        )
        json_response = response.get_json()

        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertIn('links', json_response.keys())
        self.assertIn('self', json_response['links'].keys())
        self.assertEqual(expected_url, json_response['links']['self'])
[ "felix@felixfennell.co.uk" ]
felix@felixfennell.co.uk
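The middleware exercised by these tests is wired into an application like this; a minimal sketch assuming a plain Flask app (the route and prefix are illustrative):

    from flask import Flask
    from flask_reverse_proxy_fix.middleware import ReverseProxyPrefixFix

    app = Flask(__name__)
    # Prefix that the reverse proxy strips; the fix re-adds it to generated URLs.
    app.config['REVERSE_PROXY_PATH'] = '/foo'
    ReverseProxyPrefixFix(app)


    @app.route('/sample')
    def sample():
        return 'hello'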

repo_name: MGRNascimento/Tese | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /build/PCL-ROS-cluster-Segmentation/cmake/sensor_stick-genmsg-context.py | extension: py | length_bytes: 992 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: ba45777ebf476d635254faf1c942e070055b6fc5 | directory_id: c463e77c3d76e6b4810e202541d3f3f7f91bcf60 | content_id: 31a011a3d2b1087f74bbb8bde784bccea1893805
snapshot_id: 18087ee59dfee96ee000c9f16c646d1750174285 | revision_id: bf78d417849a74d9c5a520d40dcbebeadf084706
visit_date: 2020-06-23T13:57:01.699657 | revision_date: 2019-10-23T21:47:19 | committer_date: 2019-10-23T21:47:19
github_id: 198638709 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
# generated from genmsg/cmake/pkg-genmsg.context.in

messages_str = "/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/DetectedObjectsArray.msg;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/DetectedObject.msg;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/SegmentedClustersArray.msg"
services_str = "/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/srv/GetNormals.srv"
pkg_name = "sensor_stick"
dependencies_str = "std_msgs;sensor_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "sensor_stick;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
[ "relvas.cvg@gmail.com" ]
relvas.cvg@gmail.com

repo_name: Bharadwaja92/DataInterviewQuestions | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /Questions/Q_093_RentalCarLocations.py | extension: py | length_bytes: 909 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: feab3ebba8930e7e527605d29f696b086b58d027 | directory_id: 4c3094a869f59be8836993469b28f088fef9fff1 | content_id: 35739a04cd88935d0ee54e3e84963fad486f00b2
snapshot_id: d885d40da4d546a164eee37e7250ddb519fc8954 | revision_id: 5b002f34c3b1440f4347a098f7ce1db84fc80e7f
visit_date: 2022-11-06T08:57:49.283013 | revision_date: 2020-06-22T09:10:40 | committer_date: 2020-06-22T09:10:40
github_id: 269247468 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
""""""""" Suppose you're working for a car rental company, looking to model potential location distribution of their cars at major airports. The company operates in LA, SF, and San Jose. Customers regularly pickup a car in one of these 3 cities and drop it off in another. The company is looking to compute how likely it is that a given car will end up in a given city. You can model this as a Markov chain (where each time step corresponds to a new customer taking the car). The transition probabilities of the company's car allocation by city is as follows: SF | LA | San Jose 0.6 0.1 0.3 | SF 0.2 0.8 0.3 | LA 0.2 0.1 0.4 | San Jose As shown, the probability a car stays in SF is 0.6, the probability it moves from SF to LA is 0.2, SF to San Jose is 0.2, etc. Using the information above, determine the probability a car will start in SF but move to LA right after. """
[ "saibharadwaj.kh@gaiansolutions.com" ]
saibharadwaj.kh@gaiansolutions.com
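A sketch of the computation the question asks for; NumPy is used for illustration. Columns of the matrix are the "from" city and rows the "to" city, so the answer is read straight off the SF column:

    import numpy as np

    # P[to, from]; each column sums to 1.
    P = np.array([[0.6, 0.1, 0.3],   # to SF
                  [0.2, 0.8, 0.3],   # to LA
                  [0.2, 0.1, 0.4]])  # to San Jose

    print(P[1, 0])  # P(SF -> LA) = 0.2

    # Equivalently, one Markov step from 'all cars in SF':
    x0 = np.array([1.0, 0.0, 0.0])  # SF, LA, San Jose
    print(P @ x0)                   # [0.6, 0.2, 0.2]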

repo_name: mcadhoc/Flask-AppBuilder-Skeleton | branch_name: refs/heads/master | license_type: no_license | detected_licenses: []
path: /app/app.py | extension: py | length_bytes: 1493 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: e8b3288c3110b8995c15af8e6e5b8b9a674bc56d | directory_id: 3d4d6dc268e605f81c280d94cdf16d7633313a15 | content_id: 998ea2648ab00a7a7fb927656282f36feeed4e18
snapshot_id: 6e0e4d78b0aee11c7632442e7f9dfd0a45d371f2 | revision_id: e840fd3db6e065cf87e7c1333aea26292080f3fc
visit_date: 2020-06-20T07:21:56.102240 | revision_date: 2019-07-16T19:17:42 | committer_date: 2019-07-16T19:17:42
github_id: 197040763 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2019-07-16T19:17:43 | gha_created_at: 2019-07-15T17:09:54 | gha_language: Python

content:
from flask import Flask, render_template, request
from flask import make_response

# Imports hoisted from the bottom of the file; unused ones
# (redirect, url_for, lxml.etree) were dropped.
from textblob import TextBlob
from bs4 import BeautifulSoup
from urllib.request import urlopen

app = Flask(__name__)

url = 'https://www.foxnews.com/politics/aoc-squad-news-conference-trump-call-go-back-home'


@app.route("/")
def home():
    return "hi"


@app.route("/index")
def index():
    return render_template('login.html', message='')


@app.route('/login', methods=['GET', 'POST'])
def login():
    if request.method == 'GET':
        datafromjs = request
        print(datafromjs)
        # Add Summarization and Bias code here.
        result = subjectivity(url)
        resp = make_response('{"response": "' + str(result) + '"}')
        resp.headers['Content-Type'] = "application/json"
        print(resp)
        return resp


def subjectivity(url):
    html = urlopen(url)
    soup = BeautifulSoup(html, "lxml")
    # https://stackoverflow.com/questions/22799990/beatifulsoup4-get-text-still-has-javascript
    for script in soup(["script", "style"]):
        script.decompose()
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    text = '\n'.join(chunk for chunk in chunks if chunk)
    return TextBlob(text).sentiment.subjectivity
    # return text.encode("utf-8")


if __name__ == "__main__":
    app.run(debug=True)
[ "yottzumm@gmail.com" ]
yottzumm@gmail.com

repo_name: dss2194/mymoney-server | branch_name: refs/heads/master | license_type: permissive | detected_licenses: [ "BSD-3-Clause" ]
path: /mymoney/core/tests/test_validators.py | extension: py | length_bytes: 1294 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: 6e61a410a9a7f1971524fc55c3bfdf11aa95a064 | directory_id: ceba110faf303886894c256739c759842d0f36e7 | content_id: b31f4512e412d79bf186b3732ec103b5037e1679
snapshot_id: 2ded0eec3a6fe31df0a25161d09002778ec76e47 | revision_id: 40dc9fdd08b3561287a9153342b25c58de8ad8ce
visit_date: 2022-03-06T13:21:06.482583 | revision_date: 2018-09-06T12:23:56 | committer_date: 2018-09-09T10:33:21
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
from django.test import TestCase
from rest_framework import serializers

from ..validators import MinMaxValidator


class MinMaxValidatorTestCase(TestCase):

    def test_no_min(self):
        validator = MinMaxValidator(field_min='min', field_max='max')
        validator(data={'max': 10})

    def test_no_max(self):
        validator = MinMaxValidator(field_min='min', field_max='max')
        validator(data={'min': 10})

    def test_zero_lower(self):
        validator = MinMaxValidator(field_min='min', field_max='max')
        validator(data={'min': 0, 'max': 10})

    def test_zero_greater(self):
        validator = MinMaxValidator(field_min='min', field_max='max')
        with self.assertRaises(serializers.ValidationError):
            validator(data={'min': 10, 'max': 0})

    def test_greater(self):
        validator = MinMaxValidator(field_min='min', field_max='max')
        with self.assertRaises(serializers.ValidationError):
            validator(data={'min': -10, 'max': -20})

    def test_equal(self):
        validator = MinMaxValidator(field_min='min', field_max='max')
        validator(data={'min': 5.5, 'max': 5.5})

    def test_lower(self):
        validator = MinMaxValidator(field_min='min', field_max='max')
        validator(data={'min': 5.4, 'max': 5.5})
[ "yannick.chabbert@gmail.com" ]
yannick.chabbert@gmail.com
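The contract these tests imply (a validator constructed with two field names, called with a data dict, raising when min exceeds max) can be met with roughly the following; a sketch, not the project's actual implementation:

    from rest_framework import serializers


    class MinMaxValidator:
        """Reject data where the field_min value exceeds the field_max value."""

        def __init__(self, field_min, field_max):
            self.field_min = field_min
            self.field_max = field_max

        def __call__(self, data):
            vmin = data.get(self.field_min)
            vmax = data.get(self.field_max)
            # Compare only when both bounds are present (test_no_min/test_no_max);
            # equal bounds are allowed (test_equal).
            if vmin is not None and vmax is not None and vmin > vmax:
                raise serializers.ValidationError(
                    '%s must be lower than or equal to %s'
                    % (self.field_min, self.field_max))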

repo_name: yutanakamura-tky/MedNER-J | branch_name: refs/heads/master | license_type: permissive | detected_licenses: [ "BSD-2-Clause" ]
path: /tests/test_ner.py | extension: py | length_bytes: 1623 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
blob_id: a1b04624df6910adad210fe98bb6ade2e31d986b | directory_id: b772048db1d84de6071dcb3978b6f548d2b42ae4 | content_id: 25161ef7c203bccec745b1000a646113cac4af98
snapshot_id: 46ca13d87b6c4977b4042915ff2105ab4dc62d88 | revision_id: a0c68a32553bbbdb9f5ae5fd41584198951bc14c
visit_date: 2023-08-21T23:05:22.645001 | revision_date: 2021-08-10T02:34:45 | committer_date: 2021-08-10T02:34:45
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

content:
import unittest

from medner_j import Ner


class TestNer(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = Ner.from_pretrained(model_name="BERT", normalizer="dict")
        cls.examples = [
            'それぞれの関節に関節液貯留は見られなかった',
            'その後、左半身麻痺、CTにて右前側頭葉の出血を認める。 ',
        ]
        cls.xmls = [
            'それぞれの関節に<CN value="かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留">関節液貯留</CN>は見られなかった',
            'その後、<C value="ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺">左半身麻痺</C>、CTにて右前側頭葉の<C value="しゅっけつ;icd=R58;lv=S/freq=高;出血">出血</C>を認める。 ',
        ]
        cls.dicts = [
            [{"span": (8, 13), "type": "CN", "disease": "関節液貯留",
              "norm": "かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留"}],
            [{"span": (4, 9), "type": "C", "disease": "左半身麻痺",
              "norm": "ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺"},
             {"span": (20, 22), "type": "C", "disease": "出血",
              "norm": "しゅっけつ;icd=R58;lv=S/freq=高;出血"}],
        ]

    def test_xml(self):
        results = self.model.predict(self.examples)
        self.assertEqual(results, self.xmls)

    def test_dict(self):
        results = self.model.predict(self.examples, output_format="dict")
        self.assertEqual(results, self.dicts)

    @classmethod
    def tearDownClass(cls):
        del cls.model
        del cls.examples
        del cls.xmls
        del cls.dicts
[ "suzzz428@gmail.com" ]
suzzz428@gmail.com
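A minimal usage sketch of the API these tests exercise (the input sentence is taken from the tests; model weights download on first use):

from medner_j import Ner

model = Ner.from_pretrained(model_name="BERT", normalizer="dict")
sents = ["それぞれの関節に関節液貯留は見られなかった"]
print(model.predict(sents))                        # XML-tagged strings
print(model.predict(sents, output_format="dict"))  # span/type/disease/norm dicts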
16ac483d71ea83b3969b16eb429e6f84f5674a9f
34d5a8760dc3cfe71aab7a6f70d61eb319308b1e
/student/affaircourse/migrations/0005_studentcourse_cournum.py
35a061c3294f36d3577218c73b734652921aa47b
[]
no_license
Lmagicport/Student-affairs
a4c5a9e0d92967b5ebbf926fee98ea786692cc27
007f35f0d30e16635e9a386fb95351895f5b092a
refs/heads/main
2023-02-01T06:25:10.583127
2020-12-15T08:33:34
2020-12-15T08:33:34
307,300,571
5
2
null
null
null
null
UTF-8
Python
false
false
434
py
# Generated by Django 3.1.2 on 2020-11-19 13:09

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('affaircourse', '0004_studentcourse_ispass'),
    ]

    operations = [
        migrations.AddField(
            model_name='studentcourse',
            name='CourNum',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
[ "2034913866@qq.com" ]
2034913866@qq.com
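Because the new column is non-nullable, the migration supplies a one-off default (0) for existing rows and then discards it via preserve_default=False; the model afterwards simply declares (a sketch of the relevant field only, the rest of the model is not in this record):

from django.db import models


class StudentCourse(models.Model):
    CourNum = models.IntegerField()   # no default is kept after the migration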
529c3a6e35142acdd0bfc6805bdf3d08e6bb3cfb
d3ad5201a6479e7522e87598b50600468a1a3c97
/workers/proxies.py
c20818e6d77437bff982605e2db13d1327de43ea
[]
no_license
ludalex/storechecker.io
ba736e9869018db699d2351adb1683c600d069af
c396d34dc57068058ae88427e36b059f824f43e5
refs/heads/master
2021-01-23T22:38:28.426417
2014-11-15T13:27:33
2014-11-15T13:27:33
6,759,656
2
0
null
2014-11-15T13:24:48
2012-11-19T12:00:10
PHP
UTF-8
Python
false
false
2,252
py
from datetime import datetime
import re
import sqlite3

from bs4 import BeautifulSoup
from requests import session

conn = sqlite3.connect('/var/www/n4checker/db/storechecker.db')
db = conn.cursor()

# Start from a clean proxies table on every run.
db.execute("delete from proxies")
conn.commit()

with session() as c:
    for page in range(1, 12):
        hma = c.get("http://hidemyass.com/proxy-list/" + str(page))
        soup = BeautifulSoup(hma.text)

        trs = soup.findAll("tr")
        trs.pop(0)  # drop the header row

        for tr in trs:
            tds = tr.findAll("td")
            ips = tds[1]
            port = tds[2].renderContents()
            proxyCountry = tds[3].find("span").text.strip()
            proxySpeed_style = tds[4].find("div").find("div")['style']
            proxySpeed = re.findall('width\:(.*?)\%;', proxySpeed_style)[0].strip()
            proxyConnectionTime_style = tds[5].find("div").find("div")['style']
            proxyConnectionTime = re.findall('width\:(.*?)\%;', proxyConnectionTime_style)[0].strip()
            proxyType = tds[6].renderContents()

            # HMA obfuscates the IP column with CSS-hidden spans; strip every
            # element whose inline style or class renders it invisible.
            classesToStrip = []
            style = ips.find("style")
            for styleLine in style.renderContents().split('\n'):
                if styleLine:
                    if "display:none" in styleLine or "display: none" in styleLine:
                        classesToStrip.append(re.findall('.(.*?){', styleLine))
            [s.extract() for s in ips.select('[style~="display:none"]')]
            for classToStrip in classesToStrip:
                [s.extract() for s in ips.select('[class~="' + classToStrip[0] + '"]')]
            [s.extract() for s in ips.select('style')]
            ip_raw = ips.text

            if proxyType != 'socks4/5':  # was "is not", which compares identity rather than equality
                ip = str(ip_raw.replace(" ", ""))
                port = port.replace("\n", "")
                type = proxyType
                country = str(proxyCountry)
                speed = proxySpeed
                connectionTime = proxyConnectionTime
                dateTime = datetime.now()
                db.execute("INSERT INTO proxies VALUES (?,?,?,?,?,?,?,'hma','','')",
                           (ip, port, type, country, speed, connectionTime, dateTime))
                print ip + " added"

conn.commit()
[ "ludalex@gmail.com" ]
ludalex@gmail.com
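A minimal sketch of reading back what this worker writes; the column names are not shown in this record, so rows are unpacked by position, following the INSERT statement above:

import sqlite3

conn = sqlite3.connect('/var/www/n4checker/db/storechecker.db')
for row in conn.execute("SELECT * FROM proxies LIMIT 5"):
    ip, port = row[0], row[1]   # positions follow the INSERT above
    print("%s:%s" % (ip, port))
conn.close()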
af80ef60a13d2e3be5f9fadcee5100682e0db9d3
8fb567f62189d1669439c797e19502dd560e4fb6
/relief_canvas.py
2c563e55e99831bf3b00d56fbf734106ef3bc6f7
[]
no_license
AndiEcker/kivy_playground
22cabf8f77fb2087d2f8c4f3719a4c3d367d1874
d86c736c752b08172760d2ea145527df4eb58199
refs/heads/master
2021-07-06T23:51:29.284502
2021-05-05T22:02:03
2021-05-05T22:02:03
236,343,361
0
0
null
null
null
null
UTF-8
Python
false
false
22,125
py
""" relief canvas """ from typing import Any, Callable, Tuple, Union from kivy.factory import Factory from kivy.graphics import Color, Line from kivy.graphics.instructions import InstructionGroup from kivy.lang import Builder from kivy.properties import NumericProperty, ObjectProperty from kivy.uix.label import Label from kivy.uix.button import Button from kivy.uix.widget import Widget from ae.gui_app import id_of_flow from ae.kivy_app import FlowButton, FlowToggler, KivyMainApp DEF_NUM_PROP_VAL = "99px" ANGLE_BEG = 87 ANGLE_END = 267 ColorRGB = Tuple[float, float, float] #: color with Red, Green and Blue parts between 0.0 and 1.0 ColorRGBA = Tuple[float, float, float, float] #: ink is rgb color and alpha ColorOrInk = Union[ColorRGB, ColorRGBA] #: color or ink type ReliefColors = Union[Tuple[ColorRGB, ColorRGB], Tuple] #: tuple of (top, bottom) relief colors or empty tuple ReliefBrightness = Tuple[float, float] #: top and bottom brightness/darken factor def relief_colors(color_or_ink: ColorOrInk = (0, 0, 0), darken_factors: ReliefBrightness = (0.6, 0.3)) -> ReliefColors: """ calculate the (top and bottom) colors used for the relief lines/drawings. :param color_or_ink: color used for to calculate the relief colors from, which will first be lightened until one of the color parts (R, G or B) reach the value 1.0; then the darken factors will be applied to the color parts. If not passed then grey colors will be returned. .. note:: If the alpha value of paramref:`~relief_colors.color_or_ink` is zero then no relief colors will be calculated and an empty tuple will be returned (disabling relief). :param darken_factors: two factors for to darken (1) the top and (2) the bottom relief color parts. :return: tuple with darkened colors calculated from ink or an empty tuple if the alpha value of paramref:`~relief_colors.ink` has a zero value. 
""" if len(color_or_ink) > 3 and not color_or_ink[3]: return () max_col_part = max(color_or_ink[:3]) if max_col_part == 0: # prevent zero division if color_or_ink is black/default lightened_color = (1.0, 1.0, 1.0) else: brighten_factor = 1 / max_col_part lightened_color = tuple([(col * brighten_factor) for col in color_or_ink[:3]]) return tuple([tuple([col_part * darken for col_part in lightened_color]) for darken in darken_factors]) class ReliefCanvas: # (Widget): # also works without Widget/any ancestor """ relief behavior """ relief_ellipse_inner_colors: ReliefColors = ObjectProperty(()) relief_ellipse_inner_lines: NumericProperty = NumericProperty('6sp') relief_ellipse_inner_offset: NumericProperty = NumericProperty('1sp') relief_ellipse_outer_colors: ReliefColors = ObjectProperty(()) relief_ellipse_outer_lines: NumericProperty = NumericProperty('6sp') relief_square_inner_colors: ReliefColors = ObjectProperty(()) relief_square_inner_lines: NumericProperty = NumericProperty('3sp') relief_square_inner_offset: NumericProperty = NumericProperty('1sp') relief_square_outer_colors: ReliefColors = ObjectProperty(()) relief_square_outer_lines: NumericProperty = NumericProperty('3sp') _relief_graphic_instructions: InstructionGroup # attributes provided by the class to be mixed into x: float y: float width: float height: float canvas: Any bind: Any def __init__(self, **kwargs): super().__init__(**kwargs) self.bind(pos=self._relief_refresh) self.bind(size=self._relief_refresh) self.bind(relief_ellipse_inner_colors=self._relief_refresh) self.bind(relief_ellipse_inner_lines=self._relief_refresh) self.bind(relief_ellipse_inner_offset=self._relief_refresh) self.bind(relief_ellipse_outer_colors=self._relief_refresh) self.bind(relief_ellipse_outer_lines=self._relief_refresh) self.bind(relief_square_inner_colors=self._relief_refresh) self.bind(relief_square_inner_lines=self._relief_refresh) self.bind(relief_square_inner_offset=self._relief_refresh) self.bind(relief_square_outer_colors=self._relief_refresh) self.bind(relief_square_outer_lines=self._relief_refresh) self._relief_graphic_instructions = InstructionGroup() def _relief_refresh(self, *_args): """ pos/size or color changed event handler. """ if self._relief_graphic_instructions.length(): self.canvas.after.remove(self._relief_graphic_instructions) self._relief_graphic_instructions.clear() add = self._relief_graphic_instructions.add pos_size = self.x, self.y, self.width, self.height if self.relief_ellipse_inner_colors and self.relief_ellipse_inner_lines: self._relief_ellipse_inner_refresh(add, *self.relief_ellipse_inner_colors, *pos_size) if self.relief_ellipse_outer_colors and self.relief_ellipse_outer_lines: self._relief_ellipse_outer_refresh(add, *self.relief_ellipse_outer_colors, *pos_size) if self.relief_square_inner_colors and self.relief_square_inner_lines: self._relief_square_inner_refresh(add, *self.relief_square_inner_colors, *pos_size) if self.relief_square_outer_colors and self.relief_square_outer_lines: self._relief_square_outer_refresh(add, *self.relief_square_outer_colors, *pos_size) if self._relief_graphic_instructions.length(): self.canvas.after.add(self._relief_graphic_instructions) def _relief_ellipse_inner_refresh(self, add_instruction: Callable, top_color: ColorRGB, bottom_color: ColorRGB, wid_x: float, wid_y: float, wid_width: float, wid_height: float): """ ellipse pos/size or color changed event handler. 
""" lines = int(self.relief_ellipse_inner_lines) offset = int(self.relief_ellipse_inner_offset) for line in range(1, lines + 1): alpha = 0.9 - (line / lines) * 0.81 line += offset line2 = 2 * line in_x1 = wid_x + line in_y1 = wid_y + line in_width = wid_width - line2 in_height = wid_height - line2 add_instruction(Color(*top_color, alpha)) # inside top left add_instruction(Line(ellipse=[in_x1, in_y1, in_width, in_height, ANGLE_END, 360 + ANGLE_BEG])) add_instruction(Color(*bottom_color, alpha)) # inside bottom right add_instruction(Line(ellipse=[in_x1, in_y1, in_width, in_height, ANGLE_BEG, ANGLE_END])) def _relief_ellipse_outer_refresh(self, add_instruction: Callable, top_color: ColorRGB, bottom_color: ColorRGB, wid_x: float, wid_y: float, wid_width: float, wid_height: float): """ ellipse pos/size or color changed event handler. """ lines = int(self.relief_ellipse_outer_lines) for line in range(1, lines + 1): alpha = 0.9 - (line / lines) * 0.81 line2 = 2 * line out_x1 = wid_x - line out_y1 = wid_y - line out_width = wid_width + line2 out_height = wid_height + line2 add_instruction(Color(*top_color, alpha)) # outside top left add_instruction(Line(ellipse=[out_x1, out_y1, out_width, out_height, ANGLE_END, 360 + ANGLE_BEG])) add_instruction(Color(*bottom_color, alpha)) # outside bottom right add_instruction(Line(ellipse=[out_x1, out_y1, out_width, out_height, ANGLE_BEG, ANGLE_END])) def _relief_square_inner_refresh(self, add_instruction: Callable, top_color: ColorRGB, bottom_color: ColorRGB, wid_x: float, wid_y: float, wid_width: float, wid_height: float): """ square pos/size or color changed event handler. """ lines = int(self.relief_square_inner_lines) offset = int(self.relief_square_inner_offset) for line in range(1, lines + 1): alpha = 0.9 - (line / lines) * 0.81 line += offset line2 = 2 * line in_x1 = wid_x + line in_x2 = in_x1 + wid_width - line2 in_y1 = wid_y + line in_y2 = in_y1 + wid_height - line2 add_instruction(Color(*top_color, alpha)) # inside top left add_instruction(Line(points=[in_x1, in_y1, in_x1, in_y2, in_x2, in_y2])) add_instruction(Color(*bottom_color, alpha)) # inside bottom right add_instruction(Line(points=[in_x1, in_y1, in_x2, in_y1, in_x2, in_y2])) def _relief_square_outer_refresh(self, add_instruction: Callable, top_color: ColorRGB, bottom_color: ColorRGB, wid_x: float, wid_y: float, wid_width: float, wid_height: float): """ square pos/size or color changed event handler. 
""" lines = int(self.relief_square_outer_lines) for line in range(1, lines + 1): alpha = 0.9 - (line / lines) * 0.81 line2 = 2 * line out_x1 = wid_x - line out_x2 = out_x1 + wid_width + line2 out_y1 = wid_y - line out_y2 = out_y1 + wid_height + line2 add_instruction(Color(*top_color, alpha)) # outside upper left add_instruction(Line(points=[out_x1, out_y1, out_x1, out_y2, out_x2, out_y2])) add_instruction(Color(*bottom_color, alpha)) # outside bottom right add_instruction(Line(points=[out_x1, out_y1, out_x2, out_y1, out_x2, out_y2])) class ReliefLabel(ReliefCanvas, Label): """ relief label """ class ReliefButton(ReliefCanvas, Button): """ relief button """ class ReliefFlowButton(ReliefCanvas, FlowButton): """ relief flow button """ class ReliefFlowToggler(ReliefCanvas, FlowToggler): """ relief flow toggle button """ if __name__ == '__main__': Builder.load_string("""\ #: import relief_colors relief_canvas.relief_colors #: import ERR relief_canvas.DEF_NUM_PROP_VAL <ReliefHelpToggler@ReliefCanvas+HelpToggler>: <Main@FloatLayout>: BoxLayout: orientation: 'vertical' padding: 3 spacing: 6 BoxLayout: padding: 3 spacing: 6 size_hint_y: None height: 90 #HelpToggler: # needed for to run kivy_app ReliefHelpToggler: relief_square_outer_colors: relief_colors((1, 1, 0) if app.help_layout else (1, 0, 1), darken_factors=(.81, .51)) relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text) relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text) relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text) FlowButton: text: "toggle theme" on_release: app.main_app.change_app_state('light_theme', not app.app_states['light_theme']) BoxLayout: orientation: 'vertical' size_hint_x: 3 BoxLayout: ImageLabel: text: "lines" ImageLabel: text: "outer" ImageLabel: text: "inner" ImageLabel: text: "offset" BoxLayout: ImageLabel: text: "ellipse" FlowInput: id: ell_out text: "6sp" background_color: (1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1) FlowInput: id: ell_inn text: "3sp" background_color: (1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1) FlowInput: id: ell_off text: "1sp" background_color: (1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1) BoxLayout: ImageLabel: text: "square" FlowInput: id: squ_out text: "6sp" background_color: (1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1) FlowInput: id: squ_inn text: "3sp" background_color: (1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1) FlowInput: id: squ_off text: "1sp" background_color: (1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1) BoxLayout: padding: app.main_app.correct_num_prop_value(ell_out.text) spacing: 69 ReliefFlowButton: text: "FlowDarkFac36" ellipse_fill_ink: .6, .3, .3, 1 on_ellipse_fill_ink: print("INK changed", args) #relief_ellipse_outer_lines: '9sp' #relief_ellipse_inner_lines: '6sp' relief_ellipse_inner_colors: relief_colors(self.ellipse_fill_ink, darken_factors=(0.3, 0.6)) on_relief_ellipse_inner_colors: print("COL changed", args) relief_ellipse_inner_lines: app.main_app.correct_num_prop_value(ell_inn.text) relief_ellipse_outer_colors: relief_colors(self.ellipse_fill_ink) on_relief_ellipse_outer_colors: print("COL changed", args) relief_ellipse_outer_lines: app.main_app.correct_num_prop_value(ell_out.text) relief_ellipse_inner_offset: 
app.main_app.correct_num_prop_value(ell_off.text) size_hint: 1, 1 on_release: app.main_app.toggle_color_picker(self, 'ellipse_fill_ink') ReliefFlowButton: text: "Flow" ellipse_fill_ink: .7, .7, .3, 1 relief_ellipse_inner_colors: relief_colors(self.ellipse_fill_ink) relief_ellipse_outer_colors: relief_colors(self.ellipse_fill_ink) relief_ellipse_outer_lines: app.main_app.correct_num_prop_value(ell_out.text) relief_ellipse_inner_lines: app.main_app.correct_num_prop_value(ell_inn.text) relief_ellipse_inner_offset: app.main_app.correct_num_prop_value(ell_off.text) size_hint: 1, 1 on_release: app.main_app.toggle_color_picker(self, 'ellipse_fill_ink') ReliefFlowButton: text: "0 alpha" ellipse_fill_ink: .4, .7, .7, 0 # the 0 alpha is preventing relief relief_ellipse_colors: relief_colors(self.ellipse_fill_ink) relief_ellipse_outer_lines: app.main_app.correct_num_prop_value(ell_out.text) relief_ellipse_inner_lines: app.main_app.correct_num_prop_value(ell_inn.text) relief_ellipse_inner_offset: app.main_app.correct_num_prop_value(ell_off.text) square_fill_ink: .6, .6, .6, .6 size_hint: None, 1 width: self.height on_release: app.main_app.toggle_color_picker(self, 'ellipse_fill_ink') ReliefFlowToggler: text: "Toggler" ellipse_fill_ink: .42, .63, .93, 1 relief_ellipse_inner_colors: relief_colors(self.ellipse_fill_ink, darken_factors=(0.3, 0.6)) relief_ellipse_inner_lines: app.main_app.correct_num_prop_value(ell_inn.text if self.state == 'down' else '18sp') relief_ellipse_outer_colors: relief_colors(self.ellipse_fill_ink) relief_ellipse_outer_lines: app.main_app.correct_num_prop_value(ell_out.text if self.state == 'down' else '12sp') relief_ellipse_inner_offset: app.main_app.correct_num_prop_value(ell_off.text) size_hint: None, 1 width: self.height on_state: print("Ellipse Toggler state change", args) BoxLayout: padding: app.main_app.correct_num_prop_value(squ_out.text) spacing: 69 ReliefLabel: text: "kivy label" color: (0, 0, 0, 1) if app.app_states['light_theme'] else (1, 1, 1, 1) relief_square_inner_colors: relief_colors((1, 1, 1), darken_factors=(0.3, 0.6)) relief_square_outer_colors: relief_colors((1, 1, 1)) relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text) relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text) relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text) ReliefButton: text: "kivy button" color: (0, 0, 0, 1) if app.app_states['light_theme'] else (1, 1, 1, 1) relief_square_inner_colors: relief_colors((1, 1, 0), darken_factors=(0.3, 0.6)) relief_square_outer_colors: relief_colors((0, 0, 1)) relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text) relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text) relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text) ReliefFlowButton: text: "flow button" square_fill_ink: .42, .63, .93, 1 relief_square_inner_colors: relief_colors(self.square_fill_ink, darken_factors=(0.3, 0.6)) relief_square_outer_colors: relief_colors(self.square_fill_ink) relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text) relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text) relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text) size_hint: 1, 1 on_release: app.main_app.toggle_color_picker(self) ReliefFlowToggler: text: "flow toggler" square_fill_ink: .42, .63, .93, 1 relief_square_inner_colors: relief_colors(self.square_fill_ink, darken_factors=(0.3, 0.6)) 
relief_square_outer_colors: relief_colors(self.square_fill_ink) relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text) if self.state == 'down' else '18sp' relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text) if self.state == 'down' else '9sp' relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text) size_hint: 1, 1 on_release: app.main_app.toggle_color_picker(self) <ColorPickerDD@FlowDropDown>: ColorPicker: id: col_pic size_hint_y: None height: self.width on_color: app.main_app.debug_print("PIC changed", args) """) Factory.register('ReliefCanvas', ReliefCanvas) class NumPropTester(Widget): """ test NumericProperty values """ num_prop = NumericProperty() class ReliefCanvasApp(KivyMainApp): """ app """ color_picker: Any = None color_dropdown: Any = None @staticmethod def correct_num_prop_value(num_prop_value: Union[str, int, float]) -> Union[str, int, float]: """ test if num_prop_value has a valid/assignable NumericProperty value and if not correct it to 21sp """ wid = NumPropTester() try: wid.num_prop = num_prop_value except ValueError: print(f"ReliefCanvasApp.correct_num_prop_value() got invalid numeric property value '{num_prop_value}'") return DEF_NUM_PROP_VAL return num_prop_value def debug_print(self, *args, **kwargs): """ added to find out why the color got lightened when opening color picker dropdown. """ print("APP_DEBUG_PRINT", args, kwargs) def toggle_color_picker(self, wid, color_name='square_fill_ink'): """ show or hide color picker""" print("TOGGLE COLOR PICKER", getattr(wid, color_name), self.color_picker) is_open = self.color_dropdown and self.color_dropdown.attach_to if is_open: self.color_dropdown.dismiss() if self.color_dropdown: self.color_picker.unbind(color=wid.setter(color_name)) self.color_picker = None self.color_dropdown = None if not is_open: self.color_dropdown = Factory.ColorPickerDD() self.change_flow(id_of_flow('suggest')) self.color_dropdown.open(wid) self.color_picker = self.color_dropdown.ids.col_pic self.color_picker.color = getattr(wid, color_name) self.color_picker.bind(color=wid.setter(color_name)) ReliefCanvasApp().run_app()
[ "aecker2@gmail.com" ]
aecker2@gmail.com
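relief_colors() first brightens the ink until its strongest channel reaches 1.0, then applies the two darken factors; checking that arithmetic by hand (assumes kivy and the ae packages are importable, since they load at module import):

from relief_canvas import relief_colors

# ink (.6, .3, .3): brightened by 1/0.6 to (1.0, 0.5, 0.5), then darkened by 0.6 and 0.3
print(relief_colors((.6, .3, .3)))     # ((0.6, 0.3, 0.3), (0.3, 0.15, 0.15)), up to float rounding
print(relief_colors((.5, .5, .5, 0)))  # () - a zero alpha disables the relief entirely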
ef19beb3ab81659669a7ebc9612b50f9315b39a6
429d014690e2e2f7ea49ac7a2e0fa01455a4125e
/katas/scraping/get_honor.py
b6974268c09fa9df7f80fab797afaa1a87d2f6af
[]
no_license
harrietty/python-katas
4c2b12c7a29d9103b9d3eeffc1aa38f5273ab62f
07957c3c5a1a35a9f2359f02f43e433fddbb8de6
refs/heads/master
2021-05-14T12:20:39.361071
2018-01-27T13:42:51
2018-01-27T13:42:51
116,405,926
1
0
null
null
null
null
UTF-8
Python
false
false
425
py
# Returns the honour for a specific username

import requests
from bs4 import BeautifulSoup


def get_honor(username):
    URL = 'https://www.codewars.com/users/{}'.format(username)
    r = requests.get(URL)
    page = BeautifulSoup(r.text, 'html.parser')
    stats_area = page.find_all(attrs={'class': 'stat-row'})[0]
    honor_section = stats_area.find_all('b')
    return int(honor_section[1].next_sibling.replace(',', ''))
[ "harriethryder@gmail.com" ]
harriethryder@gmail.com
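Usage is a single call; note that it scrapes the live profile page, so it needs network access and will break if Codewars changes its markup (the module name is assumed from the path above):

from get_honor import get_honor

print(get_honor('some_user'))   # honor as an int, for any existing Codewars username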
d1cd60b8ac3a89b9dd0b4a456d9c166b93f4ffe5
67c5269fa4720cf728d4c1dd572c09d5e4e7a321
/convert_mcnp71.py
db687aef0e14ec73a1479e0f9dc3959d89a76938
[]
no_license
SamPUG/data
cff882327f5fe79ce2c2fca70d217173300c4f85
457755083bb8e05e58bbc3765f52bf8c756abb9c
refs/heads/master
2020-12-19T14:57:12.806099
2020-03-06T08:30:47
2020-03-06T08:30:47
235,767,080
0
0
null
2020-02-25T14:43:04
2020-01-23T09:58:38
Python
UTF-8
Python
false
false
4,330
py
#!/usr/bin/env python3

import argparse
from collections import defaultdict
from pathlib import Path
import sys

import openmc.data


# Make sure Python version is sufficient
assert sys.version_info >= (3, 6), "Python 3.6+ is required"

description = """
Convert ENDF/B-VII.1 ACE data from the MCNP6 distribution into an HDF5 library
that can be used by OpenMC. This assumes that you have a directory containing
subdirectories 'endf71x' and 'ENDF71SaB'. Optionally, if a recent photoatomic
library (e.g., eprdata14) is available, it can also be converted using the
--photon argument.
"""


class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    pass


parser = argparse.ArgumentParser(
    description=description,
    formatter_class=CustomFormatter
)
parser.add_argument('-d', '--destination', type=Path, default=Path('mcnp_endfb71'),
                    help='Directory to create new library in')
parser.add_argument('--libver', choices=['earliest', 'latest'], default='earliest',
                    help="Output HDF5 versioning. Use 'earliest' for backwards "
                         "compatibility or 'latest' for performance")
parser.add_argument('-p', '--photon', type=Path,
                    help='Path to photoatomic data library (eprdata12 or later)')
parser.add_argument('mcnpdata', type=Path,
                    help='Directory containing endf71x and ENDF71SaB')
args = parser.parse_args()

# Check arguments to make sure they're valid
assert args.mcnpdata.is_dir(), 'mcnpdata argument must be a directory'
if args.photon is not None:
    assert args.photon.is_file(), 'photon argument must be an existing file'

# Get a list of all ACE files
endf71x = list(args.mcnpdata.glob('endf71x/*/*.7??nc'))
endf71sab = list(args.mcnpdata.glob('ENDF71SaB/*.??t'))

# Check for fixed H1 files and remove old ones if present
hydrogen = args.mcnpdata / 'endf71x' / 'H'
if (hydrogen / '1001.720nc').is_file():
    for i in range(10, 17):
        endf71x.remove(hydrogen / f'1001.7{i}nc')

# There's a bug in H-Zr at 1200 K
thermal = args.mcnpdata / 'ENDF71SaB'
endf71sab.remove(thermal / 'h-zr.27t')

# Check for updated TSL files and remove old ones if present
checks = [
    ('sio2', 10, range(20, 37)),
    ('u-o2', 30, range(20, 28)),
    ('zr-h', 30, range(20, 28))
]
for material, good, bad in checks:
    if (thermal / f'{material}.{good}t').is_file():
        for suffix in bad:
            f = thermal / f'{material}.{suffix}t'
            if f.is_file():
                endf71sab.remove(f)

# Group together tables for the same nuclide
tables = defaultdict(list)
for p in sorted(endf71x + endf71sab):
    tables[p.stem].append(p)

# Create output directory if it doesn't exist
(args.destination / 'photon').mkdir(parents=True, exist_ok=True)

library = openmc.data.DataLibrary()

for name, paths in sorted(tables.items()):
    # Convert first temperature for the table
    p = paths[0]
    print(f'Converting: {p}')
    if p.name.endswith('t'):
        data = openmc.data.ThermalScattering.from_ace(p)
    else:
        data = openmc.data.IncidentNeutron.from_ace(p, 'mcnp')

    # For each higher temperature, add cross sections to the existing table
    for p in paths[1:]:
        print(f'Adding: {p}')
        if p.name.endswith('t'):
            data.add_temperature_from_ace(p)
        else:
            data.add_temperature_from_ace(p, 'mcnp')

    # Export HDF5 file
    h5_file = args.destination / f'{data.name}.h5'
    print(f'Writing {h5_file}...')
    data.export_to_hdf5(h5_file, 'w', libver=args.libver)

    # Register with library
    library.register_file(h5_file)

# Handle photoatomic data
if args.photon is not None:
    lib = openmc.data.ace.Library(args.photon)
    for table in lib.tables:
        # Convert first temperature for the table
        print(f'Converting: {table.name}')
        data = openmc.data.IncidentPhoton.from_ace(table)

        # Export HDF5 file
        h5_file = args.destination / 'photon' / f'{data.name}.h5'
        print(f'Writing {h5_file}...')
        data.export_to_hdf5(h5_file, 'w', libver=args.libver)

        # Register with library
        library.register_file(h5_file)

# Write cross_sections.xml
library.export_to_xml(args.destination / 'cross_sections.xml')
[ "paul.k.romano@gmail.com" ]
paul.k.romano@gmail.com
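Once the script has run, pointing OpenMC at the generated library is one line; a sketch (the output path matches the default --destination above, and the environment-variable convention is standard OpenMC usage):

import os

# Tell OpenMC where the converted HDF5 library lives before running a simulation.
os.environ['OPENMC_CROSS_SECTIONS'] = 'mcnp_endfb71/cross_sections.xml'

# Example invocations of the converter itself (shell), per the argparse setup above:
#   ./convert_mcnp71.py /path/to/mcnpdata
#   ./convert_mcnp71.py -d out_dir --libver latest --photon /path/to/eprdata14 /path/to/mcnpdata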
3e7f64e403c62cb91b90b2d22413862faa747cfd
0fc78cbe3dcf339faf5302de82db8e81ae4a952a
/task3_snkrs/spiders/neiman_spider.py
ad904a7a1fcc7eef5ca7216a6f973c731e28d06f
[]
no_license
waseesoft/task3_snkrs
6e9de5a292d28dbe3a0986167da6d2a6f696a182
15e14d01135f0f9208e5a6c8aa6c96b9c866c909
refs/heads/master
2021-06-24T06:41:59.354166
2021-04-01T21:46:22
2021-04-01T21:46:22
201,049,018
0
0
null
2019-10-24T13:42:33
2019-08-07T12:46:46
Python
UTF-8
Python
false
false
8,301
py
import json

from scrapy.spiders import CrawlSpider, Rule, Request
from scrapy.linkextractors import LinkExtractor

from task3_snkrs.items import NeimanItem


class NeimanSpider(CrawlSpider):
    name = 'neimanmarcus'
    HTTPS = 'https:'
    custom_settings = {
        'USER_AGENT': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36"
    }
    allowed_domains = [
        'neimanmarcus.com',
    ]
    start_urls = [
        'https://www.neimanmarcus.com/en-cn/index.jsp',
    ]
    cookies = {
        'tms_data': '{DT-2017.03}a3HwxssoiZzaMm5Pj2Bv6OVfV52gp3oiNhZzH+Q1Et/Dqvj4UZJdUi3o2xJa8'
                    'M7HpxN6YGZIZum7DZENeq2mpKWsMMyjHzZzskVuLSi5glDPBfxqHllZ2ATy6SBwEqoLuEFuFVN'
                    'hhw9H6USiUQk2MevVCpxX43W932GrkdrWBWOZ10w0I54Cujvasl9kPL6NE/N0GxD1AkccaD6JWG'
                    'dOXiXFLSyAYhxfiwLqbrXsVfFrMH6XHHgvBstXkyq9kUG4IChvW47uZiQ+jAxwXSW/Ntm2X9NpzG'
                    'mhOp+i/CGDKbq9ExXV4hL92pOz47MfElVC5s91r6+5gB7jaH62Nnzt8A+kYcGo1PzCSEFeBvbKmXd'
                    '/UQaNS9npeuy296A5gmaaUtWQgp+J9A91MzoIpTo5PZ5CkCwIllUtuyVNcy/XxtjRjozw2k36quitU'
                    'KtOqIE3Y0di38hvqLx5Y9ZS5tqi127/sj1E0AwyB5IGnP2vpuheaKsICNkiPPIWc4FBYlN49JWVRHlm'
                    'o0ApsItKZCQgjHCozMyntDUHvtbH7bIeXeTIcxia6/Zss4sz+jgsQh8t3+ggHCty76ZxrT9Kwrb54rEX'
                    'GkSanU9W5IyiJmrYcCb84IbHXsPw/eJjp7UjP2C0uMV5NDEbxpFJYdZLkGGuHy9dZx5h3XINJorm2r8XN'
                    'iYZtbheJvfkxkpM3pXdsG9RarRp52UEcsPVsJreUHygoLJF8DI6A/P9G5bkAZUWmUdOkpejNE6nWFn/wzW'
                    'tTk6XH2F/FHK8yYGl7vu/Zdrvu3XaUnmOliCgqKZJulwli6EMjFh+oo57Qu8k3q/+NQ7lfO18FeTD8flEte'
                    '4D9CEnqWgTRKmcnqcbvrE8LHY4MRgvFWT6EVUPA/rTo2wx9+qojGkfbwWrNA+L/0ojXjIvddFI+4AoKTsKA'
                    '63gqYmoRcYrbu6OGXSrlzuVvTaxKE+qzGBxEF9Sb96krdEeD0fWUQ=='
    }
    c = '{DT-2017.03}a3HwxssoiZzaMm5Pj2Bv6L13Gv8Ad/WZkJm2zLBiJ22quEI2eCcOen+zdhEJJvHeeOOXi+MO99UjG2/1D+hl4AXI0xqxMcBJcNKRoDmB8W5Ptb0z0I9kIPIlYImXaHDdOdwGiYZVK7VYetLzT9+AlvcXAgQLwm8YRoSydQX9y/iR/GCfWSi0wro7/kwt7J5Bi/FFkjSxBX6XHCkgroK52hUKgW/YC1MZ5sJseydRx1IoiHRiDZy5ztxXq6ZzvseaBT8nS56U9EH3crgXmw7726TvadPC383EPCcEAJZTuPTQi1SjH0Vww5owXy0GVtVTHgQUbpz2HR2jg/liv8BYRgT2uIMscZUHtj+3+LXEgL6h5VNpAM7BXr6dpAo87UmpKZAaZhUufcW4Hj6OhLjcc5Ae8ZOY/g3Ei3DxvzB6aoaOI4FwvVc1FhRMcX3UGkXfsYvcNKgQnb6ELb7f4yJm9mSzR3oVmqDMXFPe1HnsR95VAvDwEZlbY18XLrU5bGYP4J/0xyiH9OE+PfysOstZsnMZxsPhNo1VZiWNo5S8enqzFf7dhClsTL5qAjscfQTNv1JIrXORKfF6DcBf6i/91Q8zGK1KAKTv37mIV3btzFSeNu7fPUOTtBM/TFgJzzGLe7AYYInEvPqfxx0yQ+d5xzRk7NNsgoykAQK83uIbFnVuJmCggiAv7tabslD7R/ZCKyfdbvFE1siLr8Lhn00KWEBdt9hvDuoEV1DiP+oaNg7B5sIaxERI55GR2VgK/9C0UqiFyyO95itCF45/y/ruyNakse0Ttc80Q/BXLhImKOOi5HrGsbxf+PEuy5H84QG5/6EhXwB4UpWJQc82EysqOlMBhT/Jya6TmzWB9Ztp6jH4a2Wox15pF6VYlVHKTbLEIjmMZm1x+b3GYJaY0NPLNV8jeFLpB3Tbs9RoUsHPbuN1gR29OXRa7GfW2oCg6AHm0/shfgNgeMf+9AsLDt7Mhg=='
    cook = {'tms_data': c}  # same token as `c`; deduplicated here instead of repeating the literal

    def start_requests(self):
        for url in self.start_urls:
            yield Request(url, callback=self.parse, cookies=self.cookies)

    listings_css = [
        '.arrow-button--right',
        '.menu-wrapper a',
    ]
    rules = [
        Rule(LinkExtractor(restrict_css=listings_css)),
        Rule(LinkExtractor(restrict_css='.product-thumbnail__link'), callback='parse_product',
             process_request='add_cookie_in_req', follow=True),
    ]

    def parse_product(self, response):
        yield from super().parse(response)
        yield from self.get_products(response)

    def add_cookie_in_req(self, request):
        request.cookies['tms_data'] = self.c
        return request

    def get_raw_product(self, response):
        return json.loads(response.css('#state::text').get())

    def get_products(self, response):
        raw = self.get_raw_product(response)
        p_info = raw['utag']['product']['productInfo']
        products, prices = [], []
        p_ids = p_info['product_id']
        brands = p_info['product_brand']
        old_price_flags = p_info['product_pricing_adornment_flag']
        for i, values in enumerate(zip(p_ids, p_info['product_name'], brands,
                                       p_info['product_price'], old_price_flags)):
            product_id, name, brand, price, old_price_flag = values
            url, description, currency = '', '', ''
            images, old_prices = [], []
            item = NeimanItem()
            item['product_id'] = product_id
            item['name'] = name
            item['brand'] = brand if isinstance(brands, list) else brands
            if old_price_flag == 'true' or (old_price_flag == 'false' and len(p_ids) > 1):
                p = raw['productCatalog']['group']['childProducts'][product_id]
                url = p['linkedData']['url']
                currency = p['price']['currencyCode']
                description = p['linkedData']['description']
                images += self.get_media_images(p) + self.get_images(p)
                old_prices = [e['price'] for e in p['price'].get('adornments', []) if e['price'] != price]
            elif old_price_flag == 'false' and len(p_ids) == 1:
                p = raw['productCatalog']['product']
                currency = p['price']['currencyCode']
                raw_data = p['linkedData']
                description = raw_data['description']
                url = raw_data['url']
                images += self.get_media_images(p) + self.get_images(p)
            item['url'] = url
            item['image_urls'] = list(set(images))
            item['description'] = description
            # item['headers'] = response.headers
            products.append(item)
            prices.append(
                {
                    'price': price,
                    'old_prices': old_prices,
                    'currency': currency,
                },
            )
        self.get_skus(raw['utag']['product']['productAnalytics'], products, prices)
        return products

    def get_images(self, raw_product):
        urls = []
        raw_urls = raw_product['options']['productOptions']
        for raw in raw_urls:
            if raw.get('label') != 'color':
                continue
            for value in raw.get('values'):
                urls += self.get_media_images(value)
        return urls

    def get_media_images(self, raw_media):
        urls = []
        media = raw_media.get('media', {})
        alternates = media.get('alternate', {})
        url = media.get('main', {}).get('medium', {}).get('url')
        if url:
            urls.append(self.HTTPS + url)
        for e in alternates:
            url = alternates[e].get('medium', {}).get('url')
            if url:
                urls.append(self.HTTPS + url)
        return urls

    def get_skus(self, product_analytics, products, prices):
        for i, e in enumerate(product_analytics['details']['products']):
            skus = {}
            for s in e['skus']:
                sku = dict(prices[i])  # copy, so every sku gets its own price dict instead of aliasing one
                sku['availability'] = s['inStock']
                sku['color'] = s['color']
                sku['size'] = s.get('size', 'one-size')
                skus[s['id']] = sku
            products[i].update(
                {
                    'skus': skus,
                },
            )
[ "mrarslan101@gmail.com" ]
mrarslan101@gmail.com
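A minimal programmatic launch of the spider; the FEEDS setting needs a reasonably recent Scrapy (2.1+) and the output file name is arbitrary:

from scrapy.crawler import CrawlerProcess

from task3_snkrs.spiders.neiman_spider import NeimanSpider

process = CrawlerProcess(settings={'FEEDS': {'neiman.json': {'format': 'json'}}})
process.crawl(NeimanSpider)
process.start()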
2dd90c00d550fb8a4a73f533eae06e6108524325
11cff97013030eb41bf5afdb08a0c3c640f8c961
/Tensorflow/roi-pooling/code/neptune_handler.py
f783c2c9ab1f3906b7e8c27cbb5d5a1647b679de
[]
no_license
mellophi/Codes
86dcc5c45591f97bdaca79f81fc988954841dd5a
110fe75961354c259e31cb7cb9a9b5fda32b70bc
refs/heads/master
2022-11-20T19:53:21.498960
2018-02-18T04:32:47
2018-02-18T04:32:47
118,418,556
0
1
null
2022-10-29T00:45:51
2018-01-22T06:55:33
Jupyter Notebook
UTF-8
Python
false
false
2,742
py
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

from builtins import range
from builtins import object

from deepsense import neptune
import numpy as np
import cv2
from PIL import Image


class NeptuneHandler(object):

    def __init__(self, num_channel_names, charts_desc, im_channel_names):
        self.ctx = neptune.Context()
        self.learn_rate = self.ctx.params.learning_rate
        self.num_epochs = int(self.ctx.params.num_epochs)
        self.roi_folder = self.ctx.params.roidb
        self.im_folder = self.ctx.params.im_folder
        self.pretrained_path = self.ctx.params.pretrained_path
        self.create_numeric_channels(num_channel_names, self.ctx)
        self.create_charts(charts_desc, self.ctx)
        self.create_image_channels(im_channel_names, self.ctx)

    def create_numeric_channels(self, channel_names, ctx):
        self.numerical_channels = [ctx.job.create_channel(name=name, channel_type=neptune.ChannelType.NUMERIC)
                                   for name in channel_names]

    def create_charts(self, charts_desc, ctx):
        self.charts = [ctx.job.create_chart(name=charts_desc[i][0],
                                            series={charts_desc[i][1]: self.numerical_channels[i]})
                       for i in range(len(self.numerical_channels))]

    def send_to_neptune(self, time_point, values):
        send = lambda ch, val: ch.send(x=time_point, y=val)
        for i in range(len(self.numerical_channels)):
            send(self.numerical_channels[i], values[i])

    def create_image_channels(self, channel_names, ctx):
        self.im_channels = [ctx.job.create_channel(name=name, channel_type=neptune.ChannelType.IMAGE)
                            for name in channel_names]

    def send_image_with_proposals(self, time_step, im, proposals, shape, rois=False):
        width = 340
        height = 150
        im_ = cv2.resize(im, (width, height))
        im_ = np.uint8(im_ * 255.)
        for proposal in proposals:
            # scale each proposal box from the original image size to the thumbnail
            x1 = int(width * proposal[0] / float(shape[1]))
            y1 = int(height * proposal[1] / float(shape[0]))
            x2 = int(width * proposal[2] / float(shape[1]))
            y2 = int(height * proposal[3] / float(shape[0]))
            cv2.rectangle(im_, (x1, y1), (x2, y2), (255, 0, 0), 1)
        pil_im = Image.fromarray(im_)
        if rois:
            neptune_im = neptune.Image(name='all the RoIs', description='region proposals', data=pil_im)
            self.im_channels[0].send(x=time_step, y=neptune_im)
        else:
            neptune_im = neptune.Image(name='chosen RoIs', description='object detections', data=pil_im)
            self.im_channels[1].send(x=time_step, y=neptune_im)
[ "ayon01051998@gmail.com" ]
ayon01051998@gmail.com
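A sketch of how the handler is wired up; it must run inside a deepsense Neptune job whose params define learning_rate, num_epochs, roidb, im_folder and pretrained_path, and the numeric channel names are illustrative (the image channel count of two matches the indices 0 and 1 used above):

handler = NeptuneHandler(
    num_channel_names=['train_loss'],
    charts_desc=[('training', 'loss')],
    im_channel_names=['all the RoIs', 'chosen RoIs'])
handler.send_to_neptune(time_point=0, values=[1.25])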
a7bd12e2ee0ac60ddb220bbd563cbe214e9ed450
3fa8d270c7d8f6e1fd608cbe7e5ddbf2b39768f3
/external-calls/httpsGet/src/lambda_function.py
1a3cecae92804d050293b2f412631fda258ecade
[ "Apache-2.0" ]
permissive
knowlsie/alexa-cookbook
f73980df1bcd8aef630d4924e88b8797444d7280
4026ff4ff54194f628736d4c38471546e9b60860
refs/heads/master
2021-01-19T22:41:09.817843
2017-04-18T13:11:47
2017-04-18T13:11:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,541
py
## alexa-cookbook sample code
## There are three sections, Text Strings, Skill Code, and Helper Function(s).
## You can copy and paste the entire file contents as the code for a new Lambda function,
## or copy & paste section #3, the helper function, to the bottom of your existing Lambda code.

## 1. Text strings =====================================================================================================
##  Modify these strings and messages to change the behavior of your Lambda function

myData = 'New York'
myUrl = 'https://cp6gckjt97.execute-api.us-east-1.amazonaws.com/prod/stateresource?usstate='

## 2. Skill Code =======================================================================================================

def speechResponse(say, endSession, sessionAttributes):
    print('say = ' + say)
    print
    return {
        'version': '1.0',
        'sessionAttributes': sessionAttributes,
        'response': {
            'outputSpeech': {
                'type': 'SSML',
                'ssml': say
            },
            'shouldEndSession': endSession
        }
    }

def lambda_handler(event, context):
    # print("Received event: " + json.dumps(event, indent=2))
    if event['request']['type'] == "LaunchRequest":
        pop = httpsGet(myData)   ## see the helper function defined below
        say = "The population of " + myData + " is " + str(pop)
        return speechResponse(say, True, {})
    elif event['request']['type'] == "IntentRequest":
        intentName = event['request']['intent']['name']
        print('handling Intent', intentName)
        say = 'I heard your intent ' + intentName
        return speechResponse(say, False, {})
    elif event['request']['type'] == "SessionEndedRequest":
        say = 'goodbye'
        return speechResponse(say, True, {})

## 3. Helper Function ==================================================================================================

import json
import urllib2

## Requests: a separate Python Library http://docs.python-requests.org/en/master/
## to install with Python PIP:
##   open a command prompt in your /src folder and type
##   pip install requests -t .
import requests

def httpsGet(myData):
    # note: this permanently appends the quoted query value to the module-level
    # myUrl, so repeated invocations in a warm Lambda keep extending the URL
    global myUrl
    myUrl = myUrl + urllib2.quote(myData)
    r = requests.get(myUrl)
    myJs = r.json()
    if 'population' in myJs:
        return myJs['population']
    else:
        print 'Error, web service return data not in expected format'
        return 0
[ "mccaul@amazon.com" ]
mccaul@amazon.com
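For a quick local check, the handler can be driven with a hand-built Alexa-style event (Python 2, like the module itself; the module name is assumed from the path, and the call performs the real HTTPS request):

from lambda_function import lambda_handler

event = {'request': {'type': 'LaunchRequest'}}
print(lambda_handler(event, None)['response']['outputSpeech']['ssml'])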
a8db440dc6845514e7806efca43efbe2f6c0c9bd
c27e6ce3e77dd7ab1f66a2a653893fcc06fc3ad4
/employee_register/migrations/0003_auto_20200812_1039.py
e503bb2bd350fcfa1ffff6c9f5f295392bfa0056
[]
no_license
bokerwere/python-django-employee-registration
f162c01c70fda7979c3904277df3cad0f192abc8
ac6433bfa378b37f7d84c36e0244f42e5b5383da
refs/heads/master
2022-12-12T06:48:35.922474
2020-09-01T12:13:08
2020-09-01T12:13:08
291,987,541
0
0
null
null
null
null
UTF-8
Python
false
false
375
py
# Generated by Django 3.0.8 on 2020-08-12 07:39

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('employee_register', '0002_auto_20200808_1309'),
    ]

    operations = [
        migrations.RenameField(
            model_name='employee',
            old_name='mopile',
            new_name='mobile',
        ),
    ]
[ "josephboker93@gmail.com" ]
josephboker93@gmail.com
8fa27adb7b645a2f9b9ee0039cbef6dac5e7a346
cfb66405a8aa30401c4c9b9cdb5f40edc3cda4a1
/Layout/MainWindow.py
6e703100858bf8bd974a878b76b75390e1523897
[]
no_license
stinger000/CopterHack2020_IR_LED_Desktop_GUI
da42e978ab4eb8ee137ce07a25373c46719e3580
88c2ce10e3ba566a210d9e3a9c277770f2d388b6
refs/heads/master
2023-03-25T04:56:57.216682
2021-03-19T17:44:47
2021-03-19T17:44:47
300,357,485
0
0
null
null
null
null
UTF-8
Python
false
false
3,750
py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'ui\MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(398, 420)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.comboSerial = QtWidgets.QComboBox(self.centralwidget)
        self.comboSerial.setGeometry(QtCore.QRect(0, 20, 231, 22))
        self.comboSerial.setObjectName("comboSerial")
        self.btnConnect = QtWidgets.QPushButton(self.centralwidget)
        self.btnConnect.setGeometry(QtCore.QRect(250, 20, 75, 23))
        self.btnConnect.setObjectName("btnConnect")
        self.listData = QtWidgets.QListWidget(self.centralwidget)
        self.listData.setGeometry(QtCore.QRect(20, 180, 256, 192))
        self.listData.setObjectName("listData")
        self.labelClock = QtWidgets.QLabel(self.centralwidget)
        self.labelClock.setGeometry(QtCore.QRect(20, 80, 141, 51))
        font = QtGui.QFont()
        font.setPointSize(18)
        self.labelClock.setFont(font)
        self.labelClock.setObjectName("labelClock")
        self.btnStart = QtWidgets.QPushButton(self.centralwidget)
        self.btnStart.setGeometry(QtCore.QRect(20, 150, 75, 23))
        self.btnStart.setObjectName("btnStart")
        self.btnStop = QtWidgets.QPushButton(self.centralwidget)
        self.btnStop.setGeometry(QtCore.QRect(120, 150, 75, 23))
        self.btnStop.setObjectName("btnStop")
        self.radioFreeMode = QtWidgets.QRadioButton(self.centralwidget)
        self.radioFreeMode.setGeometry(QtCore.QRect(290, 220, 91, 31))
        self.radioFreeMode.setChecked(True)
        self.radioFreeMode.setObjectName("radioFreeMode")
        self.radioLapsMode = QtWidgets.QRadioButton(self.centralwidget)
        self.radioLapsMode.setGeometry(QtCore.QRect(290, 250, 91, 31))
        self.radioLapsMode.setObjectName("radioLapsMode")
        self.checkFirstLap = QtWidgets.QCheckBox(self.centralwidget)
        self.checkFirstLap.setGeometry(QtCore.QRect(290, 190, 101, 21))
        self.checkFirstLap.setObjectName("checkFirstLap")
        self.spinLaps = QtWidgets.QSpinBox(self.centralwidget)
        self.spinLaps.setEnabled(True)
        self.spinLaps.setGeometry(QtCore.QRect(290, 290, 71, 22))
        self.spinLaps.setMinimum(1)
        self.spinLaps.setObjectName("spinLaps")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        self.radioFreeMode.toggled['bool'].connect(self.spinLaps.setHidden)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Easy Race"))
        self.btnConnect.setText(_translate("MainWindow", "Connect"))
        self.labelClock.setText(_translate("MainWindow", "00:00:00.000"))
        self.btnStart.setText(_translate("MainWindow", "Start"))
        self.btnStop.setText(_translate("MainWindow", "Stop"))
        self.radioFreeMode.setText(_translate("MainWindow", "Free Mode"))
        self.radioLapsMode.setText(_translate("MainWindow", "Laps Mode"))
        self.checkFirstLap.setText(_translate("MainWindow", "Start on first lap"))
[ "bigbani1334@gmail.com" ]
bigbani1334@gmail.com
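Standard PyQt5 wiring for applying a generated Ui class to a live window (boilerplate, not specific to this project):

import sys

from PyQt5 import QtWidgets

from Layout.MainWindow import Ui_MainWindow

app = QtWidgets.QApplication(sys.argv)
win = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(win)   # builds the widgets onto the window
win.show()
sys.exit(app.exec_())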
e6d1540bc6d924824bde87140da6b4be13c83c4f
1c3f011b5e29ce6aa2f65da8b5055454b2be3eab
/battleship_oo1.py
9d048819f5c89625f70f7cba2fde3645aa776cbe
[]
no_license
pythonlectures/battleship
b26f86f6207a04bd8921d6547ee6837e4cf4db99
e35fd6e9fe14f4c612ba5c89be4a9bcae3fdad0d
refs/heads/master
2020-03-23T21:04:07.132029
2018-08-05T08:01:43
2018-08-05T08:01:43
142,079,474
0
0
null
2018-08-11T18:10:52
2018-07-23T23:22:16
Python
UTF-8
Python
false
false
2,740
py
class Game:
    def __init__(self, player1, player2, board):
        self.player1 = player1
        self.player2 = player2
        self.board = board
        self.board.ships.extend(player1.ships)
        self.board.ships.extend(player2.ships)
        self.player_takes_turn = self.player1
        self.winner = None

    @classmethod
    def default(cls):
        player1 = Player("Jack", [Ship(1, 1), Ship(2, 2)])
        player2 = Player("Jill", [Ship(8, 8), Ship(3, 3)])
        board = Board(9)
        return cls(player1, player2, board)

    def play_game(self):
        while self.winner is None:
            shot = self.player_takes_turn.call_your_shot()
            self.take_shot(shot)
            self.winner = self.return_winner()
            self.alternate_turns()
        self.declare_winner()

    def alternate_turns(self):
        self.player_takes_turn = self.player2 if self.player_takes_turn == self.player1 else self.player1

    def declare_winner(self):
        print('{}, you won the game!'.format(self.winner.name))

    def take_shot(self, shot):
        ship_hit = self.board.take_shot(shot)
        if ship_hit:
            print('{}, your shot hit {}!'.format(self.player_takes_turn.name, ship_hit.name))
            self.winner = self.player2
        else:
            print('{}, you missed your shot!'.format(self.player_takes_turn.name))
            self.winner = None

    def return_winner(self):
        if self.player1.has_lost():
            return self.player2
        elif self.player2.has_lost():
            return self.player1
        else:
            return None


class Board:
    def __init__(self, board_len):
        self.coordinates = [(x, y) for x in range(1, board_len) for y in range(1, board_len)]
        self.ships = []

    def take_shot(self, shot):
        for ship in self.ships:
            if ship.is_hit(shot):
                return ship
        return None


class Player:
    def __init__(self, name, ships):
        self.name = name
        self.ships = []
        self.ships.extend(ships)

    def call_your_shot(self):
        return tuple(int(x.strip()) for x in
                     input('{}, call your shot using comma separated coordinates x, y: '.format(self.name)).split(','))

    def has_lost(self):
        return all(ship.is_sunk for ship in self.ships)


class Ship:
    id_counter = 0

    def __init__(self, x, y):
        Ship.id_counter += 1
        self.name = "Ship{}".format(Ship.id_counter)
        self.coordinates = (x, y)
        self.is_sunk = False

    def is_hit(self, shot):
        hit = (shot == self.coordinates)
        if hit:
            self.is_sunk = True
        return hit


if __name__ == "__main__":
    Game.default().play_game()
[ "marco.verduci@outlook.com" ]
marco.verduci@outlook.com
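Game.default() wires up two players and a 9x9 board; a custom game follows the same shape (module name taken from the path above; the __main__ guard keeps the import side-effect free):

from battleship_oo1 import Game, Board, Player, Ship

board = Board(9)
game = Game(Player("Alice", [Ship(2, 3)]), Player("Bob", [Ship(5, 5)]), board)
game.play_game()   # prompts each player in turn for "x, y" shots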
022c0ad1b16fcf2c7fcb93d178db8aeacd866d1b
2e83e004d8a69a773d1e305152edd16e4ea35ed8
/students/humberto_gonzalez/session04/mailroom.py
d46c459282d38bc0a37ac96b496c40d1524f327a
[]
no_license
UWPCE-PythonCert-ClassRepos/SP_Online_PY210
9b170efbab5efedaba8cf541e8fc42c5c8c0934d
76224d0fb871d0bf0b838f3fccf01022edd70f82
refs/heads/master
2021-06-16T20:14:29.754453
2021-02-25T23:03:19
2021-02-25T23:03:19
161,077,720
19
182
null
2021-02-25T23:03:19
2018-12-09T20:18:25
Python
UTF-8
Python
false
false
4,004
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:53:34 2019

@author: humberto gonzalez
"""
import sys
import tempfile
import os
import operator

donor_db = {"Tyrod Taylor": [1000.00, 45.50],
            "Jarvis Landry": [150.25],
            "Philip Rivers": [650.23, 40.87, 111.32],
            "Melvin Gordon": [1677.25, 4300.23, 10532.00],
            "Mike Williams": [230.56, 12.45, 11.00],
            "Antonio Brown": [100.00, 88.88]
            }
donors = list(donor_db.keys())

main_prompt = "\n".join(("Welcome to the Mailroom!",
                         "Please choose from below options:",
                         "1 - Send a Thank You",
                         "2 - Create a report",
                         "3 - Send letters to all donors",
                         "4 - Exit Program",
                         ">>> "))


def create_report_db(db):
    '''Takes in the donor database and creates the required database
    to be used for printing out the report'''
    report_df = {}
    for donor in db:
        donations = db[donor]
        total = round(sum(donations), 2)
        num = len(donations)
        average = round(total / num, 2)
        report_df[donor] = [total, num, average]
    report_df = sorted(report_df.items(), key=operator.itemgetter(1), reverse=True)
    return report_df


def send_thank_you():
    '''Prompts user for a donor and donation amount, and then prints out a thank you email'''
    donor_name = input('What is the full name of the donor you would like to thank? ')
    if donor_name.lower() == "quit":
        main()
    if donor_name not in donors:
        donor_db[donor_name] = []
    donation = input('What was the donation amount? ')
    if donation.lower() == "quit":
        main()
    donor_db[donor_name].append(float(donation))
    print()
    print()
    print(f'Dear {donor_name},\n Thank you for your generous donation of ${donation}')


def create_report():
    '''Creates a report of the current donor database'''
    print('{:20} | {:^10} | {:^10} | {:^10} |'.format("Donor Name", "Total Given", "Num Gifts", "Average Gift"))
    print('-' * 64)
    formatter = "{:20} ${:>10} {:>10} ${:>10}"
    report_db = create_report_db(donor_db)
    for donor_info in report_db:
        donor = donor_info[0]
        donation_info = donor_info[1]
        total = donation_info[0]
        num = donation_info[1]
        average = donation_info[2]
        print(formatter.format(donor, total, num, average))


def send_letters():
    '''Writes and saves letters to all donors in the database in the form of txt files'''
    formatter = '''Dear {},\n Thank you for your generous donation of ${}. \n Your donation will be put to great use. \n Sincerely, \n -The Organization'''
    path = tempfile.gettempdir()
    path = path + "/" + "Letters to Donors"
    os.mkdir(path)
    for donor in donor_db:
        temp = path + "/" + donor.replace(' ', '_') + '.txt'
        with open(temp, 'w') as file:
            txt = formatter.format(donor, donor_db[donor][0])
            file.write(txt)
            file.close()
    print('Letters have been created and saved to \n a new folder in your temp directory')


def exit_program():
    print("Bye!")
    sys.exit()


def main():
    while True:
        response = input(main_prompt)   # continuously collect user selection
        # now redirect to feature functions based on the user selection
        menu_options = {"1": send_thank_you,
                        "2": create_report,
                        "3": send_letters,
                        "4": exit_program}
        if response not in menu_options:
            print()
            print("Please select one of the available options")
            print("You will be returned to the main menu")
            main()
        menu_options.get(response)()


if __name__ == "__main__":
    # don't forget this block to guard against your code running automatically if this module is imported
    main()
[ "Humberto.gonzalezj@gmail.com" ]
Humberto.gonzalezj@gmail.com
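create_report_db() sorts donors by total given (descending) because operator.itemgetter(1) compares the [total, count, average] lists element-wise; a quick check of that behavior (module name taken from the path above):

from mailroom import create_report_db

rows = create_report_db({"A": [10.0, 5.0], "B": [100.0]})
print(rows)   # [('B', [100.0, 1, 100.0]), ('A', [15.0, 2, 7.5])]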
7b6403c7efbad9fe1289f6a2236850d7a726f626
eacff46eda2c6b509449979a16002b96d4645d8e
/Collections-a-installer/community-general-2.4.0/plugins/modules/awall.py
ca3979593c598ecae378543075eff676aa1be9d1
[ "MIT", "GPL-3.0-only", "GPL-3.0-or-later" ]
permissive
d-amien-b/simple-getwordpress
5e6d4d15d5f87124ab591e46b63fec552998fdc3
da90d515a0aa837b633d50db4d91d22b031c04a2
refs/heads/master
2023-04-08T22:13:37.347545
2021-04-06T09:25:51
2021-04-06T09:25:51
351,698,069
0
0
MIT
2021-03-31T16:16:45
2021-03-26T07:30:00
HTML
UTF-8
Python
false
false
15
py
system/awall.py
[ "test@burdo.fr" ]
test@burdo.fr
30728e1d5147122974d91d2d0ec16a80eed21091
06f05e8a3786349edee85a3feb06b5fe8e881c5a
/assets/misc/ExcelHandler.py
4be1ee7fac62cd65b664c361400c50354f464b64
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "MIT" ]
permissive
UbiCastTeam/mediasite_client
fcb3b20c8d70d170d2acac562ff8166e4bd8383a
04f398c0a1dd2ab70272b346065db4e63dfb38fb
refs/heads/master
2023-07-05T18:23:35.439153
2023-06-30T14:53:51
2023-06-30T15:10:23
329,262,964
0
0
MIT
2021-06-28T16:21:42
2021-01-13T09:53:23
null
UTF-8
Python
false
false
867
py
# Referenced from:
# https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch12s08.html

from xml.sax import ContentHandler, parse


class ExcelHandler(ContentHandler):

    def __init__(self):
        self.chars = []
        self.cells = []
        self.rows = []
        self.tables = []

    def characters(self, content):
        self.chars.append(content)

    def startElement(self, name, atts):
        if name == "Cell":
            self.chars = []
        elif name == "Row":
            self.cells = []
        elif name == "Table":
            self.rows = []

    def endElement(self, name):
        if name == "Cell":
            self.cells.append(''.join(self.chars))
        elif name == "Row":
            self.rows.append(self.cells)
        elif name == "Table":
            self.tables.append(self.rows)
[ "d33bs@users.noreply.github.com" ]
d33bs@users.noreply.github.com
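The handler accumulates SpreadsheetML (Excel 2003 XML) content into nested lists; typical use (the file name is a placeholder, and the import assumes the module name from the path above):

from xml.sax import parse

from ExcelHandler import ExcelHandler

handler = ExcelHandler()
parse('workbook.xml', handler)   # any Excel 2003 XML workbook
print(handler.tables[0][0])      # first row of the first table, as a list of strings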
d7366a977e0afa6e57c3e3e0f85d765de473224f
a00443d297229f1631eb14b2fa5359b37ed6ace7
/Scripts/pip3.7-script.py
93e6c18ca6cae0ed63e4add0a85b977ad98f32a9
[]
no_license
ugoodumegwu/my-first-blog
9e4f0df4fe971246515e827b362bbfef09f8813f
ce891b3a62efb5c5933bd65db14a25dc186114b4
refs/heads/master
2020-06-11T23:28:19.434299
2019-06-28T11:32:08
2019-06-28T11:32:08
194,120,844
0
0
null
null
null
null
UTF-8
Python
false
false
412
py
#!C:\Users\user\PycharmProjects\anywhere\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
[ "ugoodumegwu@gmail.com" ]
ugoodumegwu@gmail.com
075b9c4afc1dee1fe7094ba2c41a89815e17e658
a0e4be557dd32d6a89ea3d86166e23d4c6de3101
/6.00.1x_scripts/Week 4/Problem Set 4/ps4a_wordgame.py
70e23ae820dce6c9e9deadde6e4856e28f2501a2
[ "Giftware" ]
permissive
acpfog/python
65d8016d115ad5ae62314ed809375b93fb692bf6
1ec01bf1c7feeaa420fe08b5936c53b8d0e03879
refs/heads/master
2018-12-07T02:43:18.127737
2018-09-11T06:34:07
2018-09-11T06:34:07
32,280,908
0
0
null
null
null
null
UTF-8
Python
false
false
10,505
py
# # A Word Game: Part a # # This game is a lot like Scrabble or Words With Friends, if you've played those. # Letters are dealt to players, who then construct one or more words out of their letters. # Each valid word receives a score, based on the length of the word and the letters in that word. # # The rules of the game are as follows: # # Dealing # A player is dealt a hand of n letters chosen at random (assume n=7 for now). # # The player arranges the hand into as many words as they want out of the letters, using each letter at most once. # # Some letters may remain unused (these won't be scored). # # Scoring # The score for the hand is the sum of the scores for each word formed. # # The score for a word is the sum of the points for letters in the word, multiplied by the length of the word, # plus 50 points if all n letters are used on the first word created. # # Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth 3, D is worth 2, E is worth 1, and so on. # We have defined the dictionary SCRABBLE_LETTER_VALUES that maps each lowercase letter to its Scrabble letter value. # # For example, 'weed' would be worth 32 points ((4+1+1+2) for the four letters, then multiply by len('weed') to get (4+1+1+2)*4 = 32). # Be sure to check that the hand actually has 1 'w', 2 'e's, and 1 'd' before scoring the word! # # As another example, if n=7 and you make the word 'waybill' on the first try, it would be worth 155 points # (the base score for 'waybill' is (4+1+4+3+1+1+1)*7=105, plus an additional 50 point bonus for using all n letters). # import random import string VOWELS = 'aeiou' CONSONANTS = 'bcdfghjklmnpqrstvwxyz' HAND_SIZE = 7 SCRABBLE_LETTER_VALUES = { 'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10 } WORDLIST_FILENAME = "words.txt" def loadWords(): """ Returns a list of valid words. Words are strings of lowercase letters. Depending on the size of the word list, this function may take a while to finish. """ print "Loading word list from file..." # inFile: file inFile = open(WORDLIST_FILENAME, 'r', 0) # wordList: list of strings wordList = [] for line in inFile: wordList.append(line.strip().lower()) print " ", len(wordList), "words loaded." return wordList def getFrequencyDict(sequence): """ Returns a dictionary where the keys are elements of the sequence and the values are integer counts, for the number of times that an element is repeated in the sequence. sequence: string or list return: dictionary """ # freqs: dictionary (element_type -> int) freq = {} for x in sequence: freq[x] = freq.get(x,0) + 1 return freq # # Scoring a word # def getWordScore(word, n): """ Returns the score for a word. Assumes the word is a valid word. The score for a word is the sum of the points for letters in the word, multiplied by the length of the word, PLUS 50 points if all n letters are used on the first turn. Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES) word: string (lowercase letters) n: integer (HAND_SIZE; i.e., hand size required for additional points) returns: int >= 0 """ score = 0 if len(word) > 0 and len(word) == n: bonus = 50 else: bonus = 0 for letter in word: score += SCRABBLE_LETTER_VALUES[letter] return score * len(word) + bonus # # Make sure you understand how this function works and what it does! 
# def displayHand(hand): """ Displays the letters currently in the hand. For example: >>> displayHand({'a':1, 'x':2, 'l':3, 'e':1}) Should print out something like: a x x l l l e The order of the letters is unimportant. hand: dictionary (string -> int) """ for letter in hand.keys(): for j in range(hand[letter]): print letter, # print all on the same line print # print an empty line # # Make sure you understand how this function works and what it does! # def dealHand(n): """ Returns a random hand containing n lowercase letters. At least n/3 the letters in the hand should be VOWELS. Hands are represented as dictionaries. The keys are letters and the values are the number of times the particular letter is repeated in that hand. n: int >= 0 returns: dictionary (string -> int) """ hand={} numVowels = n / 3 for i in range(numVowels): x = VOWELS[random.randrange(0,len(VOWELS))] hand[x] = hand.get(x, 0) + 1 for i in range(numVowels, n): x = CONSONANTS[random.randrange(0,len(CONSONANTS))] hand[x] = hand.get(x, 0) + 1 return hand # # Update a hand by removing letters # def updateHand(hand, word): """ Assumes that 'hand' has all the letters in word. In other words, this assumes that however many times a letter appears in 'word', 'hand' has at least as many of that letter in it. Updates the hand: uses up the letters in the given word and returns the new hand, without those letters in it. Has no side effects: does not modify hand. word: string hand: dictionary (string -> int) returns: dictionary (string -> int) """ hand2 = hand.copy() for letter in word: if hand2.get(letter,0) == 0: hand2[letter] = 0 else: hand2[letter] -= 1 return hand2 # # Test word validity # def isValidWord(word, hand, wordList): """ Returns True if word is in the wordList and is entirely composed of letters in the hand. Otherwise, returns False. Does not mutate hand or wordList. word: string hand: dictionary (string -> int) wordList: list of lowercase strings """ hand2 = hand.copy() for letter in word: if hand2.get(letter,0) == 0: return False else: hand2[letter] -= 1 if word in wordList: return True else: return False # # Playing a hand # def calculateHandlen(hand): """ Returns the length (number of letters) in the current hand. hand: dictionary (string-> int) returns: integer """ num = 0 for letter in hand.keys(): if hand.get(letter,0) != 0: num += hand[letter] return num def playHand(hand, wordList, n): """ Allows the user to play the given hand, as follows: * The hand is displayed. * The user may input a word or a single period (the string ".") to indicate they're done playing * Invalid words are rejected, and a message is displayed asking the user to choose another word until they enter a valid word or "." * When a valid word is entered, it uses up letters from the hand. * After every valid word: the score for that word is displayed, the remaining letters in the hand are displayed, and the user is asked to input another word. * The sum of the word scores is displayed when the hand finishes. * The hand finishes when there are no more unused letters or the user inputs a "." hand: dictionary (string -> int) wordList: list of lowercase strings n: integer (HAND_SIZE; i.e., hand size required for additional points) """ # BEGIN PSEUDOCODE <-- Remove this comment when you code this function; do your coding within the pseudocode (leaving those comments in-place!) 
    # Keep track of the total score
    total_score = 0
    # As long as there are still letters left in the hand:
    while calculateHandlen(hand) > 0:
        # Display the hand
        print "Current Hand: ",
        displayHand(hand)
        # Ask user for input
        word = str(raw_input("Enter word, or a \".\" to indicate that you are finished: ")).lower()
        # If the input is a single period:
        if word == ".":
            # End the game (break out of the loop)
            print "Goodbye!",
            break
        # Otherwise (the input is not a single period):
        else:
            # If the word is not valid:
            if not isValidWord(word, hand, wordList):
                # Reject invalid word (print a message followed by a blank line)
                print "Invalid word, please try again.\n"
            # Otherwise (the word is valid):
            else:
                # Tell the user how many points the word earned, and the updated
                # total score, in one line followed by a blank line
                word_score = getWordScore(word, n)
                total_score += word_score
                print "\"%s\" earned %s points. Total: %s points\n" % (
                    word, word_score, total_score
                )
                # Update the hand
                hand = updateHand(hand, word)

    # Game is over (user entered a '.' or ran out of letters), so tell the user
    # the total score either way (the original skipped the total after a '.').
    if calculateHandlen(hand) == 0:
        print "Ran out of letters.",
    print "Total score: %s points.\n" % total_score


#
# Playing a game
#

def playGame(wordList):
    """
    Allow the user to play an arbitrary number of hands.

    1) Asks the user to input 'n' or 'r' or 'e'.
      * If the user inputs 'n', let the user play a new (random) hand.
      * If the user inputs 'r', let the user play the last hand again.
      * If the user inputs 'e', exit the game.
      * If the user inputs anything else, tell them their input was invalid.

    2) When done playing the hand, repeat from step 1
    """
    hand = None  # clearer than the original "'hand' in locals()" check
    while True:
        action = str(raw_input("Enter n to deal a new hand, r to replay the last hand, or e to end game: ")).lower()
        if action == "n":
            hand = dealHand(HAND_SIZE)
            playHand(hand, wordList, HAND_SIZE)
        elif action == "r":
            if hand is not None:
                playHand(hand, wordList, HAND_SIZE)
            else:
                print "You have not played a hand yet. Please play a new hand first!\n"
        elif action == "e":
            return
        else:
            print "Invalid command."


#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
    wordList = loadWords()
    playGame(wordList)
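# ---------------------------------------------------------------------------
# Quick sanity check (editor's illustrative addition, not part of the original
# assignment): it verifies the two scoring examples given in the header.
#
#   getWordScore('weed', 7)    == (4 + 1 + 1 + 2) * 4           == 32
#   getWordScore('waybill', 7) == (4+1+4+3+1+1+1) * 7 + 50 bonus == 155
# ---------------------------------------------------------------------------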
[ "acpfog@gmail.com" ]
acpfog@gmail.com
e7a6a0fe0f7e05f27f6c11b927615a33570870ae
b560c65b2cdaa92333d114d79cf2a191c6da5852
/client.py
29faa95073402246f8263fbb591214aa58c647aa
[]
no_license
Ranger11Danger/python-server-client
3823be1b3cc761c6f75343c0fc3c058742746f9c
b587d961184944853c7ca3636fd173b98964d5f3
refs/heads/master
2020-09-23T04:13:03.990788
2019-12-02T14:51:49
2019-12-02T14:51:49
225,398,857
1
1
null
null
null
null
UTF-8
Python
false
false
383
py
#!/usr/bin/python3
import socket
import os
import time

# Connection target; '0.0.0.0' conventionally resolves to the local host
# on most systems, so by default this client talks to a server on this machine.
ip = '0.0.0.0'
port = 1337

s = socket.create_connection((ip, port))

while True:
    message = input('To server: ')
    # Special command: before the message reaches the server, open a local
    # ncat TLS listener on port 8888 in a new terminal; the server side is
    # expected to connect back to it with a shell.
    if '#shell' in message:
        os.system("gnome-terminal -e 'bash -c \"ncat -nlvp 8888 --ssl\" '")
        time.sleep(3)
    s.send(message.encode())
    data = s.recv(1024)
    print(data.decode('utf-8'))
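# Editor's note -- a minimal counterpart for local testing (illustrative
# sketch only; the real server lives elsewhere in this repository):
#
#   import socket
#   srv = socket.create_server(('0.0.0.0', 1337))  # Python 3.8+
#   conn, _ = srv.accept()
#   while True:
#       msg = conn.recv(1024)
#       conn.send(b'echo: ' + msg)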
[ "noreply@github.com" ]
noreply@github.com
3f124ab243ee4915437638872d37c259c7a7ea28
e3396ba55ba855e146963aeb4733f0ce2725892b
/result/145/ant.py
a5e2a21c04735fe5cf42d467a81de5c361fc8e03
[]
no_license
masashihamaguchi/contest-2018-GOMOKU
2ad93794a72370801bbcfd628cf4ed03a87c4313
e6b2fd0aa1b4095cfd962078ed2ec69068e929c5
refs/heads/master
2021-09-23T03:24:59.725681
2018-09-20T08:01:13
2018-09-20T08:01:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,774
py
#!/usr/bin/python3

N = 15


# The main routine of AI.
# input: str[N][N] field : state of the field.
# output: int[2] : where to put a stone in this turn.
def Think(field):
    CENTER = (N // 2, N // 2)
    best_position = (0, 0)
    best_heuristic = 0
    for i in range(N):
        for j in range(N):
            if field[i][j] != '.':
                continue
            position = (i, j)

            # Assume to put a stone on (i, j).
            field[i][j] = 'O'
            longest, heuristic = CanHaveFiveStones(field, position)
            if longest >= 5:
                # Winning move: we complete five in a row here.
                return position
            elif heuristic > best_heuristic:
                best_position = position
                best_heuristic = heuristic

            # Now assume the opponent plays (i, j) instead.
            field[i][j] = 'X'
            if OppHaveNumStones(field, position, 5):
                DebugPrint('I have a winning choice at (%d, %d)' % (i, j))
                return position
            field[i][j] = '.'

            # While no line heuristic has fired yet, prefer squares
            # closer to the center of the board.
            if (GetDistance(best_position, CENTER) > GetDistance(position, CENTER)
                    and best_heuristic == 0):
                best_position = position
    return best_position


def OppHaveNumStones(field, position, num):
    """True if the opponent has |num| or more stones in a row through |position|."""
    return (CountOppStonesOnLine(field, position, (1, 1)) >= num or
            CountOppStonesOnLine(field, position, (1, 0)) >= num or
            CountOppStonesOnLine(field, position, (1, -1)) >= num or
            CountOppStonesOnLine(field, position, (0, 1)) >= num)


def CountOppStonesOnLine(field, position, diff):
    """Number of consecutive opponent stones through |position| along |diff|."""
    count = 0
    row, col = position
    while 0 <= row < N and 0 <= col < N and field[row][col] == 'X':
        row += diff[0]
        col += diff[1]
        count += 1
    row = position[0] - diff[0]
    col = position[1] - diff[1]
    while 0 <= row < N and 0 <= col < N and field[row][col] == 'X':
        row -= diff[0]
        col -= diff[1]
        count += 1
    return count


def CanHaveFiveStones(field, position):
    """Returns (longest, heuristic) for our stones through |position|:
    the longest line in any of the four directions, and the product of
    (line length - 1) over all directions, used as a tie-breaking score.
    (The original computed every direction twice; each is counted once here.)"""
    counts = [CountStonesOnLine(field, position, diff)
              for diff in ((1, 1), (1, 0), (0, 1), (1, -1))]
    longest = max(counts)
    heuristic = 1
    for c in counts:
        heuristic *= (c - 1)
    return longest, heuristic


# Returns the number of stones you can put around |position| in the direction specified by |diff|.
def CountStonesOnLine(field, position, diff):
    count = 0
    row, col = position
    while 0 <= row < N and 0 <= col < N and field[row][col] == 'O':
        row += diff[0]
        col += diff[1]
        count += 1
    row = position[0] - diff[0]
    col = position[1] - diff[1]
    while 0 <= row < N and 0 <= col < N and field[row][col] == 'O':
        row -= diff[0]
        col -= diff[1]
        count += 1
    return count


# Returns the Manhattan distance from |a| to |b|.
def GetDistance(a, b): return abs(a[0] - b[0]) + abs(a[1] - b[1]) # ============================================================================= # DO NOT EDIT FOLLOWING FUNCTIONS # ============================================================================= def main(): field = Input() position = Think(field) Output(position) def Input(): field = [list(input()) for i in range(N)] return field def Output(position): print(position[0], position[1]) # Outputs |msg| to stderr; This is actually a thin wrapper of print(). def DebugPrint(*msg): import sys print(*msg, file=sys.stderr) if __name__ == '__main__': main()
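# Example round trip (editor's illustrative note): the judge writes N=15 rows
# of 15 characters to stdin ('.' empty, 'O' our stones, 'X' the opponent's),
# and this program answers with "row col" on stdout -- e.g. "7 7", the center
# square, for an empty board.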
[ "sknzw2608@gmail.com" ]
sknzw2608@gmail.com
07b9a817a5d2523a16a0e03b7548ae96e1853340
192b040fb4487d4634c41cdf9c66042853749937
/colat/utils/stylegan_helper.py
796ba7ca92e7cfdc1374a0100a6f519676562d52
[]
no_license
kkodoo/latentclr
f62dbdb50d3a9ad0cd3869618c973d88cf4406fb
f5e88ee90f5c5dc38a42972117acf419dfa39da9
refs/heads/main
2023-08-29T10:24:46.831681
2021-10-11T19:32:25
2021-10-11T19:32:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
22,962
py
# Copyright 2020 Erik Härkönen. All rights reserved. # This file is licensed to you under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. You may obtain a copy # of the License at http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. import pickle import sys from collections import OrderedDict from pathlib import Path import numpy as np import requests import torch import torch.nn as nn import torch.nn.functional as F # Reimplementation of StyleGAN in PyTorch # Source: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__( self, input_size, output_size, gain=2 ** (0.5), use_wscale=False, lrmul=1, bias=True, ): super().__init__() he_std = gain * input_size ** (-0.5) # He init # Equalized learning rate and custom learning rate multiplier. if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter( torch.randn(output_size, input_size) * init_std ) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) class MyConv2d(nn.Module): """Conv layer with equalized learning rate and custom learning rate multiplier.""" def __init__( self, input_channels, output_channels, kernel_size, gain=2 ** (0.5), use_wscale=False, lrmul=1, bias=True, intermediate=None, upscale=False, ): super().__init__() if upscale: self.upscale = Upscale2d() else: self.upscale = None he_std = gain * (input_channels * kernel_size ** 2) ** (-0.5) # He init self.kernel_size = kernel_size if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter( torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std ) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_channels)) self.b_mul = lrmul else: self.bias = None self.intermediate = intermediate def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul have_convolution = False if self.upscale is not None and min(x.shape[2:]) * 2 >= 128: # this is the fused upscale + conv from StyleGAN, sadly this seems incompatible with the non-fused way # this really needs to be cleaned up and go into the conv... w = self.weight * self.w_mul w = w.permute(1, 0, 2, 3) # probably applying a conv on w would be more efficient. also this quadruples the weight (average)?! 
w = F.pad(w, (1, 1, 1, 1)) w = ( w[:, :, 1:, 1:] + w[:, :, :-1, 1:] + w[:, :, 1:, :-1] + w[:, :, :-1, :-1] ) x = F.conv_transpose2d(x, w, stride=2, padding=(w.size(-1) - 1) // 2) have_convolution = True elif self.upscale is not None: x = self.upscale(x) if not have_convolution and self.intermediate is None: return F.conv2d( x, self.weight * self.w_mul, bias, padding=self.kernel_size // 2 ) elif not have_convolution: x = F.conv2d( x, self.weight * self.w_mul, None, padding=self.kernel_size // 2 ) if self.intermediate is not None: x = self.intermediate(x) if bias is not None: x = x + bias.view(1, -1, 1, 1) return x class NoiseLayer(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, x, noise=None): if noise is None and self.noise is None: noise = torch.randn( x.size(0), 1, x.size(2), x.size(3), device=x.device, dtype=x.dtype ) elif noise is None: # here is a little trick: if you get all the noiselayers and set each # modules .noise attribute, you can have pre-defined noise. # Very useful for analysis noise = self.noise x = x + self.weight.view(1, -1, 1, 1) * noise return x class StyleMod(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(StyleMod, self).__init__() self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale=use_wscale) def forward(self, x, latent): style = self.lin(latent) # style => [batch_size, n_channels*2] shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1] style = style.view(shape) # [batch_size, 2, n_channels, ...] x = x * (style[:, 0] + 1.0) + style[:, 1] return x class PixelNormLayer(nn.Module): def __init__(self, epsilon=1e-8): super().__init__() self.epsilon = epsilon def forward(self, x): return x * torch.rsqrt(torch.mean(x ** 2, dim=1, keepdim=True) + self.epsilon) class BlurLayer(nn.Module): def __init__(self, kernel=[1, 2, 1], normalize=True, flip=False, stride=1): super(BlurLayer, self).__init__() kernel = [1, 2, 1] kernel = torch.tensor(kernel, dtype=torch.float32) kernel = kernel[:, None] * kernel[None, :] kernel = kernel[None, None] if normalize: kernel = kernel / kernel.sum() if flip: kernel = kernel[:, :, ::-1, ::-1] self.register_buffer("kernel", kernel) self.stride = stride def forward(self, x): # expand kernel channels kernel = self.kernel.expand(x.size(1), -1, -1, -1) x = F.conv2d( x, kernel, stride=self.stride, padding=int((self.kernel.size(2) - 1) / 2), groups=x.size(1), ) return x def upscale2d(x, factor=2, gain=1): assert x.dim() == 4 if gain != 1: x = x * gain if factor != 1: shape = x.shape x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand( -1, -1, -1, factor, -1, factor ) x = x.contiguous().view( shape[0], shape[1], factor * shape[2], factor * shape[3] ) return x class Upscale2d(nn.Module): def __init__(self, factor=2, gain=1): super().__init__() assert isinstance(factor, int) and factor >= 1 self.gain = gain self.factor = factor def forward(self, x): return upscale2d(x, factor=self.factor, gain=self.gain) class G_mapping(nn.Sequential): def __init__(self, nonlinearity="lrelu", use_wscale=True): act, gain = { "relu": (torch.relu, np.sqrt(2)), "lrelu": (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2)), }[nonlinearity] layers = [ ("pixel_norm", PixelNormLayer()), ( "dense0", MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale), ), ("dense0_act", act), ( "dense1", MyLinear(512, 512, gain=gain, lrmul=0.01, 
use_wscale=use_wscale),
            ),
            ("dense1_act", act),
            (
                "dense2",
                MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
            ),
            ("dense2_act", act),
            (
                "dense3",
                MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
            ),
            ("dense3_act", act),
            (
                "dense4",
                MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
            ),
            ("dense4_act", act),
            (
                "dense5",
                MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
            ),
            ("dense5_act", act),
            (
                "dense6",
                MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
            ),
            ("dense6_act", act),
            (
                "dense7",
                MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
            ),
            ("dense7_act", act),
        ]
        super().__init__(OrderedDict(layers))

    def forward(self, x):
        return super().forward(x)


class Truncation(nn.Module):
    def __init__(self, avg_latent, max_layer=8, threshold=0.7):
        super().__init__()
        self.max_layer = max_layer
        self.threshold = threshold
        self.register_buffer("avg_latent", avg_latent)

    def forward(self, x):
        assert x.dim() == 3
        interp = torch.lerp(self.avg_latent, x, self.threshold)
        do_trunc = (torch.arange(x.size(1)) < self.max_layer).view(1, -1, 1)
        return torch.where(do_trunc, interp, x)


class LayerEpilogue(nn.Module):
    """Things to do at the end of each layer."""

    def __init__(
        self,
        channels,
        dlatent_size,
        use_wscale,
        use_noise,
        use_pixel_norm,
        use_instance_norm,
        use_styles,
        activation_layer,
    ):
        super().__init__()
        layers = []
        if use_noise:
            layers.append(("noise", NoiseLayer(channels)))
        layers.append(("activation", activation_layer))
        if use_pixel_norm:
            # Fixed: the class defined above is PixelNormLayer; the original
            # referenced an undefined PixelNorm here.
            layers.append(("pixel_norm", PixelNormLayer()))
        if use_instance_norm:
            layers.append(("instance_norm", nn.InstanceNorm2d(channels)))
        self.top_epi = nn.Sequential(OrderedDict(layers))
        if use_styles:
            self.style_mod = StyleMod(dlatent_size, channels, use_wscale=use_wscale)
        else:
            self.style_mod = None

    def forward(self, x, dlatents_in_slice=None):
        x = self.top_epi(x)
        if self.style_mod is not None:
            x = self.style_mod(x, dlatents_in_slice)
        else:
            assert dlatents_in_slice is None
        return x


class InputBlock(nn.Module):
    def __init__(
        self,
        nf,
        dlatent_size,
        const_input_layer,
        gain,
        use_wscale,
        use_noise,
        use_pixel_norm,
        use_instance_norm,
        use_styles,
        activation_layer,
    ):
        super().__init__()
        self.const_input_layer = const_input_layer
        self.nf = nf
        if self.const_input_layer:
            # called 'const' in tf
            self.const = nn.Parameter(torch.ones(1, nf, 4, 4))
            self.bias = nn.Parameter(torch.ones(nf))
        else:
            self.dense = MyLinear(
                dlatent_size, nf * 16, gain=gain / 4, use_wscale=use_wscale
            )  # tweak gain to match the official implementation of Progressive GAN
        self.epi1 = LayerEpilogue(
            nf,
            dlatent_size,
            use_wscale,
            use_noise,
            use_pixel_norm,
            use_instance_norm,
            use_styles,
            activation_layer,
        )
        self.conv = MyConv2d(nf, nf, 3, gain=gain, use_wscale=use_wscale)
        self.epi2 = LayerEpilogue(
            nf,
            dlatent_size,
            use_wscale,
            use_noise,
            use_pixel_norm,
            use_instance_norm,
            use_styles,
            activation_layer,
        )

    def forward(self, dlatents_in_range):
        batch_size = dlatents_in_range.size(0)
        if self.const_input_layer:
            x = self.const.expand(batch_size, -1, -1, -1)
            x = x + self.bias.view(1, -1, 1, 1)
        else:
            x = self.dense(dlatents_in_range[:, 0]).view(batch_size, self.nf, 4, 4)
        x = self.epi1(x, dlatents_in_range[:, 0])
        x = self.conv(x)
        x = self.epi2(x, dlatents_in_range[:, 1])
        return x


class GSynthesisBlock(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        blur_filter,
        dlatent_size,
        gain,
        use_wscale,
        use_noise,
        use_pixel_norm,
        use_instance_norm,
        use_styles,
        activation_layer,
    ):
        # 2**res x 2**res
        # res = 3..resolution_log2
        super().__init__()
if blur_filter: blur = BlurLayer(blur_filter) else: blur = None self.conv0_up = MyConv2d( in_channels, out_channels, kernel_size=3, gain=gain, use_wscale=use_wscale, intermediate=blur, upscale=True, ) self.epi1 = LayerEpilogue( out_channels, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer, ) self.conv1 = MyConv2d( out_channels, out_channels, kernel_size=3, gain=gain, use_wscale=use_wscale ) self.epi2 = LayerEpilogue( out_channels, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer, ) def forward(self, x, dlatents_in_range): x = self.conv0_up(x) x = self.epi1(x, dlatents_in_range[:, 0]) x = self.conv1(x) x = self.epi2(x, dlatents_in_range[:, 1]) return x class G_synthesis(nn.Module): def __init__( self, dlatent_size=512, # Disentangled latent (W) dimensionality. num_channels=3, # Number of output color channels. resolution=1024, # Output resolution. fmap_base=8192, # Overall multiplier for the number of feature maps. fmap_decay=1.0, # log2 feature map reduction when doubling the resolution. fmap_max=512, # Maximum number of feature maps in any layer. use_styles=True, # Enable style inputs? const_input_layer=True, # First layer is a learned constant? use_noise=True, # Enable noise inputs? randomize_noise=True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables. nonlinearity="lrelu", # Activation function: 'relu', 'lrelu' use_wscale=True, # Enable equalized learning rate? use_pixel_norm=False, # Enable pixelwise feature vector normalization? use_instance_norm=True, # Enable instance normalization? dtype=torch.float32, # Data type to use for activations and outputs. blur_filter=[ 1, 2, 1, ], # Low-pass filter to apply when resampling activations. None = no filtering. ): super().__init__() def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) self.dlatent_size = dlatent_size resolution_log2 = int(np.log2(resolution)) assert resolution == 2 ** resolution_log2 and resolution >= 4 act, gain = { "relu": (torch.relu, np.sqrt(2)), "lrelu": (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2)), }[nonlinearity] num_layers = resolution_log2 * 2 - 2 num_styles = num_layers if use_styles else 1 torgbs = [] blocks = [] for res in range(2, resolution_log2 + 1): channels = nf(res - 1) name = "{s}x{s}".format(s=2 ** res) if res == 2: blocks.append( ( name, InputBlock( channels, dlatent_size, const_input_layer, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, act, ), ) ) else: blocks.append( ( name, GSynthesisBlock( last_channels, channels, blur_filter, dlatent_size, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, act, ), ) ) last_channels = channels self.torgb = MyConv2d(channels, num_channels, 1, gain=1, use_wscale=use_wscale) self.blocks = nn.ModuleDict(OrderedDict(blocks)) def forward(self, dlatents_in): # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size]. 
# lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype) batch_size = dlatents_in.size(0) for i, m in enumerate(self.blocks.values()): if i == 0: x = m(dlatents_in[:, 2 * i : 2 * i + 2]) else: x = m(x, dlatents_in[:, 2 * i : 2 * i + 2]) rgb = self.torgb(x) return rgb class StyleGAN_G(nn.Sequential): def __init__(self, resolution, truncation=1.0): self.resolution = resolution self.layers = OrderedDict( [ ("g_mapping", G_mapping()), # ('truncation', Truncation(avg_latent)), ("g_synthesis", G_synthesis(resolution=resolution)), ] ) super().__init__(self.layers) def forward(self, x, latent_is_w=False): if isinstance(x, list): assert len(x) == 18, "Must provide 1 or 18 latents" if not latent_is_w: x = [self.layers["g_mapping"].forward(l) for l in x] x = torch.stack(x, dim=1) else: if not latent_is_w: x = self.layers["g_mapping"].forward(x) x = x.unsqueeze(1).expand(-1, 18, -1) x = self.layers["g_synthesis"].forward(x) return x # From: https://github.com/lernapparat/lernapparat/releases/download/v2019-02-01/ def load_weights(self, checkpoint): self.load_state_dict(torch.load(checkpoint)) def export_from_tf(self, pickle_path): module_path = Path(__file__).parent / "stylegan_tf" sys.path.append(str(module_path.resolve())) import collections import pickle import dnnlib import dnnlib.tflib import torch dnnlib.tflib.init_tf() weights = pickle.load(open(pickle_path, "rb")) weights_pt = [ collections.OrderedDict( [ (k, torch.from_numpy(v.value().eval())) for k, v in w.trainables.items() ] ) for w in weights ] # torch.save(weights_pt, pytorch_name) # then on the PyTorch side run ( state_G, state_D, state_Gs, ) = weights_pt # torch.load('./karras2019stylegan-ffhq-1024x1024.pt') def key_translate(k): k = k.lower().split("/") if k[0] == "g_synthesis": if not k[1].startswith("torgb"): k.insert(1, "blocks") k = ".".join(k) k = ( k.replace("const.const", "const") .replace("const.bias", "bias") .replace("const.stylemod", "epi1.style_mod.lin") .replace("const.noise.weight", "epi1.top_epi.noise.weight") .replace("conv.noise.weight", "epi2.top_epi.noise.weight") .replace("conv.stylemod", "epi2.style_mod.lin") .replace("conv0_up.noise.weight", "epi1.top_epi.noise.weight") .replace("conv0_up.stylemod", "epi1.style_mod.lin") .replace("conv1.noise.weight", "epi2.top_epi.noise.weight") .replace("conv1.stylemod", "epi2.style_mod.lin") .replace("torgb_lod0", "torgb") ) else: k = ".".join(k) return k def weight_translate(k, w): k = key_translate(k) if k.endswith(".weight"): if w.dim() == 2: w = w.t() elif w.dim() == 1: pass else: assert w.dim() == 4 w = w.permute(3, 2, 0, 1) return w # we delete the useless torgb filters param_dict = { key_translate(k): weight_translate(k, v) for k, v in state_Gs.items() if "torgb_lod" not in key_translate(k) } if 1: sd_shapes = {k: v.shape for k, v in self.state_dict().items()} param_shapes = {k: v.shape for k, v in param_dict.items()} for k in list(sd_shapes) + list(param_shapes): pds = param_shapes.get(k) sds = sd_shapes.get(k) if pds is None: print("sd only", k, sds) elif sds is None: print("pd only", k, pds) elif sds != pds: print("mismatch!", k, pds, sds) self.load_state_dict(param_dict, strict=False) # needed for the blur kernels torch.save(self.state_dict(), Path(pickle_path).with_suffix(".pt"))
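# Usage sketch (editor's illustrative addition; the checkpoint filename is
# taken from the comment inside export_from_tf above and is an assumption,
# not a file shipped with this module):
#
#   g = StyleGAN_G(resolution=1024)
#   g.load_weights('karras2019stylegan-ffhq-1024x1024.pt')
#   z = torch.randn(4, 512)          # latent z, mapped to w internally
#   imgs = g(z)                      # -> (4, 3, 1024, 1024) RGB batch
#   w = g.layers['g_mapping'](z)     # or feed a w latent directly:
#   imgs = g(w, latent_is_w=True)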
[ "okyksl@gmail.com" ]
okyksl@gmail.com
8574e74809fd2344a0ec2897ad43bff7a1dd3a40
7f8483b792bd2dfa732a13c26590833f5902f083
/Aula05/manipulacao_canais.py
a6f1a212109a13ac562163dda03b1d47a9abcc99
[ "MIT" ]
permissive
thiagopollachini/introducao-opencv
c905c61a65a966d50df2838ba8463a7e0725a765
c1b63bb2aca008821489f65479c957ce4f925c80
refs/heads/master
2021-09-13T23:50:20.365076
2018-05-06T03:19:51
2018-05-06T03:19:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
473
py
# Importing the OpenCV library
import cv2
# Importing the scientific computing library
import numpy as np

# Reading the image
imagem_original = cv2.imread('../images/lena.png', 1)

# Splitting the image channels (OpenCV stores them as B, G, R)
(B, G, R) = cv2.split(imagem_original)

cv2.imshow("Red", R)
cv2.imshow("Green", G)
cv2.imshow("Blue", B)
cv2.imshow("Original", imagem_original)
cv2.waitKey(0)

# Merging the channels back together
fusao = cv2.merge([B, G, R])
cv2.imshow("Merged image", fusao)
cv2.waitKey(0)
cv2.destroyAllWindows()
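# Note (editor's addition): cv2.split is equivalent to NumPy slicing on the
# last axis -- e.g. B is imagem_original[:, :, 0] -- because OpenCV loads
# color images in BGR channel order.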
[ "viniciuscampos120@gmail.com" ]
viniciuscampos120@gmail.com
08455de6972b66d2d49d9e1f4df28605236f0491
73c0d0b755182ca5ae182637677aefb3f2f572b6
/scoring_program/libscores.py
96829266b92e2701193ec70504993fc5e5b1afea
[]
no_license
Kihansi95/ChallengePredictSales
1c52b253f86d55188b60119713b7604bec970e75
6e3f602b41c736c51115cc74946c6b6a1cf8ea0b
refs/heads/master
2020-05-01T10:31:13.318397
2019-05-05T22:32:55
2019-05-05T22:32:55
177,422,235
0
0
null
null
null
null
UTF-8
Python
false
false
32,028
py
# Score library for NUMPY arrays # ChaLearn AutoML challenge # For regression: # solution and prediction are vectors of numerical values of the same dimension # For classification: # solution = array(p,n) of 0,1 truth values, samples in lines, classes in columns # prediction = array(p,n) of numerical scores between 0 and 1 (analogous to probabilities) # Isabelle Guyon and Arthur Pesah, ChaLearn, August-November 2014 # ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS". # ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM # ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE # WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS. # IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL, # INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS, # PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE. import os from sys import stderr from sys import version import numpy as np import scipy as sp from sklearn import metrics from sklearn.preprocessing import * swrite = stderr.write from os import getcwd as pwd try: from pip._internal.utils.misc import get_installed_distributions except ImportError: # pip<10 from pip import get_installed_distributions from glob import glob import platform import psutil if (os.name == "nt"): filesep = '\\' else: filesep = '/' # ========= Useful functions ============== def read_array(filename): ''' Read array and convert to 2d np arrays ''' array = np.genfromtxt(filename, dtype=float) if len(array.shape) == 1: array = array.reshape(-1, 1) return array def sanitize_array(array): ''' Replace NaN and Inf (there should not be any!)''' a = np.ravel(array) #maxi = np.nanmax((filter(lambda x: x != float('inf'), a))) # Max except NaN and Inf #mini = np.nanmin((filter(lambda x: x != float('-inf'), a))) # Mini except NaN and Inf maxi=np.max(a) mini=np.min(a) array[array == float('inf')] = maxi array[array == float('-inf')] = mini mid = (maxi + mini) / 2 array[np.isnan(array)] = mid return array def normalize_array(solution, prediction): ''' Use min and max of solution as scaling factors to normalize prediction, then threshold it to [0, 1]. Binarize solution to {0, 1}. This allows applying classification scores to all cases. In principle, this should not do anything to properly formatted classification inputs and outputs.''' # Binarize solution sol = np.ravel(solution) # convert to 1-d array #maxi = np.nanmax((filter(lambda x: x != float('inf'), sol))) # Max except NaN and Inf #mini = np.nanmin((filter(lambda x: x != float('-inf'), sol))) # Mini except NaN and Inf maxi=np.max(sol); mini=np.min(sol) if maxi == mini: print('Warning, cannot normalize') return [solution, prediction] diff = maxi - mini mid = (maxi + mini) / 2. 
new_solution = np.copy(solution) new_solution[solution >= mid] = 1 new_solution[solution < mid] = 0 # Normalize and threshold predictions (takes effect only if solution not in {0, 1}) new_prediction = (np.copy(prediction) - float(mini)) / float(diff) new_prediction[new_prediction > 1] = 1 # and if predictions exceed the bounds [0, 1] new_prediction[new_prediction < 0] = 0 # Make probabilities smoother # new_prediction = np.power(new_prediction, (1./10)) return [new_solution, new_prediction] def binarize_predictions(array, task='binary.classification'): ''' Turn predictions into decisions {0,1} by selecting the class with largest score for multiclass problems and thresholding at 0.5 for other cases.''' # add a very small random value as tie breaker (a bit bad because this changes the score every time) # so to make sure we get the same result every time, we seed it # eps = 1e-15 # np.random.seed(sum(array.shape)) # array = array + eps*np.random.rand(array.shape[0],array.shape[1]) bin_array = np.zeros(array.shape) if (task != 'multiclass.classification') or (array.shape[1] == 1): bin_array[array >= 0.5] = 1 else: sample_num = array.shape[0] for i in range(sample_num): j = np.argmax(array[i, :]) bin_array[i, j] = 1 return bin_array def acc_stat(solution, prediction): ''' Return accuracy statistics TN, FP, TP, FN Assumes that solution and prediction are binary 0/1 vectors.''' # This uses floats so the results are floats TN = sum(np.multiply((1 - solution), (1 - prediction))) FN = sum(np.multiply(solution, (1 - prediction))) TP = sum(np.multiply(solution, prediction)) FP = sum(np.multiply((1 - solution), prediction)) # print "TN =",TN # print "FP =",FP # print "TP =",TP # print "FN =",FN return (TN, FP, TP, FN) def tiedrank(a): ''' Return the ranks (with base 1) of a list resolving ties by averaging. This works for numpy arrays.''' m = len(a) # Sort a in ascending order (sa=sorted vals, i=indices) i = a.argsort() sa = a[i] # Find unique values uval = np.unique(a) # Test whether there are ties R = np.arange(m, dtype=float) + 1 # Ranks with base 1 if len(uval) != m: # Average the ranks for the ties oldval = sa[0] newval = sa[0] k0 = 0 for k in range(1, m): newval = sa[k] if newval == oldval: # moving average R[k0:k + 1] = R[k - 1] * (k - k0) / (k - k0 + 1) + R[k] / (k - k0 + 1) else: k0 = k; oldval = newval # Invert the index S = np.empty(m) S[i] = R return S def mvmean(R, axis=0): ''' Moving average to avoid rounding errors. A bit slow, but... Computes the mean along the given axis, except if this is a vector, in which case the mean is returned. Does NOT flatten.''' if len(R.shape) == 0: return R average = lambda x: reduce(lambda i, j: (0, (j[0] / (j[0] + 1.)) * i[1] + (1. 
/ (j[0] + 1)) * j[1]), enumerate(x))[ 1] R = np.array(R) if len(R.shape) == 1: return average(R) if axis == 1: return np.array(map(average, R)) else: return np.array(map(average, R.transpose())) # ======= Default metrics ======== def bac_binary(solution, prediction): return bac_metric(solution, prediction, task='binary.classification') def bac_multiclass(solution, prediction): return bac_metric(solution, prediction, task='multiclass.classification') def bac_multilabel(solution, prediction): return bac_metric(solution, prediction, task='multilabel.classification') def auc_binary(solution, prediction): return auc_metric(solution, prediction, task='binary.classification') def auc_multilabel(solution, prediction): return auc_metric(solution, prediction, task='multilabel.classification') def pac_binary(solution, prediction): return pac_metric(solution, prediction, task='binary.classification') def pac_multiclass(solution, prediction): return pac_metric(solution, prediction, task='multiclass.classification') def pac_multilabel(solution, prediction): return pac_metric(solution, prediction, task='multilabel.classification') def f1_binary(solution, prediction): return f1_metric(solution, prediction, task='binary.classification') def f1_multilabel(solution, prediction): return f1_metric(solution, prediction, task='multilabel.classification') def abs_regression(solution, prediction): return a_metric(solution, prediction, task='regression') def r2_regression(solution, prediction): return r2_metric(solution, prediction, task='regression') # ======= Pre-made metrics ======== ### REGRESSION METRICS (work on raw solution and prediction) # These can be computed on all solutions and predictions (classification included) def r2_metric(solution, prediction, task='regression'): ''' 1 - Mean squared error divided by variance ''' mse = mvmean((solution - prediction) ** 2) var = mvmean((solution - mvmean(solution)) ** 2) score = 1 - mse / var return mvmean(score) def a_metric(solution, prediction, task='regression'): ''' 1 - Mean absolute error divided by mean absolute deviation ''' mae = mvmean(np.abs(solution - prediction)) # mean absolute error mad = mvmean(np.abs(solution - mvmean(solution))) # mean absolute deviation score = 1 - mae / mad return mvmean(score) ### END REGRESSION METRICS ### CLASSIFICATION METRICS (work on solutions in {0, 1} and predictions in [0, 1]) # These can be computed for regression scores only after running normalize_array def bac_metric(solution, prediction, task='binary.classification'): ''' Compute the normalized balanced accuracy. The binarization and the normalization differ for the multi-label and multi-class case. ''' label_num = solution.shape[1] score = np.zeros(label_num) bin_prediction = binarize_predictions(prediction, task) [tn, fp, tp, fn] = acc_stat(solution, bin_prediction) # Bounding to avoid division by 0 eps = 1e-15 tp = sp.maximum(eps, tp) pos_num = sp.maximum(eps, tp + fn) tpr = tp / pos_num # true positive rate (sensitivity) if (task != 'multiclass.classification') or (label_num == 1): tn = sp.maximum(eps, tn) neg_num = sp.maximum(eps, tn + fp) tnr = tn / neg_num # true negative rate (specificity) bac = 0.5 * (tpr + tnr) base_bac = 0.5 # random predictions for binary case else: bac = tpr base_bac = 1. 
/ label_num # random predictions for multiclass case bac = mvmean(bac) # average over all classes # Normalize: 0 for random, 1 for perfect score = (bac - base_bac) / sp.maximum(eps, (1 - base_bac)) return score def pac_metric(solution, prediction, task='binary.classification'): ''' Probabilistic Accuracy based on log_loss metric. We assume the solution is in {0, 1} and prediction in [0, 1]. Otherwise, run normalize_array.''' debug_flag = False [sample_num, label_num] = solution.shape if label_num == 1: task = 'binary.classification' eps = 1e-15 the_log_loss = log_loss(solution, prediction, task) # Compute the base log loss (using the prior probabilities) pos_num = 1. * sum(solution) # float conversion! frac_pos = pos_num / sample_num # prior proba of positive class the_base_log_loss = prior_log_loss(frac_pos, task) # Alternative computation of the same thing (slower) # Should always return the same thing except in the multi-label case # For which the analytic solution makes more sense if debug_flag: base_prediction = np.empty(prediction.shape) for k in range(sample_num): base_prediction[k, :] = frac_pos base_log_loss = log_loss(solution, base_prediction, task) diff = np.array(abs(the_base_log_loss - base_log_loss)) if len(diff.shape) > 0: diff = max(diff) if (diff) > 1e-10: print('Arrggh {} != {}'.format(the_base_log_loss, base_log_loss)) # Exponentiate to turn into an accuracy-like score. # In the multi-label case, we need to average AFTER taking the exp # because it is an NL operation pac = mvmean(np.exp(-the_log_loss)) base_pac = mvmean(np.exp(-the_base_log_loss)) # Normalize: 0 for random, 1 for perfect score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac)) return score def f1_metric(solution, prediction, task='binary.classification'): ''' Compute the normalized f1 measure. The binarization differs for the multi-label and multi-class case. A non-weighted average over classes is taken. The score is normalized.''' label_num = solution.shape[1] score = np.zeros(label_num) bin_prediction = binarize_predictions(prediction, task) [tn, fp, tp, fn] = acc_stat(solution, bin_prediction) # Bounding to avoid division by 0 eps = 1e-15 true_pos_num = sp.maximum(eps, tp + fn) found_pos_num = sp.maximum(eps, tp + fp) tp = sp.maximum(eps, tp) tpr = tp / true_pos_num # true positive rate (recall) ppv = tp / found_pos_num # positive predictive value (precision) arithmetic_mean = 0.5 * sp.maximum(eps, tpr + ppv) # Harmonic mean: f1 = tpr * ppv / arithmetic_mean # Average over all classes f1 = mvmean(f1) # Normalize: 0 for random, 1 for perfect if (task != 'multiclass.classification') or (label_num == 1): # How to choose the "base_f1"? # For the binary/multilabel classification case, one may want to predict all 1. # In that case tpr = 1 and ppv = frac_pos. f1 = 2 * frac_pos / (1+frac_pos) # frac_pos = mvmean(solution.ravel()) # base_f1 = 2 * frac_pos / (1+frac_pos) # or predict random values with probability 0.5, in which case # base_f1 = 0.5 # the first solution is better only if frac_pos > 1/3. 
# The solution in which we predict according to the class prior frac_pos gives # f1 = tpr = ppv = frac_pos, which is worse than 0.5 if frac_pos<0.5 # So, because the f1 score is used if frac_pos is small (typically <0.1) # the best is to assume that base_f1=0.5 base_f1 = 0.5 # For the multiclass case, this is not possible (though it does not make much sense to # use f1 for multiclass problems), so the best would be to assign values at random to get # tpr=ppv=frac_pos, where frac_pos=1/label_num else: base_f1 = 1. / label_num score = (f1 - base_f1) / sp.maximum(eps, (1 - base_f1)) return score def auc_metric(solution, prediction, task='binary.classification'): ''' Normarlized Area under ROC curve (AUC). Return Gini index = 2*AUC-1 for binary classification problems. Should work for a vector of binary 0/1 (or -1/1)"solution" and any discriminant values for the predictions. If solution and prediction are not vectors, the AUC of the columns of the matrices are computed and averaged (with no weight). The same for all classification problems (in fact it treats well only the binary and multilabel classification problems).''' # auc = metrics.roc_auc_score(solution, prediction, average=None) # There is a bug in metrics.roc_auc_score: auc([1,0,0],[1e-10,0,0]) incorrect label_num = solution.shape[1] auc = np.empty(label_num) for k in range(label_num): r_ = tiedrank(prediction[:, k]) s_ = solution[:, k] if sum(s_) == 0: print('WARNING: no positive class example in class {}'.format(k + 1)) npos = sum(s_ == 1) nneg = sum(s_ < 1) auc[k] = (sum(r_[s_ == 1]) - npos * (npos + 1) / 2) / (nneg * npos) # print('AUC[%d]=' % k + '%5.2f' % auc[k]) return 2 * mvmean(auc) - 1 ### END CLASSIFICATION METRICS # ======= Specialized scores ======== # We run all of them for all tasks even though they don't make sense for some tasks def nbac_binary_score(solution, prediction): ''' Normalized balanced accuracy for binary and multilabel classification ''' return bac_metric(solution, prediction, task='binary.classification') def nbac_multiclass_score(solution, prediction): ''' Multiclass accuracy for binary and multilabel classification ''' return bac_metric(solution, prediction, task='multiclass.classification') def npac_binary_score(solution, prediction): ''' Normalized balanced accuracy for binary and multilabel classification ''' return pac_metric(solution, prediction, task='binary.classification') def npac_multiclass_score(solution, prediction): ''' Multiclass accuracy for binary and multilabel classification ''' return pac_metric(solution, prediction, task='multiclass.classification') def f1_binary_score(solution, prediction): ''' Normalized balanced accuracy for binary and multilabel classification ''' return f1_metric(solution, prediction, task='binary.classification') def f1_multiclass_score(solution, prediction): ''' Multiclass accuracy for binary and multilabel classification ''' return f1_metric(solution, prediction, task='multiclass.classification') def log_loss(solution, prediction, task='binary.classification'): ''' Log loss for binary and multiclass. 
''' [sample_num, label_num] = solution.shape eps = 1e-15 pred = np.copy(prediction) # beware: changes in prediction occur through this sol = np.copy(solution) if (task == 'multiclass.classification') and (label_num > 1): # Make sure the lines add up to one for multi-class classification norma = np.sum(prediction, axis=1) for k in range(sample_num): pred[k, :] /= sp.maximum(norma[k], eps) # Make sure there is a single label active per line for multi-class classification sol = binarize_predictions(solution, task='multiclass.classification') # For the base prediction, this solution is ridiculous in the multi-label case # Bounding of predictions to avoid log(0),1/0,... pred = sp.minimum(1 - eps, sp.maximum(eps, pred)) # Compute the log loss pos_class_log_loss = - mvmean(sol * np.log(pred), axis=0) if (task != 'multiclass.classification') or (label_num == 1): # The multi-label case is a bunch of binary problems. # The second class is the negative class for each column. neg_class_log_loss = - mvmean((1 - sol) * np.log(1 - pred), axis=0) log_loss = pos_class_log_loss + neg_class_log_loss # Each column is an independent problem, so we average. # The probabilities in one line do not add up to one. # log_loss = mvmean(log_loss) # print('binary {}'.format(log_loss)) # In the multilabel case, the right thing i to AVERAGE not sum # We return all the scores so we can normalize correctly later on else: # For the multiclass case the probabilities in one line add up one. log_loss = pos_class_log_loss # We sum the contributions of the columns. log_loss = np.sum(log_loss) # print('multiclass {}'.format(log_loss)) return log_loss def prior_log_loss(frac_pos, task='binary.classification'): ''' Baseline log loss. For multiplr classes ot labels return the volues for each column''' eps = 1e-15 frac_pos_ = sp.maximum(eps, frac_pos) if (task != 'multiclass.classification'): # binary case frac_neg = 1 - frac_pos frac_neg_ = sp.maximum(eps, frac_neg) pos_class_log_loss_ = - frac_pos * np.log(frac_pos_) neg_class_log_loss_ = - frac_neg * np.log(frac_neg_) base_log_loss = pos_class_log_loss_ + neg_class_log_loss_ # base_log_loss = mvmean(base_log_loss) # print('binary {}'.format(base_log_loss)) # In the multilabel case, the right thing i to AVERAGE not sum # We return all the scores so we can normalize correctly later on else: # multiclass case fp = frac_pos_ / sum(frac_pos_) # Need to renormalize the lines in multiclass case # Only ONE label is 1 in the multiclass case active for each line pos_class_log_loss_ = - frac_pos * np.log(fp) base_log_loss = np.sum(pos_class_log_loss_) return base_log_loss # sklearn implementations for comparison def log_loss_(solution, prediction): return metrics.log_loss(solution, prediction) def r2_score_(solution, prediction): return metrics.r2_score(solution, prediction) def a_score_(solution, prediction): mad = float(mvmean(abs(solution - mvmean(solution)))) return 1 - metrics.mean_absolute_error(solution, prediction) / mad def auc_score_(solution, prediction): auc = metrics.roc_auc_score(solution, prediction, average=None) return mvmean(auc) ### SOME I/O functions def ls(filename): return sorted(glob(filename)) def write_list(lst): for item in lst: swrite(item + "\n") def mkdir(d): if not os.path.exists(d): os.makedirs(d) def get_info(filename): ''' Get all information {attribute = value} pairs from the public.info file''' info = {} with open(filename, "r") as info_file: lines = info_file.readlines() features_list = list(map(lambda x: tuple(x.strip("\'").split(" = ")), lines)) for 
(key, value) in features_list:
        info[key] = value.rstrip().strip("'").strip(' ')
        if info[key].isdigit():  # if we have a number, we want it to be an integer
            info[key] = int(info[key])
    return info


def show_io(input_dir, output_dir):
    ''' show directory structure and inputs and outputs to scoring program'''
    swrite('\n=== DIRECTORIES ===\n\n')
    # Show this directory
    swrite("-- Current directory " + pwd() + ":\n")
    write_list(ls('.'))
    write_list(ls('./*'))
    write_list(ls('./*/*'))
    swrite("\n")
    # List input and output directories
    swrite("-- Input directory " + input_dir + ":\n")
    write_list(ls(input_dir))
    write_list(ls(input_dir + '/*'))
    write_list(ls(input_dir + '/*/*'))
    write_list(ls(input_dir + '/*/*/*'))
    swrite("\n")
    swrite("-- Output directory " + output_dir + ":\n")
    write_list(ls(output_dir))
    write_list(ls(output_dir + '/*'))
    swrite("\n")
    # write meta data to stderr
    swrite('\n=== METADATA ===\n\n')
    swrite("-- Current directory " + pwd() + ":\n")
    try:
        import yaml  # imported lazily; the original referenced yaml without importing it
        metadata = yaml.safe_load(open('metadata', 'r'))
        for key, value in metadata.items():
            swrite(key + ': ')
            swrite(str(value) + '\n')
    except:
        swrite("none\n")
    swrite("-- Input directory " + input_dir + ":\n")
    try:
        import yaml
        metadata = yaml.safe_load(open(os.path.join(input_dir, 'metadata'), 'r'))
        for key, value in metadata.items():
            swrite(key + ': ')
            swrite(str(value) + '\n')
        swrite("\n")
    except:
        swrite("none\n")


def show_version(scoring_version):
    ''' Python version and library versions '''
    swrite('\n=== VERSIONS ===\n\n')
    # Scoring program version
    swrite("Scoring program version: " + str(scoring_version) + "\n\n")
    # Python version
    swrite("Python version: " + version + "\n\n")
    # Give information on the versions installed
    swrite("Versions of libraries installed:\n")
    # The original called an undefined lib(); use the helper imported above.
    for line in sorted(["%s==%s\n" % (i.key, i.version) for i in get_installed_distributions()]):
        swrite(line)


def show_platform():
    ''' Show information on platform'''
    swrite('\n=== SYSTEM ===\n\n')
    try:
        linux_distribution = platform.linux_distribution()
    except:
        linux_distribution = "N/A"
    try:
        dist = str(platform.dist())  # removed in recent Python versions
    except:
        dist = "N/A"
    swrite("""
    dist: %s
    linux_distribution: %s
    system: %s
    machine: %s
    platform: %s
    uname: %s
    version: %s
    mac_ver: %s
    memory: %s
    number of CPU: %s
    """ % (
        dist,
        linux_distribution,
        platform.system(),
        platform.machine(),
        platform.platform(),
        platform.uname(),
        platform.version(),
        platform.mac_ver(),
        psutil.virtual_memory(),
        str(psutil.cpu_count())
    ))


def compute_all_scores(solution, prediction):
    ''' Compute all the scores and return them as a dict'''
    missing_score = -0.999999
    scoring = {'BAC (multilabel)': nbac_binary_score,
               'BAC (multiclass)': nbac_multiclass_score,
               'F1 (multilabel)': f1_binary_score,
               'F1 (multiclass)': f1_multiclass_score,
               'Regression ABS ': a_metric,
               'Regression R2 ': r2_metric,
               'AUC (multilabel)': auc_metric,
               'PAC (multilabel)': npac_binary_score,
               'PAC (multiclass)': npac_multiclass_score}
    # Normalize/sanitize inputs
    [csolution, cprediction] = normalize_array(solution, prediction)
    solution = sanitize_array(solution)
    prediction = sanitize_array(prediction)
    # Compute all scores
    score_names = sorted(scoring.keys())
    scores = {}
    for key in score_names:
        scoring_func = scoring[key]
        try:
            if key == 'Regression R2 ' or key == 'Regression ABS ':
                scores[key] = scoring_func(solution, prediction)
            else:
                scores[key] = scoring_func(csolution, cprediction)
        except:
            scores[key] = missing_score
    return scores


def write_scores(fp, scores):
    ''' Write scores to file opened under file pointer fp'''
    for key in scores.keys():
        fp.write("%s --> %s\n" % (key, scores[key]))
        print(key + " --> " + str(scores[key]))


def show_all_scores(solution, prediction):
    ''' Compute
and display all the scores for debug purposes''' scores = compute_all_scores(solution, prediction) for key in scores.keys(): print(key + " --> " + str(scores[key])) ############################### TEST PROGRAM ########################################## if __name__ == "__main__": # This shows a bug in metrics.roc_auc_score # print('\n\nBug in sklearn.metrics.roc_auc_score:') # print('auc([1,0,0],[1e-10,0,0])=1') # print('Correct (ours): ' +str(auc_metric(np.array([[1,0,0]]).transpose(),np.array([[1e-10,0,0]]).transpose()))) # print('Incorrect (sklearn): ' +str(metrics.roc_auc_score(np.array([1,0,0]),np.array([1e-10,0,0])))) # This checks the binary and multi-class cases are well implemented # In the 2-class case, all results should be identical, except for f1 because # this is a score that is not symmetric in the 2 classes. eps = 1e-15 print('\n\nBinary score verification:') print('\n\n==========================') sol0 = np.array([[1, 0], [1, 0], [0, 1], [0, 1]]) comment = ['PERFECT'] Pred = [sol0] Sol = [sol0] comment.append('ANTI-PERFECT, very bad for r2_score') Pred.append(1 - sol0) Sol.append(sol0) comment.append('UNEVEN PROBA, BUT BINARIZED VERSION BALANCED (bac and auc=0.5)') Pred.append( np.array([[0.7, 0.3], [0.4, 0.6], [0.49, 0.51], [0.2, 0.8]])) # here is we have only 2, pac not 0 in uni-col Sol.append(sol0) comment.append('PROBA=0.5, TIES BROKEN WITH SMALL VALUE TO EVEN THE BINARIZED VERSION') Pred.append( np.array([[0.5 + eps, 0.5 - eps], [0.5 - eps, 0.5 + eps], [0.5 + eps, 0.5 - eps], [0.5 - eps, 0.5 + eps]])) Sol.append(sol0) comment.append('PROBA=0.5, TIES NOT BROKEN (bad for f1 score)') Pred.append(np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])) Sol.append(sol0) sol1 = np.array([[1, 0], [0, 1], [0, 1]]) comment.append('EVEN PROBA, but wrong PAC prior because uneven number of samples') Pred.append(np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])) Sol.append(sol1) comment.append( 'Correct PAC prior; score generally 0. But 100% error on positive class because of binarization so f1 (1 col) is at its worst.') p = len(sol1) Pred.append(np.array([sum(sol1) * 1. 
/ p] * p)) Sol.append(sol1) comment.append('All positive') Pred.append(np.array([[1, 1], [1, 1], [1, 1]])) Sol.append(sol1) comment.append('All negative') Pred.append(np.array([[0, 0], [0, 0], [0, 0]])) Sol.append(sol1) for k in range(len(Sol)): sol = Sol[k] pred = Pred[k] print('****** ({}) {} ******'.format(k, comment[k])) print('------ 2 columns ------') show_all_scores(sol, pred) print('------ 1 column ------') sol = np.array([sol[:, 0]]).transpose() pred = np.array([pred[:, 0]]).transpose() show_all_scores(sol, pred) print('\n\nMulticlass score verification:') print('\n\n==========================') sol2 = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]]) comment = ['Three classes perfect'] Pred = [sol2] Sol = [sol2] comment.append('Three classes all wrong') Pred.append(np.array([[0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]])) Sol.append(sol2) comment.append('Three classes equi proba') Pred.append(np.array([[1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3]])) Sol.append(sol2) comment.append('Three classes some proba that do not add up') Pred.append(np.array([[0.2, 0, 0.5], [0.8, 0.4, 0.1], [0.9, 0.1, 0.2], [0.7, 0.3, 0.3]])) Sol.append(sol2) comment.append('Three classes predict prior') Pred.append(np.array([[0.75, 0.25, 0.], [0.75, 0.25, 0.], [0.75, 0.25, 0.], [0.75, 0.25, 0.]])) Sol.append(sol2) for k in range(len(Sol)): sol = Sol[k] pred = Pred[k] print('****** ({}) {} ******'.format(k, comment[k])) show_all_scores(sol, pred) print('\n\nMulti-label score verification: 1) all identical labels') print('\n\n=======================================================') print('\nIt is normal that for more then 2 labels the results are different for the multiclass scores.') print('\nBut they should be indetical for the multilabel scores.') num = 2 sol = np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0]]) sol3 = sol[:, 0:num] if num == 1: sol3 = np.array([sol3[:, 0]]).transpose() comment = ['{} labels perfect'.format(num)] Pred = [sol3] Sol = [sol3] comment.append('All wrong, in the multi-label sense') Pred.append(1 - sol3) Sol.append(sol3) comment.append('All equi proba: 0.5') sol = np.array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]) if num == 1: Pred.append(np.array([sol[:, 0]]).transpose()) else: Pred.append(sol[:, 0:num]) Sol.append(sol3) comment.append('All equi proba, prior: 0.25') sol = np.array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25]]) if num == 1: Pred.append(np.array([sol[:, 0]]).transpose()) else: Pred.append(sol[:, 0:num]) Sol.append(sol3) comment.append('Some proba') sol = np.array([[0.2, 0.2, 0.2], [0.8, 0.8, 0.8], [0.9, 0.9, 0.9], [0.7, 0.7, 0.7]]) if num == 1: Pred.append(np.array([sol[:, 0]]).transpose()) else: Pred.append(sol[:, 0:num]) Sol.append(sol3) comment.append('Invert both solution and prediction') if num == 1: Pred.append(np.array([sol[:, 0]]).transpose()) else: Pred.append(sol[:, 0:num]) Sol.append(1 - sol3) for k in range(len(Sol)): sol = Sol[k] pred = Pred[k] print('****** ({}) {} ******'.format(k, comment[k])) show_all_scores(sol, pred) print('\n\nMulti-label score verification:') print('\n\n==========================') sol4 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1]]) comment = ['Three labels perfect'] Pred = [sol4] Sol = [sol4] comment.append('Three classes all wrong, in the multi-label sense') Pred.append(1 - sol4) Sol.append(sol4) comment.append('Three classes equi proba') Pred.append(np.array([[1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 
/ 3], [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3]])) Sol.append(sol4) comment.append('Three classes some proba that do not add up') Pred.append(np.array([[0.2, 0, 0.5], [0.8, 0.4, 0.1], [0.9, 0.1, 0.2], [0.7, 0.3, 0.3]])) Sol.append(sol4) comment.append('Three classes predict prior') Pred.append(np.array([[0.25, 0.25, 0.5], [0.25, 0.25, 0.5], [0.25, 0.25, 0.5], [0.25, 0.25, 0.5]])) Sol.append(sol4) for k in range(len(Sol)): sol = Sol[k] pred = Pred[k] print('****** ({}) {} ******'.format(k, comment[k])) show_all_scores(sol, pred)
[ "ndh3395@gmail.com" ]
ndh3395@gmail.com
bf85a112cc933c60dd948b96d1bb63260bbb3d38
118a63e4b65052430edbc07b5d794d3bad95a75d
/Урок 7/Task7_2.py
71b53f30b81915ba28649a964cc02c4e4c20c5d5
[]
no_license
olgaBovyka/BasicLanguagePython
a47bff74b4908dc34d106defc81f5825d75dc88e
312d32ca4a7f382eac7e25295c44846572c3b9f0
refs/heads/master
2022-11-05T22:34:29.302452
2020-06-24T20:35:55
2020-06-24T20:35:55
263,566,938
0
0
null
2020-06-24T20:35:57
2020-05-13T08:13:21
Python
UTF-8
Python
false
false
3,141
py
""" 2. Реализовать проект расчета суммарного расхода ткани на производство одежды. Основная сущность (класс) этого проекта — одежда, которая может иметь определенное название. К типам одежды в этом проекте относятся пальто и костюм. У этих типов одежды существуют параметры: размер (для пальто) и рост (для костюма). Это могут быть обычные числа: V и H, соответственно. Для определения расхода ткани по каждому типу одежды использовать формулы: для пальто (V/6.5 + 0.5), для костюма (2 * H + 0.3). Проверить работу этих методов на реальных данных. Реализовать общий подсчет расхода ткани. Проверить на практике полученные на этом уроке знания: реализовать абстрактные классы для основных классов проекта, проверить на практике работу декоратора @property. """ from abc import ABC, abstractmethod class Wear(ABC): @property @abstractmethod def consumption(self) -> float: pass @property @abstractmethod def params(self) -> float: pass class Suit(Wear): def __init__(self, name: str, height: float): self.__height = height self.__name = name @property def consumption(self) -> float: return 2*self.__height+0.3 @property def params(self) -> float: return self.__height class Coat(Wear): def __init__(self, name: str, size: float): self.__name = name self.__size = size @property def consumption(self) -> float: return self.__size / 6.5 + 0.5 @property def params(self) -> float: return self.__size while True: coat_h_var = input("Введите размер пальто ") if coat_h_var.isdigit(): coat_h_int = int(coat_h_var) break my_coat = Coat("burberry", coat_h_int) print("Расход ткани на 1 пальто", my_coat.consumption) while True: coat_c_var = input("Введите количество пальто ") if coat_c_var.isdigit(): coat_c_int = int(coat_c_var) break while True: suit_s_var = input("Введите размер костюма ") if suit_s_var.isdigit(): suit_s_int = int(suit_s_var) break my_suit = Coat("prada", suit_s_int) print("Расход ткани на 1 костюм", my_suit.consumption) while True: suit_c_var = input("Введите количество костюмов ") if suit_c_var.isdigit(): suit_c_int = int(suit_c_var) break print("Общий расход ткани", my_suit.consumption * suit_c_int + my_coat.consumption * coat_c_int)
[ "olga@skat-vending.com" ]
olga@skat-vending.com
c72ea0fdf63e7cab3cd12fac24e9a96fe75a01e2
50402cc4388dfee3a9dbe9e121ef217759ebdba8
/etc/MOPSO-GP0/ZDT4.py
1082e5005e8823de068729fbccebe4e6a539378f
[]
no_license
dqyi11/SVNBackup
bd46a69ec55e3a4f981a9bca4c8340944d8d5886
9ad38e38453ef8539011cf4d9a9c0a363e668759
refs/heads/master
2020-03-26T12:15:01.155873
2015-12-10T01:11:36
2015-12-10T01:11:36
144,883,382
2
1
null
null
null
null
UTF-8
Python
false
false
1,609
py
'''
Created on 2014-1-25

@author: Walter
'''
from SwarmND import *
import numpy as np
import sys

if __name__ == '__main__':

    def func1(x):
        return x[0]

    def func2(x):
        sum = 0.0
        # ZDT4 sums over x_2..x_10, i.e. indices 1..9 here; starting at 1
        # keeps g = 1 on the analytic Pareto front plotted below (the loop
        # originally started at index 2, which made g = 11 at those points).
        for i in range(1, 10):
            sum += x[i]**2 - 10 * np.cos(4 * np.pi * x[i])
        g = 1 + 10 * 9 + sum
        h = 1 - np.sqrt(x[0] / g)
        return g * h

    figFolder = sys.path[0] + "\\zdt4"

    paretoX = np.arange(0.0, 1.0, 0.005)
    paretoY = np.zeros(len(paretoX))
    localParetoY = np.zeros(len(paretoX))
    paretoPos = []
    for i in range(len(paretoX)):
        paretoY[i] = 1 - np.sqrt(paretoX[i])
        localParetoY[i] = 1 - np.sqrt(paretoX[i] / 1.25)
        fitPos = np.matrix(np.zeros((1, 2), float))
        fitPos[0, 0] = paretoX[i]
        fitPos[0, 1] = paretoY[i]
        paretoPos.append(fitPos)

    swarm = SwarmND(100, 10)
    swarm.setDisplayParam(600, 600, 20, 0.1)
    swarm.setParam(2.0, 2.0, 0.8, [func1, func2])
    ws = []
    ws.append([0.0, 1.0])
    for i in range(1, 10):
        ws.append([-5.0, 5.0])
    swarm.initParticles(ws)

    swarm.paretoX = paretoX
    swarm.paretoY = paretoY
    swarm.localParetoX = paretoX
    swarm.localParetoY = localParetoY
    swarm.paretoPos = paretoPos

    runPlan = [30, 60, 80, 100]
    count = 0
    for r in runPlan:
        for t in range(r):
            swarm.update()
            count += 1
            swarm.plot(count, figFolder)
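
# Appended check (not part of the original file): the canonical ZDT4 g(x)
# equals 1 at Pareto-optimal points (x2..x10 = 0), giving f2 = 1 - sqrt(x1).
def _zdt4_g(x):
    return 1 + 10 * 9 + sum(xi ** 2 - 10 * np.cos(4 * np.pi * xi) for xi in x[1:10])

print(_zdt4_g([0.25] + [0.0] * 9))  # expect 1.0, hence f2 = 1 - sqrt(0.25) = 0.5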
[ "walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39" ]
walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39
c04c9fa2bf4ebb0e001ac9c901b729acbfa268d1
ae537f4732aa31c7238e2287609738da881cb89c
/resources/item.py
33ecafc1761c929d614cd892a1a08eb8112a8514
[]
no_license
vincentgoh82/SchoolCode
568e96126f350bff5d665bb87a0136d805b0c780
1904362d2bf1b673d9fcd2b6344aca8d7fa4aaa1
refs/heads/master
2021-07-09T21:10:20.905018
2017-10-05T10:31:40
2017-10-05T10:31:40
105,864,634
0
0
null
null
null
null
UTF-8
Python
false
false
1,655
py
from flask_restful import Resource, reqparse
from flask_jwt import jwt_required

from models.item import ItemModel


class Item(Resource):
    parser = reqparse.RequestParser()
    parser.add_argument('price',
                        type=float,
                        required=True,
                        help="This field cannot be left blank!"
                        )
    parser.add_argument('store_id',
                        type=int,
                        required=True,
                        help="Every item needs a store id."
                        )

    @jwt_required()
    def get(self, name):
        item = ItemModel.find_by_name(name)
        if item:
            return item.json()
        return {'message': 'Item not found'}, 404

    def post(self, name):
        if ItemModel.find_by_name(name):
            # 400: bad request -- the item already exists
            return {'message': "An item with name '{}' already exists".format(name)}, 400

        data = Item.parser.parse_args()
        item = ItemModel(name, **data)

        try:
            item.save_to_db()
        except:
            # 500: internal server error
            return {"message": "An error occurred inserting the item."}, 500

        return item.json(), 201

    def delete(self, name):
        item = ItemModel.find_by_name(name)
        if item:
            item.delete_from_db()
        return {"message": "Item deleted."}

    def put(self, name):
        data = Item.parser.parse_args()
        item = ItemModel.find_by_name(name)

        if item:
            item.price = data['price']
        else:
            item = ItemModel(name, data['price'])

        item.save_to_db()
        return item.json()


class ItemList(Resource):
    def get(self):
        return {'items': [item.json() for item in ItemModel.query.all()]}
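
# Appended usage sketch (not part of the original file): exercising the
# resource over HTTP with `requests`. The host, port, and token are
# placeholders, and the route assumes the usual registration, e.g.
# api.add_resource(Item, '/item/<name>').
#   import requests
#   requests.post('http://127.0.0.1:5000/item/chair',
#                 json={'price': 19.99, 'store_id': 1})
#   requests.get('http://127.0.0.1:5000/item/chair',
#                headers={'Authorization': 'JWT <token>'})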
[ "vincentgoh1982@gmail.com" ]
vincentgoh1982@gmail.com
b48ce47ae18e26b378819f5c9ecf150415fccd5a
25cf15f81982348cdee729baa5c6c8ca19ab4506
/Ziza/static/media/settings.py
48970cf31f12b9a59aebed0ae3dce7f5eb92fa0c
[]
no_license
HarunColic/ZizaRepo
ca962f42cbb3a521e3121174d6bf615187dfb67c
79cd051b88a39d678abd8aa329fd7cfdca40cb42
refs/heads/master
2020-03-26T15:17:32.182469
2020-03-03T12:00:46
2020-03-03T12:00:46
145,034,327
0
0
null
null
null
null
UTF-8
Python
false
false
4,221
py
u""" Django settings for Ziza project. Generated by 'django-admin startproject' using Django 2.1. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '8yd0w1$#g6zzpigdqd7!3*80k5q7d=v!(rhv6#s_3fsw5=@t*v' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ['ziza.ba','www.ziza.ba','mail.ziza.ba', '127.0.0.1', '185.99.2.141'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'account', 'location', 'post', 'sweetify', ] BROKET_HOST = "localhost" BROKER_PORT = "5672" BROKER_USER = "guest" BROKER_PASSWORD = "guest" BROKER_VHOST = "/" MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'Ziza.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')] , 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'Ziza.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases #ONLINE DB """ DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'dpswllhm', 'USER': 'dpswllhm', 'PASSWORD': 'INNeGggbTkmqk_zValpRFXdINOMS2HVW', 'HOST': 'horton.elephantsql.com', 'PORT': '', } } """ DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'zizadb', 'USER': 'cola', 'PASSWORD': 'cola', 'HOST': 'localhost', 'PORT': '5432', } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Europe/Sarajevo' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ] PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) ADMIN_MEDIA_PREFIX = 'static/admin' STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static') STATIC_URL = 
'/static/' MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media') MEDIA_URL = '/media/' EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = 'testingziza@gmail.com' EMAIL_HOST_PASSWORD = 'Verification1' EMAIL_PORT = 587
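
# Appended usage sketch (not part of the original file): the EMAIL_* values
# above are the settings django.core.mail reads, e.g.
#   from django.core.mail import send_mail
#   send_mail('subject', 'message body', EMAIL_HOST_USER, ['to@example.com'])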
[ "haruncolic@hotmail.com" ]
haruncolic@hotmail.com
367dc9761443c981fdda7dc72a838157e0f2e0a8
6e87da516ab1af2646c45f6f7c6626081f45f00b
/xarm_moveit_servo/launch/_xarm_moveit_servo.launch.py
ea87c5ea477110a88acab45300c174c6d56bdbb1
[ "BSD-3-Clause" ]
permissive
0000duck/xarm_ros2
5598f6812b4f2da8b0d38c932282249033e21048
57892f0bd79ffdc9936dd3b340ae141c186a370d
refs/heads/master
2023-08-24T02:49:43.560843
2021-10-21T07:40:10
2021-10-21T07:40:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
10,030
py
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>

import os
from ament_index_python import get_package_share_directory
from launch.launch_description_sources import load_python_launch_file_as_module
from launch import LaunchDescription
from launch.actions import OpaqueFunction, IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
from launch_ros.actions import Node


def launch_setup(context, *args, **kwargs):
    dof = LaunchConfiguration('dof', default=7)
    prefix = LaunchConfiguration('prefix', default='')
    hw_ns = LaunchConfiguration('hw_ns', default='xarm')
    limited = LaunchConfiguration('limited', default=False)
    effort_control = LaunchConfiguration('effort_control', default=False)
    velocity_control = LaunchConfiguration('velocity_control', default=False)
    add_gripper = LaunchConfiguration('add_gripper', default=False)
    add_vacuum_gripper = LaunchConfiguration('add_vacuum_gripper', default=False)
    ros2_control_plugin = LaunchConfiguration('ros2_control_plugin', default='xarm_control/FakeXArmHW')

    # 1: xbox360 wired
    # 2: xbox360 wireless
    # 3: spacemouse wireless
    joystick_type = LaunchConfiguration('joystick_type', default=1)

    add_other_geometry = LaunchConfiguration('add_other_geometry', default=False)
    geometry_type = LaunchConfiguration('geometry_type', default='box')
    geometry_mass = LaunchConfiguration('geometry_mass', default=0.1)
    geometry_height = LaunchConfiguration('geometry_height', default=0.1)
    geometry_radius = LaunchConfiguration('geometry_radius', default=0.1)
    geometry_length = LaunchConfiguration('geometry_length', default=0.1)
    geometry_width = LaunchConfiguration('geometry_width', default=0.1)
    geometry_mesh_filename = LaunchConfiguration('geometry_mesh_filename', default='')
    geometry_mesh_origin_xyz = LaunchConfiguration('geometry_mesh_origin_xyz', default='"0 0 0"')
    geometry_mesh_origin_rpy = LaunchConfiguration('geometry_mesh_origin_rpy', default='"0 0 0"')
    geometry_mesh_tcp_xyz = LaunchConfiguration('geometry_mesh_tcp_xyz', default='"0 0 0"')
    geometry_mesh_tcp_rpy = LaunchConfiguration('geometry_mesh_tcp_rpy', default='"0 0 0"')

    moveit_config_package_name = 'xarm_moveit_config'
    xarm_type = 'xarm{}'.format(dof.perform(context))
    ros_namespace = LaunchConfiguration('ros_namespace', default='').perform(context)

    # robot_description_parameters
    # xarm_moveit_config/launch/lib/xarm_moveit_config_lib.py
    mod = load_python_launch_file_as_module(os.path.join(
        get_package_share_directory(moveit_config_package_name),
        'launch', 'lib', 'xarm_moveit_config_lib.py'))
    get_xarm_robot_description_parameters = getattr(mod, 'get_xarm_robot_description_parameters')
    robot_description_parameters = get_xarm_robot_description_parameters(
        xacro_urdf_file=PathJoinSubstitution([FindPackageShare('xarm_description'), 'urdf', 'xarm_device.urdf.xacro']),
        xacro_srdf_file=PathJoinSubstitution([FindPackageShare('xarm_moveit_config'), 'srdf', 'xarm.srdf.xacro']),
        urdf_arguments={
            'prefix': prefix,
            'hw_ns': hw_ns.perform(context).strip('/'),
            'limited': limited,
            'effort_control': effort_control,
            'velocity_control': velocity_control,
            'add_gripper': add_gripper,
            'add_vacuum_gripper': add_vacuum_gripper,
            'dof': dof,
            'ros2_control_plugin': ros2_control_plugin,
            'add_other_geometry': add_other_geometry,
            'geometry_type': geometry_type,
            'geometry_mass': geometry_mass,
            'geometry_height': geometry_height,
            'geometry_radius': geometry_radius,
            'geometry_length': geometry_length,
            'geometry_width': geometry_width,
            'geometry_mesh_filename': geometry_mesh_filename,
            'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
            'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
            'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
            'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
        },
        srdf_arguments={
            'prefix': prefix,
            'dof': dof,
            'add_gripper': add_gripper,
            'add_other_geometry': add_other_geometry,
        },
        arguments={
            'context': context,
            'xarm_type': xarm_type,
        }
    )

    load_yaml = getattr(mod, 'load_yaml')
    servo_yaml = load_yaml('xarm_moveit_servo', "config/xarm_moveit_servo_config.yaml")
    servo_yaml['move_group_name'] = xarm_type
    xarm_traj_controller = '{}{}_traj_controller'.format(prefix.perform(context), xarm_type)
    servo_yaml['command_out_topic'] = '/{}/joint_trajectory'.format(xarm_traj_controller)
    servo_params = {"moveit_servo": servo_yaml}
    controllers = ['joint_state_controller', xarm_traj_controller]
    if add_gripper.perform(context) in ('True', 'true'):
        controllers.append('{}xarm_gripper_traj_controller'.format(prefix.perform(context)))

    # rviz_config_file = PathJoinSubstitution([FindPackageShare(moveit_config_package_name), 'rviz', 'moveit.rviz'])
    rviz_config_file = PathJoinSubstitution([FindPackageShare('xarm_moveit_servo'), 'rviz', 'servo.rviz'])
    rviz_node = Node(
        package='rviz2',
        executable='rviz2',
        name='rviz2',
        output='screen',
        arguments=['-d', rviz_config_file],
        parameters=[
            robot_description_parameters,
        ],
        remappings=[
            ('/tf', 'tf'),
            ('/tf_static', 'tf_static'),
        ]
    )

    # ros2 control launch
    # xarm_controller/launch/_ros2_control.launch.py
    ros2_control_launch = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_controller'), 'launch', '_ros2_control.launch.py'])),
        launch_arguments={
            'prefix': prefix,
            'hw_ns': hw_ns,
            'limited': limited,
            'effort_control': effort_control,
            'velocity_control': velocity_control,
            'add_gripper': add_gripper,
            'add_vacuum_gripper': add_vacuum_gripper,
            'dof': dof,
            'ros2_control_plugin': ros2_control_plugin,
            'add_other_geometry': add_other_geometry,
            'geometry_type': geometry_type,
            'geometry_mass': geometry_mass,
            'geometry_height': geometry_height,
            'geometry_radius': geometry_radius,
            'geometry_length': geometry_length,
            'geometry_width': geometry_width,
            'geometry_mesh_filename': geometry_mesh_filename,
            'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
            'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
            'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
            'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
        }.items(),
    )

    # Load controllers
    load_controllers = []
    for controller in controllers:
        load_controllers.append(Node(
            package='controller_manager',
            executable='spawner.py',
            output='screen',
            arguments=[
                controller,
                '--controller-manager', '{}/controller_manager'.format(ros_namespace)
            ],
        ))

    # Launch as much as possible in components
    container = ComposableNodeContainer(
        name='xarm_moveit_servo_container',
        namespace='/',
        package='rclcpp_components',
        executable='component_container',
        composable_node_descriptions=[
            ComposableNode(
                package='robot_state_publisher',
                plugin='robot_state_publisher::RobotStatePublisher',
                name='robot_state_publisher',
                parameters=[robot_description_parameters],
            ),
            ComposableNode(
                package='tf2_ros',
                plugin='tf2_ros::StaticTransformBroadcasterNode',
                name='static_tf2_broadcaster',
                parameters=[{'child_frame_id': 'link_base', 'frame_id': 'world'}],
            ),
            ComposableNode(
                package='moveit_servo',
                plugin='moveit_servo::ServoServer',
                name='servo_server',
                parameters=[
                    servo_params,
                    robot_description_parameters,
                ],
                extra_arguments=[{'use_intra_process_comms': True}],
            ),
            ComposableNode(
                package='xarm_moveit_servo',
                plugin='xarm_moveit_servo::JoyToServoPub',
                name='joy_to_servo_node',
                parameters=[
                    servo_params,
                    {
                        'dof': dof,
                        'ros_queue_size': 10,
                        'joystick_type': joystick_type,
                    },
                ],
                extra_arguments=[{'use_intra_process_comms': True}],
            ),
            ComposableNode(
                package='joy',
                plugin='joy::Joy',
                name='joy_node',
                parameters=[
                    # {'autorepeat_rate': 50.0},
                ],
                extra_arguments=[{'use_intra_process_comms': True}],
            ),
        ],
        output='screen',
    )

    return [
        rviz_node,
        ros2_control_launch,
        container,
    ] + load_controllers


def generate_launch_description():
    return LaunchDescription([
        OpaqueFunction(function=launch_setup)
    ])
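
# Appended usage sketch (not part of the original file): a typical invocation
# once the workspace is built and sourced; the argument names mirror the
# LaunchConfiguration declarations above.
#   ros2 launch xarm_moveit_servo _xarm_moveit_servo.launch.py \
#       dof:=7 add_gripper:=true joystick_type:=1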
[ "vinman.cub@gmail.com" ]
vinman.cub@gmail.com
2d2fd39c2843868ed674ffcd8ec03183ce56c7d3
128a4a9f7d96aa159619947bb3f66752d8e79b75
/train_functions.py
7f2a12147ace02a13767d900228d4a43ded5d394
[]
no_license
r0mer0m/DL-Medical-Images
262fdda89612abc5a119b2dd60d1ded78359eeae
c0f4b2b6727d1680f13fdd4f1c7b950fd1c31251
refs/heads/master
2020-05-18T03:09:32.132147
2019-11-20T14:01:54
2019-11-20T14:01:54
184,136,383
4
1
null
null
null
null
UTF-8
Python
false
false
16,012
py
from core import *
from utils import lr_loss_plot, save_model, load_model

# loss_function = F.binary_cross_entropy_with_logits

################################
######## 1st approach #########
################################

def get_optimizer(model, lr: float = .01, wd: float = 0.):
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optim = torch.optim.Adam(parameters, lr=lr, weight_decay=wd)
    return optim

######### FIND LEARNING RATE & STORE/LOAD history #####

# def lr_finder(model, train_dl, p:(Path,str)=None, lr_low:float=1e-5, lr_high:float=1, epochs:int=2):
#     '''
#     Lr finder with the first approach
#
#     :param model:
#     :param train_dl:
#     :param p:
#     :param lr_low:
#     :param lr_high:
#     :param epochs:
#     :return:
#     '''
#     losses = []
#     if p: save_model(model, str(p))
#
#     iterations = epochs * len(train_dl)
#     delta = (lr_high - lr_low) / iterations
#     lrs = [lr_low + i * delta for i in range(iterations)]
#     model.train()
#     ind = 0
#     for i in range(epochs):
#         train_dl.set_random_choices()
#         for x, y in train_dl:
#             optim = get_optimizer(model, lr=lrs[ind])
#             x = x.cuda().float()
#             y = y.cuda()
#             out = model(x)
#             loss = F.binary_cross_entropy_with_logits(out.squeeze(), y)
#             loss.backward()
#             optim.step()
#             optim.zero_grad()
#             losses.append(loss.item())
#             ind += 1
#     if p: load_model(model, str(p))
#
#     return lrs, losses

######### Store/Load lr finder output #####

def to_csv(lrs, losses, file='lrs_losses.csv'):
    with open(str(file), 'w') as f:
        for i in range(len(lrs)):
            f.write(f'{lrs[i]},{losses[i]}\n')

def from_csv(path):
    if not isinstance(path, str):
        path = str(path)
    df = pd.read_csv(path, header=None)
    lr = df[0].tolist()
    losses = df[1].tolist()
    return lr, losses

######### Define LR policy and training #####

def train_regular_policy(model, path, train_dl, valid_dl,
                         loss_function=F.binary_cross_entropy_with_logits,
                         lr_low=1e-6, lr_high=0.001, epochs=50, epsilon=.01,
                         compute_metric=True, data=None):
    if data is None:
        data = train_dl.data
    lr = lr_high
    prev_loss, min_loss = np.inf, np.inf
    for i in range(epochs):
        model.train()
        train_dl.set_random_choices()
        total = 0
        sum_loss = 0
        optim = get_optimizer(model, lr=lr, wd=0)
        for x, y in train_dl:
            batch = y.shape[0]
            out = model(x)
            loss = loss_function(out, y)

            optim.zero_grad()
            loss.backward()
            optim.step()

            total += batch
            sum_loss += batch * (loss.item())

        print("lr %.7f train loss %.5f" % (lr, sum_loss / total))

        if data == 'chest':
            val_loss, measure = val_metrics_chest(model, valid_dl, compute_metric)
        elif data == 'chest-PvsNP' or data == 'binary_task':
            val_loss, measure = val_metrics_chest_PvsNP(model, valid_dl, compute_metric)
        elif data == 'hands':
            val_loss, measure = val_metrics_hands(model, valid_dl, loss_function, compute_metric)
        elif data == 'MURA':
            val_loss, measure = val_metrics_MURA(model, valid_dl, compute_metric)

        # the format string needs a slot for `measure` as well
        print("lr %.7f train loss %.5f measure %.5f" % (lr, sum_loss / total, measure))

        if val_loss - prev_loss > epsilon:
            lr = lr / 10.0
        if val_loss < min_loss:
            save_model(model, path)
            min_loss = val_loss
        prev_loss = val_loss
        if lr < lr_low:
            break
    return sum_loss / total

################################
######## 2nd approach #########
################################

### Annealings ###

def exp_annealing(start_lr, end_lr, n):
    ptg = np.linspace(0, 1, n)
    return start_lr * (end_lr / start_lr) ** ptg

def cos_annealing(start_lr, end_lr, n_iterations):
    i = np.arange(n_iterations)
    c_i = 1 + np.cos(i * np.pi / n_iterations)
    return end_lr + (start_lr - end_lr) / 2 * c_i

### Diff lr ###

def diff_range(val, alpha=1. / 3):
    return [val * alpha ** i for i in range(2, -1, -1)]

#### POLICIES (Finder and Training) ######

class FinderPolicy:
    def __init__(self, n_epochs, dl, min_lr=1e-7, max_lr=10):
        '''
        Implements exponential annealing policy from min_lr to max_lr
        '''
        total_iterations = n_epochs * len(dl)
        self.lr_schedule = exp_annealing(min_lr, max_lr, total_iterations)
        self.mom = .9  # constant momentum policy with default value
        self.idx = -1

    def step(self):
        self.idx = self.idx + 1
        return self.lr_schedule[self.idx], self.mom

# LR finder loop
def lr_finder(model, n_epochs, train_dl, min_lr=1e-4, max_lr=1e-1, save_path=None,
              early_stopping=200, plot_every=200):
    if save_path:
        save_model(model, save_path)

    model.train()
    policy = FinderPolicy(n_epochs=n_epochs, dl=train_dl, min_lr=min_lr, max_lr=max_lr)
    optimizer = OptimizerWrapper(model, policy)

    lrs = optimizer.policy.lr_schedule
    losses = []
    cnt = 0

    for _ in tqdm_notebook(range(n_epochs)):
        train_dl.set_random_choices()
        for it, (x, y) in enumerate(tqdm_notebook(train_dl, leave=False)):
            optimizer.zero_grad()

            out = model(x)
            loss = F.binary_cross_entropy_with_logits(input=out.squeeze(), target=y)

            loss.backward()
            optimizer.step()

            losses.append(loss.item())

            if cnt % plot_every == (plot_every - 1):
                lr_loss_plot(lrs, losses)

            if cnt == early_stopping:
                return lrs[:cnt], losses
            cnt += 1

    if save_path:
        load_model(model, save_path)

    return lrs, losses

class TrainingPolicy:
    '''Creates the lr and momentum policy'''
    def __init__(self, n_epochs, dl, max_lr, pctg=.3, moms=(.95, .85),
                 delta=1e-4, div_factor=25.):
        total_iterations = n_epochs * len(dl)

        iter1 = int(total_iterations * pctg)
        iter2 = total_iterations - int(total_iterations * pctg)
        iterations = (iter1, iter2)

        min_start = max_lr / div_factor
        min_end = min_start * delta

        lr_segments = ((min_start, max_lr), (max_lr, min_end))
        mom_segments = (moms, (moms[1], moms[0]))

        self.lr_schedule = self._create_schedule(lr_segments, iterations)
        self.mom_schedule = self._create_schedule(mom_segments, iterations)
        self.idx = -1

    def _create_schedule(self, segments, iterations):
        '''
        Creates a schedule given a function, behaviour and size
        '''
        stages = [cos_annealing(start, end, n)
                  for ((start, end), n) in zip(segments, iterations)]
        return np.concatenate(stages)

    def step(self):
        self.idx += 1
        return self.lr_schedule[self.idx], self.mom_schedule[self.idx]

#### OPTIMIZER WRAPPERS ######

class OptimizerWrapper:
    '''Applies the policy's lr (and momentum) schedule with discriminative lrs.'''
    def __init__(self, model, policy, wd=0, alpha=1. / 3):
        self.policy = policy  # e.g. TrainingPolicy(n_epochs=n_epochs, dl=dl, max_lr=max_lr)
        self.model = model
        self.alpha = alpha
        self.wd = wd

        # This assumes the model is defined by groups.
        param_groups = [group.parameters() for group in list(self.model.children())[0]]

        lr_0 = self.policy.lr_schedule[0]
        mom_0 = self.policy.mom_schedule[0] if hasattr(self.policy, 'mom_schedule') else .9

        groups = zip(param_groups, diff_range(lr_0, alpha=alpha), diff_range(mom_0, alpha=1))
        # Adam stores its momentum pair under the 'betas' key; a 'mom' key is
        # silently ignored by the optimizer.
        self.optimizer = optim.Adam(
            [{'params': p, 'lr': lr, 'betas': (mom, .999)} for p, lr, mom in groups]
        )

    def _update_optimizer(self):
        lr_i, mom_i = self.policy.step()
        groups = zip(self.optimizer.param_groups,
                     diff_range(lr_i, alpha=self.alpha),
                     diff_range(mom_i, alpha=1))
        for param_group, lr, mom in groups:
            param_group['lr'] = lr
            param_group['betas'] = (mom, .999)

    def _weight_decay(self):
        # Decoupled weight decay: shrink the weights by a factor (1 - lr * wd).
        for group in self.optimizer.param_groups:
            for p in group['params']:
                p.data.mul_(1 - group['lr'] * self.wd)

    def step(self):
        self._update_optimizer()
        if self.wd != 0:
            self._weight_decay()
        self.optimizer.step()

    def zero_grad(self):
        self.optimizer.zero_grad()

########## METRICS #################

def R2L1(y, out):
    y_bar = np.mean(y)
    numerator = np.sum(np.absolute(out - y))
    denominator = np.sum(np.absolute(y - y_bar))
    return 1 - numerator / denominator

def ave_auc(probs, ys):
    aucs = [roc_auc_score(ys[:, i], probs[:, i]) for i in range(probs.shape[1])]
    return np.mean(aucs), aucs

########## VALIDATION #################

def cuda2cpu_classification(y):
    return y.long().cpu().numpy()

def cuda2cpu_regression(y):
    return y.cpu().numpy()

def validate_loop(model, valid_dl, task):
    if task == 'binary' or task == 'multilabel':
        cuda2cpu = cuda2cpu_classification
        loss_fun = F.binary_cross_entropy_with_logits
    elif task == 'regression':
        cuda2cpu = cuda2cpu_regression
        loss_fun = F.l1_loss

    model.eval()
    total = 0
    sum_loss = 0
    ys = []
    preds = []
    for x, y in valid_dl:
        out = model(x).squeeze()
        loss = loss_fun(out.squeeze(), y)
        batch = y.shape[0]
        sum_loss += batch * (loss.item())
        total += batch
        preds.append(out.squeeze().detach().cpu().numpy())
        ys.append(cuda2cpu(y))
    return sum_loss / total, preds, ys

def validate_multilabel(model, valid_dl):
    loss, preds, ys = validate_loop(model, valid_dl, 'multilabel')
    preds = np.vstack(preds)
    ys = np.vstack(ys)
    mean_auc, aucs = ave_auc(preds, ys)
    return loss, mean_auc, aucs

def validate_binary(model, valid_dl):
    loss, preds, ys = validate_loop(model, valid_dl, 'binary')
    preds = np.concatenate(preds)
    ys = np.concatenate(ys)
    auc = roc_auc_score(ys, preds)
    accuracy = accuracy_score(ys, (preds > .5).astype(int))
    return loss, auc, accuracy

def validate_regression(model, valid_dl):
    loss, preds, ys = validate_loop(model, valid_dl, 'regression')
    preds = np.concatenate(preds)
    ys = np.concatenate(ys)
    R2 = R2L1(y=ys, out=preds)
    return loss, R2

########## TTA #################

def TTA_loop(model, valid_dl, task, ndl=4):
    if task == 'binary' or task == 'multilabel':
        cuda2cpu = cuda2cpu_classification
        loss_fun = F.binary_cross_entropy_with_logits
    elif task == 'regression':
        cuda2cpu = cuda2cpu_regression
        loss_fun = F.l1_loss

    model.eval()
    total = 0
    sum_loss = 0
    ys = []
    preds = [[] for _ in range(ndl)]

    for i in range(ndl - 1):
        valid_dl.set_random_choices()
        for x, y in valid_dl:
            out = model(x).squeeze()
            loss = loss_fun(out.squeeze(), y)
            batch = y.shape[0]
            sum_loss += batch * (loss.item())
            total += batch
            preds[i].append(out.squeeze().detach().cpu().numpy())

    for x, y in valid_dl:
        out = model(x)
        loss = loss_fun(out.squeeze(), y)
        batch = y.shape[0]
        sum_loss += batch * (loss.item())
        total += batch
        preds[ndl - 1].append(out.squeeze().detach().cpu().numpy())
        ys.append(cuda2cpu(y))

    return sum_loss / total, preds, ys

def TTA_multilabel(model, valid_dl, ndl=4):
    loss, preds, ys = TTA_loop(model, valid_dl, 'multilabel', ndl)
    preds = [np.vstack(pred) for pred in preds]
    preds = np.mean(preds, axis=0)
    ys = np.vstack(ys)
    mean_auc, aucs = ave_auc(preds, ys)
    print("TTA loss %.4f and auc %.4f" % (loss, mean_auc))
    return loss, mean_auc, aucs

def TTA_binary(model, valid_dl, ndl=4):
    loss, preds, ys = TTA_loop(model, valid_dl, 'binary', ndl)
    preds = [np.concatenate(pred) for pred in preds]
    preds = np.mean(preds, axis=0)
    ys = np.concatenate(ys)
    auc = roc_auc_score(ys, preds)
    accuracy = accuracy_score(ys, (preds > 0).astype(int))
    print("TTA loss %.4f auc %.4f accuracy %.4f" % (loss, auc, accuracy))
    return loss, auc, accuracy

def TTA_regression(model, valid_dl, ndl=4):
    loss, preds, ys = TTA_loop(model, valid_dl, 'regression', ndl)
    preds = [np.concatenate(pred) for pred in preds]
    preds = np.mean(preds, axis=0)
    ys = np.concatenate(ys)
    R2 = R2L1(y=ys, out=preds)
    print("TTA loss %.4f pseudo R2 (L1) %.4f " % (loss, R2))
    return loss, R2

#### LR FINDER AND TRAINING WITH POLICY ####

# loss_functions = {'binary': F.binary_cross_entropy_with_logits,
#                   'multilabel': F.binary_cross_entropy_with_logits,
#                   'multiclass': F.cross_entropy,
#                   'regression': F.l1_loss
#                   }
#
# def lr_finder(model, n_epochs, train_dl, min_lr=1e-7, max_lr=10, save_path=None,
#               mode='exponential', bar=tqdm_notebook, early_stopping=200):
#
#     if save_path: save_model(model, save_path)
#
#     optimizer = FinderOptimizerWrapper(model, n_epochs, train_dl, min_lr=min_lr, max_lr=max_lr, wd=0, mode=mode)
#
#     lrs = optimizer.policy.lr_schedule
#     losses = []
#     cnt = 0
#
#     for _ in bar(range(n_epochs)):
#         model.train()
#         train_dl.set_random_choices()
#         for it, (x, y) in enumerate(bar(train_dl)):
#
#             optimizer.zero_grad()
#
#             out = model(x)
#             loss = F.binary_cross_entropy_with_logits(input=out, target=y)
#
#             loss.backward()
#             optimizer.step()
#
#             losses.append(loss.item())
#
#             if it%200 == 199:
#                 plt.plot(lrs[:len(losses)], losses)
#                 plt.xticks(rotation=45)
#                 plt.show()
#
#             if cnt==early_stopping: return lrs[:cnt], losses
#             cnt +=1
#
#     if save_path: load_model(model, p)
#
#     return lrs, losses
#
#
# def train(n_epochs, train_dl, valid_dl, model, div_factor=25., max_lr=.01, wd=0, alpha=1./ 3, classification_type='binary',
#           save_path=None, bar=tqdm_notebook, val_function=None, unfreeze_during_loop:tuple=None):
#
#     model.train()
#
#     best_loss = np.inf
#
#     loss_f = loss_functions[classification_type]
#
#     validate = val_function if val_function else get_val_metric(train_dl)
#
#     optimizer = OptimizerWrapper(model, n_epochs, train_dl, div_factor=div_factor, max_lr=max_lr, wd=wd, alpha=alpha)
#
#     if unfreeze_during_loop:
#         if not isinstance(unfreeze_during_loop, (list, tuple)): raise ValueError('unfreeze_during_loop requires to be None, list or a tuple')
#         total_iter = n_epochs*len(train_dl)
#         first_unfreeze = int(total_iter*unfreeze_during_loop[0])
#         second_unfreeze = int(total_iter*unfreeze_during_loop[1])
#
#     for epoch in bar(range(n_epochs)):
#         div = 0
#         agg_loss = 0
#         if hasattr(train_dl, 'set_random_choices'): train_dl.set_random_choices()
#         for i, (x, y) in enumerate(train_dl):
#
#             if unfreeze_during_loop:
#                 if i == first_unfreeze: model.unfreeze(1)
#                 if i == second_unfreeze: model.unfreeze(0)
#
#             out = model(x)
#             optimizer.zero_grad()
#             loss = loss_f(input=out, target=y)
#             loss.backward()
#             optimizer.step()
#
#             agg_loss += loss.item()
#             div += 1
#
#         val_loss, measure = validate(model, valid_dl, True)
#         print(f'Ep. {epoch+1} - train loss {agg_loss/div:.4f} - val loss {val_loss:.4f} AUC {measure:.4f}')
#
#         if save_path and val_loss < best_loss:
#             save_model(model, save_path)
#             best_loss = val_loss
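
if __name__ == '__main__':
    # Appended sketch (not part of the original file): the shape of the
    # one-cycle schedule TrainingPolicy builds. The dummy loader below only
    # has to report a length.
    class _FakeDL:
        def __len__(self):
            return 100

    policy = TrainingPolicy(n_epochs=2, dl=_FakeDL(), max_lr=1e-2)
    # lr rises from max_lr/div_factor to max_lr over the first 30% of
    # iterations, then cosine-anneals down towards max_lr/div_factor * delta;
    # momentum moves in the opposite direction between moms[0] and moms[1].
    print(policy.lr_schedule[0], policy.lr_schedule.max(), policy.lr_schedule[-1])
    print(policy.mom_schedule[0], policy.mom_schedule.min(), policy.mom_schedule[-1])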
[ "mromerocalvo@dons.usfca.edu" ]
mromerocalvo@dons.usfca.edu
4126a6bc42a32fee4d838aaaaec1268e2fac7a50
d9efd71bcd858924c32761a24b37c285e3eb8f36
/caption_generator_api/utils.py
c474a5e7a7cf2a7871fdcad24deb1e58874816f4
[]
no_license
fg91/Neural-Image-Caption-Generation-Tutorial
561c1bb33cf8c2827b9c0e5ab8c226512eeaacfc
3416e64b47de7ac41249295e2de65d380421f4d7
refs/heads/master
2020-05-01T07:16:04.590640
2019-12-15T12:30:54
2019-12-15T12:30:54
177,348,369
70
9
null
null
null
null
UTF-8
Python
false
false
2,802
py
from scipy.ndimage.filters import gaussian_filter
from matplotlib.patheffects import Stroke, Normal
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

# the functions fig2data and fig2img are taken from
# http://www.icare.univ-lille1.fr/tutorials/convert_a_matplotlib_figure
# Deprecation errors have been fixed


def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw()

    # Get the RGBA buffer from the figure; the buffer is row-major, so the
    # array shape is (height, width, 4).
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    buf = buf.reshape(h, w, 4)

    # canvas.tostring_argb gives pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll(buf, 3, axis=2)
    return buf


def fig2img(fig):
    """
    @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
    @param fig a matplotlib figure
    @return a Python Imaging Library ( PIL ) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data(fig)
    h, w, d = buf.shape
    return Image.frombytes("RGBA", (w, h), buf.tobytes())


def draw_text(ax, xy, txt, sz=14):
    text = ax.text(*xy, txt,
                   verticalalignment='top',
                   color='white',
                   fontsize=sz,
                   weight='bold')
    draw_outline(text, 1)


def draw_outline(matplt_plot_obj, lw):
    matplt_plot_obj.set_path_effects([Stroke(linewidth=lw, foreground='black'), Normal()])


def show_img(im, figsize=None, ax=None, alpha=1, cmap=None):
    if not ax:
        fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(im, alpha=alpha, cmap=cmap)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    return ax


def visualize_attention(im, pred, alphas, denorm, vocab, att_size=7,
                        thresh=0., sz=224, return_fig_as_PIL_image=False):
    cap_len = len(pred)
    alphas = alphas.view(-1, 1, att_size, att_size).cpu().data.numpy()
    alphas = np.maximum(thresh, alphas)
    alphas -= alphas.min()
    alphas /= alphas.max()

    figure, axes = plt.subplots(cap_len // 5 + 1, 5, figsize=(12, 8))
    for i, ax in enumerate(axes.flat):
        if i <= cap_len:
            ax = show_img(denorm(im), ax=ax)
            if i > 0:
                mask = np.array(Image.fromarray(alphas[i - 1, 0]).resize((sz, sz)))
                blurred_mask = gaussian_filter(mask, sigma=8)
                show_img(blurred_mask, ax=ax, alpha=0.5, cmap='afmhot')
                draw_text(ax, (0, 0), vocab.itos[pred[i - 1]])
        else:
            ax.axis('off')
    plt.tight_layout()
    if return_fig_as_PIL_image:
        return fig2img(figure)
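
if __name__ == "__main__":
    # Appended smoke test (not part of the original file): round-trip a tiny
    # figure through fig2img and report its size and mode.
    fig, ax = plt.subplots(figsize=(2, 1))
    ax.plot([0, 1], [0, 1])
    img = fig2img(fig)
    print(img.size, img.mode)  # e.g. (200, 100) 'RGBA' at 100 dpi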
[ "fabiograetz@googlemail.com" ]
fabiograetz@googlemail.com
bd28753be51fb382e2fc2bb1f18dd523d88d8db0
b28ca1c9a669f1a538b17837d25e394a8fd4ad10
/SimpleAuth/wsgi.py
f7182e8691ce978835f002e1be4f6912795ed46d
[]
no_license
andricDu/SimpleAuth
e01a1c2b95cc9e8485a2e0be4cb8c772a6d5968f
2de255c1d550314113a9b6529e59a68f09b29dd6
refs/heads/master
2021-08-18T12:40:40.865700
2020-04-21T14:34:26
2020-04-21T14:34:26
90,769,179
0
1
null
2021-06-10T18:34:25
2017-05-09T16:43:10
Python
UTF-8
Python
false
false
398
py
""" WSGI config for SimpleAuth project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SimpleAuth.settings") application = get_wsgi_application()
[ "dusan.andric@oicr.on.ca" ]
dusan.andric@oicr.on.ca
b34f8b9f7f3ba7a6eedc23ea2331a5070bc75d4b
e21f51bc47bf7a5e2cf1bc38a745d3685144511a
/Flask/flask_parser.py
b88c750851cd6c041834aebfe7c8f240925f7f22
[]
no_license
razmanika/DjangoWeb
3b1a763a9a2bc888e316ddde9010ebfdd4a0a28e
e5a660f7fabe4e1912cdcf72ba938fc355b614c6
refs/heads/master
2020-06-14T16:21:06.690176
2019-07-03T13:14:24
2019-07-03T13:14:24
195,054,360
0
0
null
null
null
null
UTF-8
Python
false
false
266
py
import requests
from bs4 import BeautifulSoup

url = requests.get('http://localhost:8080/echo')
soup = BeautifulSoup(url.content, 'html.parser')

url1 = requests.post('http://localhost:8080/echo')
soup1 = BeautifulSoup(url1.content, 'html.parser')

print(soup, soup1)
[ "nikoloz.razmadze.1@btu.edu.ge" ]
nikoloz.razmadze.1@btu.edu.ge
715807a84591cf453e82f10c07cfed4904e75100
42acd95998670c6815d7995eda8f54025086697f
/backend/src/api.py
6563db1ce0158a18847ab77702d303ba6c8df441
[]
no_license
ahmostafa91/coffee-shop
2933a7ec26f006151fea04eeeed4eda9b8d5202e
2a12e470b6773682f86afe21cb3cb74f75f6b783
refs/heads/main
2023-03-17T08:45:02.811929
2021-03-06T06:03:21
2021-03-06T06:03:21
345,019,376
0
0
null
null
null
null
UTF-8
Python
false
false
5,909
py
import os
from flask import Flask, request, jsonify, abort
from sqlalchemy import exc
import json
from flask_cors import CORS

from .database.models import db_drop_and_create_all, setup_db, Drink
from .auth.auth import AuthError, requires_auth

app = Flask(__name__)
setup_db(app)
CORS(app)

'''
@TODO uncomment the following line to initialize the database
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
'''
db_drop_and_create_all()

## ROUTES
'''
@TODO implement endpoint
    GET /drinks
        it should be a public endpoint
        it should contain only the drink.short() data representation
    returns status code 200 and json {"success": True, "drinks": drinks}
        where drinks is the list of drinks
        or appropriate status code indicating reason for failure
'''
@app.route('/drinks')
def get_drinks():
    drinks = Drink.query.all()
    # print('=========>', drinks)
    return jsonify({
        'success': True,
        'drinks': [drink.short() for drink in drinks]
    }), 200

'''
@TODO implement endpoint
    GET /drinks-detail
        it should require the 'get:drinks-detail' permission
        it should contain the drink.long() data representation
    returns status code 200 and json {"success": True, "drinks": drinks}
        where drinks is the list of drinks
        or appropriate status code indicating reason for failure
'''
@app.route('/drinks-detail')
@requires_auth('get:drinks-detail')
def drinks_detail(payload):
    try:
        all_drinks = Drink.query.all()
        return jsonify({
            'success': True,
            'drinks': [drink.long() for drink in all_drinks]
        }), 200
    except Exception:
        abort(500)

'''
@TODO implement endpoint
    POST /drinks
        it should create a new row in the drinks table
        it should require the 'post:drinks' permission
        it should contain the drink.long() data representation
    returns status code 200 and json {"success": True, "drinks": drink}
        where drink an array containing only the newly created drink
        or appropriate status code indicating reason for failure
'''
@app.route('/drinks', methods=['POST'])
@requires_auth('post:drinks')
def create_drink(payload):
    req = request.get_json()

    try:
        req_recipe = req['recipe']
        if isinstance(req_recipe, dict):
            req_recipe = [req_recipe]

        drink = Drink()
        drink.title = req['title']
        # stringify the object
        drink.recipe = json.dumps(req_recipe)
        drink.insert()
    except BaseException:
        abort(400)

    return jsonify({'success': True, 'drinks': [drink.long()]})

'''
@TODO implement endpoint
    PATCH /drinks/<id>
        where <id> is the existing model id
        it should respond with a 404 error if <id> is not found
        it should update the corresponding row for <id>
        it should require the 'patch:drinks' permission
        it should contain the drink.long() data representation
    returns status code 200 and json {"success": True, "drinks": drink}
        where drink an array containing only the updated drink
        or appropriate status code indicating reason for failure
'''
@app.route('/drinks/<int:id>', methods=['PATCH'])
@requires_auth('patch:drinks')
def update_drink(payload, id):
    req = request.get_json()
    drink = Drink.query.filter(Drink.id == id).one_or_none()

    if not drink:
        abort(404)

    try:
        req_title = req.get('title')
        req_recipe = req.get('recipe')
        if req_title:
            drink.title = req_title
        if req_recipe:
            drink.recipe = json.dumps(req['recipe'])
        drink.update()
    except BaseException:
        abort(400)

    return jsonify({'success': True, 'drinks': [drink.long()]}), 200

'''
@TODO implement endpoint
    DELETE /drinks/<id>
        where <id> is the existing model id
        it should respond with a 404 error if <id> is not found
        it should delete the corresponding row for <id>
        it should require the 'delete:drinks' permission
    returns status code 200 and json {"success": True, "delete": id}
        where id is the id of the deleted record
        or appropriate status code indicating reason for failure
'''
@app.route('/drinks/<int:id>', methods=['DELETE'])
@requires_auth('delete:drinks')
def delete_drinks(payload, id):
    # resolve the 404 outside the try block; a bare `except Exception` would
    # otherwise swallow the abort(404) and turn it into a 500
    drink = Drink.query.filter(Drink.id == id).one_or_none()
    if not drink:
        abort(404, f'No drink found with id: {id}')

    try:
        drink.delete()
    except Exception:
        abort(500)

    return jsonify({
        'success': True,
        'delete': id
    }), 200

## Error Handling
'''
Example error handling for unprocessable entity
'''
@app.errorhandler(422)
def unprocessable(error):
    return jsonify({
        "success": False,
        "error": 422,
        "message": "unprocessable"
    }), 422

'''
@TODO implement error handlers using the @app.errorhandler(error) decorator
    each error handler should return (with approprate messages):
             jsonify({
                    "success": False,
                    "error": 404,
                    "message": "resource not found"
                    }), 404
'''

'''
@TODO implement error handler for 404
    error handler should conform to general task above
'''
@app.errorhandler(404)
def not_found(error):
    return jsonify({
        "success": False,
        "error": 404,
        "message": "resource not found"
    }), 404

'''
@TODO implement error handler for AuthError
    error handler should conform to general task above
'''
@app.errorhandler(AuthError)
def not_authenticated(auth_error):
    return jsonify({
        "success": False,
        "error": auth_error.status_code,
        "message": auth_error.error
    }), 401
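
# Appended usage sketch (not part of the original file): exercising the API
# with curl; the host, port, and bearer token are placeholders.
#   curl http://127.0.0.1:5000/drinks
#   curl -X POST http://127.0.0.1:5000/drinks \
#        -H 'Authorization: Bearer <JWT>' -H 'Content-Type: application/json' \
#        -d '{"title": "latte", "recipe": {"name": "milk", "color": "white", "parts": 3}}'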
[ "a.moustafa@penta-b.com" ]
a.moustafa@penta-b.com
a71d409d7b326fb5a4bd4507fcc6b07b84090dce
59cdbbd0ed4e68e8b3b2ecf1b0a724ddb13fa5f2
/config.py
5354798ce4d7f2805275e21e966410dd0fd26abd
[]
no_license
doyleSM/flask-api-rest-example
a7bb2c4ab43b8092a18e4e0474fdbfb951931c77
ef8760cfd5ce4a0090dd928b5bee72ab9d6c02ab
refs/heads/master
2022-07-05T05:50:25.485950
2020-05-04T18:07:59
2020-05-04T18:07:59
254,703,415
0
0
null
2020-05-04T18:08:00
2020-04-10T18:10:03
Python
UTF-8
Python
false
false
77
py
import os

DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ["DATABASE_URL"]
[ "gbalk@inf.ufsm.br" ]
gbalk@inf.ufsm.br
83b3c2d69dac365eecb94e332cdd2162199e5997
4e6ebd9c5cbd5cb7458e12bfce7e2ce930ad2315
/src/datasets/tools/harmonization_mapping.py
8e954c1bd3416a162521f2ab38fd296936f35e29
[ "CC0-1.0" ]
permissive
qifang-robotics/lidar-harmonization
a9473a991c896ffd293898c6abfd34b9dd245130
c99087fe5f807f3343ec07acd048b21aab57bbf7
refs/heads/master
2023-06-13T05:32:59.682066
2021-07-03T23:52:13
2021-07-03T23:52:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,468
py
import pandas as pd
import numpy as np
from pathlib import Path
from shutil import copyfile

from src.datasets.tools.transforms import GlobalShift

import code


class HarmonizationMapping:
    def __init__(self, config):
        scans_path = config['dataset']['scans_path']
        target_scan_num = config['dataset']['target_scan']
        harmonization_path = config['dataset']['harmonized_path']

        self.harmonization_path = Path(harmonization_path)
        self.harmonization_path.mkdir(exist_ok=True, parents=True)

        # 1. collect all scans
        scans = [str(f) for f in Path(scans_path).glob("*.npy")]

        # 2. select target scan(s)
        target_scan_path = Path(scans_path) / (target_scan_num + ".npy")

        # copy to the harmonized path.
        #   - one time this just didn't work. Deleting the copy and restarting
        #     the program seemed to work.
        if not (self.harmonization_path / (target_scan_num + ".npy")).exists():
            if not config['dataset']['shift']:
                copyfile(
                    str(target_scan_path),
                    str(self.harmonization_path / (target_scan_num + ".npy")))
            else:
                # move this later?
                target = np.load(str(target_scan_path))
                G = GlobalShift(**config["dataset"])
                target = G(target)
                np.save(str(self.harmonization_path / (target_scan_num + ".npy")), target)

        if not config['dataset']['create_new']:
            if (self.harmonization_path / "df.csv").exists():
                self.df = pd.read_csv((self.harmonization_path / "df.csv"), index_col=0)
            else:
                exit(f"Couldn't find HM csv file at {self.harmonization_path / 'df.csv'}")
        else:
            if (self.harmonization_path / "df.csv").exists():
                # store a backup just in case
                copyfile(str(self.harmonization_path / "df.csv"),
                         str(self.harmonization_path / "df_old.csv"))

            # initialize the df
            self.df = pd.DataFrame(
                columns=["source_scan",
                         "harmonization_target",
                         "source_scan_path",
                         "harmonization_scan_path",
                         "processing_stage"])

            self.df.source_scan_path = scans
            self.df.harmonization_target = [None] * len(scans)
            self.df.harmonization_scan_path = [None] * len(scans)
            self.df.source_scan = [int(Path(f).stem) for f in scans]
            self.df.processing_stage = [0] * len(scans)

            # setup target scan
            target_scan_num = int(target_scan_num)
            self.df.loc[self.df.source_scan == target_scan_num, "harmonization_target"] = int(target_scan_num)
            self.df.loc[self.df.source_scan == target_scan_num, "harmonization_scan_path"] = str(
                self.harmonization_path / (str(target_scan_num) + ".npy"))
            self.df.loc[self.df.source_scan == target_scan_num, "processing_stage"] = 2

            # Need processing stages for each source. Sources start at stage 0.
            # Stage 0 means that the sources haven't been identified as having
            # any overlap with a target scan. By extension, they don't have
            # examples in the dataset, nor do they have the harmonized
            # version. A source scan enters stage 1 after overlap in the
            # scan has been detected and examples have been added to the
            # dataset. After a model is trained with the new dataset, this
            # source scan can then be harmonized with the target. The source
            # scan enters stage 2 after it has been harmonized. This source
            # scan can now be used as a target scan to search for overlap
            # regions with other source scans. After all sources have been
            # checked for overlap, the stage 2 source scan can then be moved
            # to stage 3 (done). Stage 3 scans do not have to be used again.
            # The harmonization process is finished when all scans are stage
            # 2 or higher OR all scans are stage 3 or stage 0.

            self.save()

    def __getitem__(self, source_scan_num):
        # return the entire row for a source scan num (float or int or str)
        return self.df.loc[self.df.source_scan == int(source_scan_num)]

    def __len__(self):
        return len(self.df)

    def save(self):
        self.df.to_csv(self.harmonization_path / "df.csv")

    def done(self):
        # There are two conditions for being done. If either is not satisfied,
        # then the whole process is not finished. The first condition is that
        # all sources must be harmonized (all scans are stage 2 and above). In
        # the event that a scan does not contain enough overlap to reach stage
        # 1, all stage 2 and above scans will be harmonized to stage 3 while
        # searching for overlap, so there will be no stage 1 or stage 2
        # sources remaining.

        # All scans are harmonized
        cond1 = ((1 not in self.df.processing_stage.values) and
                 (0 not in self.df.processing_stage.values))

        # All scans are harmonized except for stage 0 scans which don't have
        # any reasonable overlap
        cond2 = ((2 not in self.df.processing_stage.values) and
                 (1 not in self.df.processing_stage.values))

        return cond1 or cond2

    def add_target(self, source_scan_num, harmonization_target_num):
        self.df.loc[self.df.source_scan == int(source_scan_num),
                    "harmonization_target"] = harmonization_target_num
        self.save()

    def incr_stage(self, source_scan_num):
        self.df.loc[self.df.source_scan == int(source_scan_num),
                    "processing_stage"] += 1
        self.save()

    def get_stage(self, stage_num):
        return self.df.loc[self.df.processing_stage == int(stage_num)].source_scan.values.tolist()

    def add_harmonized_scan_path(self, source_scan_num):
        self.df.loc[self.df.source_scan == int(source_scan_num),
                    "harmonization_scan_path"] = str(
            self.harmonization_path / (str(source_scan_num) + ".npy"))
        self.save()

    def print_mapping(self):
        print("Final Mapping:")
        for idx, row in self.df.iterrows():
            print(f"{row.source_scan}: {row.harmonization_target}")
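
# Appended walkthrough (not part of the original file): the stage lifecycle
# described in the __init__ comment, driven through the public helpers.
# `config` and the scan numbers are placeholders.
#   hm = HarmonizationMapping(config)
#   hm.get_stage(0)                 # scans with no detected overlap yet
#   hm.add_target(5, 1)             # scan 5 overlaps the harmonized target 1
#   hm.incr_stage(5)                # 0 -> 1: training examples extracted
#   hm.incr_stage(5)                # 1 -> 2: scan 5 harmonized
#   hm.add_harmonized_scan_path(5)
#   hm.done()                       # True once every scan is stage >= 2 (or stuck at 0)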
[ "d.t.jones@outlook.com" ]
d.t.jones@outlook.com
545089b43d7ea9cf0b0315df7fda1767e7181dff
628574233007517f0fde0b40317a68f6065f37ca
/Python/DjangoProjects/time_display_assignments/time_display_assignments/urls.py
d5f657802eb41ba481a1031393a28ae661e30133
[]
no_license
carolynyen/DojoAssignments
5a5d2df904bc2d650f945d09369a1d0ee5a316bc
a06ee21b968357e7bda77542d6a21b664a53136e
refs/heads/master
2021-01-11T17:54:13.006990
2017-04-21T19:42:46
2017-04-21T19:42:46
79,866,508
0
4
null
null
null
null
UTF-8
Python
false
false
801
py
"""time_display_assignments URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin urlpatterns = [ url(r'^', include('apps.timedisplay.urls')), ]
[ "whitehawk6888@yahoo.com" ]
whitehawk6888@yahoo.com
18b80378c40737c228cdd1f7cb7e35ce5933b2bc
abeb6ca5a9723d96c432e318aa8cdbd774804c29
/last-ansible/tweets_harvester/config.py
9135f0ee7d15d91934df9edd4439cb14011aa259
[]
no_license
ylin18/CCC-ass2
cec056fb9e1b1f9cf020d0ecaf2993fb70748dfd
9d24f0937a43152e697f6c7bed41181ce9618a29
refs/heads/master
2020-08-28T15:35:12.454736
2019-06-24T05:13:59
2019-06-24T05:13:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,337
py
app_keys_tokens = [
    {'consumer_key': 'CP20F8yCMC85K26XY07w4XElp',
     'consumer_secret': '4t1r4cdlBPGVzkosnZ2gvBqXbet5MbuJIlkuN0JKYufWIdo4yM',
     'access_token': '1121041186305630208-hG4Jv9cfPOufx3vAgPpBUCODlWsHQH',
     'access_token_secret': 'OJSXpMxZDzY9XUo2gqoqZcLUyGY1C9duopI4032fywDPb'},
    {'consumer_key': '2BjmB9QN2UwT7BWGEYJc6mzyQ',
     'consumer_secret': 'dkP4itLYIM0rqhHef4BiRkEgp8n2STc5CZuddYzjpnRzN3QX0m',
     'access_token': '1121041186305630208-9pyRCJS3ltExpoKeTqKVrYcdSNnqHg',
     'access_token_secret': 'dWIS8xzpbuB1T77UZSQCHJGBOX2uT7A82UmiwpyuSfrkq'},
    {'consumer_key': 'W225IVMaLWc3Cio8Y2ZwHmwXT',
     'consumer_secret': 'D0Gebz3e1xqrSKKCNbQPCwLsjNdQVZxHguLekTU4zCavWysswy',
     'access_token': '1121041186305630208-vVcpClv576aYx9OJjVaWJkYA89m7eI',
     'access_token_secret': 'ZjUk3ppAaudL4KR3oDQo3K6lDMZRKrnGvj2wYRpzfx1uP'},
    {'consumer_key': 'ahKRXTnEizWqy4oHC4uBFxWuu',
     'consumer_secret': 'xF2Pc3JwGtSij9Ig0UhW5A5o4RVk1kxcbTk6jMGM7W7XfOub8w',
     'access_token': '1121041186305630208-85TVCtBvNc3RjW9RjmcBdwJn5FKxQm',
     'access_token_secret': 'l3qRsugZsCt1MApDSjtCwMFS19Jms2Y2QiGpUPfzeWVit'},
    {'consumer_key': 'IaRuxafuggm6eZvXdmurzA2MV',
     'consumer_secret': '9DwkZsuQHFHtnFD5JVfttZ6uNkqsm99yNxvpmPMgSlPxAtqjJ9',
     'access_token': '1121041186305630208-UJOUSKINRytWJY9OiCF9ANeqNacdMY',
     'access_token_secret': 'kQjU0Vc4x7oPnSeuOM4Jrs5waE21dtYOKHN9Wi0hcikxB'},
    {'consumer_key': 'W2pCfanNy4x8YiEEuC1pHbPU8',
     'consumer_secret': '3XfspXNraGydG3xKYfxZ6wLVaJsCZqYeSx0cOUFSv2ABNHNi8a',
     'access_token': '1121041186305630208-iHTAQCyxr0QcCPjEH8gpNYTZyomfjQ',
     'access_token_secret': 'nsNp6LLbb1SacGUbKlUPBFBEiV937Lct9LxHIgB5MYKnL'},
    {'consumer_key': 'kpC53YdFfY1Q4vXIEhLKM4lhT',
     'consumer_secret': '3qYR9E9oucDhQSwilPQxUVnmdr1tJwAOi0iMyKCYWy63coEQLZ',
     'access_token': '1121041186305630208-bFfc3Y4x0ueoHCyCvVUZxeMWRmhloR',
     'access_token_secret': 'O460teRRz2MJxQRto7ipMz3MyKA2fwZWwW4bMnMVKzKGZ'},
    {'consumer_key': 'hVmTeuwuAy44Ufr6q6SlIlSST',
     'consumer_secret': 'BsYDcvgtmbhdGDv57oI5Q3DwSsBEuPtjaIDjALelQsmnQaeQf1',
     'access_token': '1121041186305630208-9xcHlMmIVGemqa6jNPDYfrXOv05V8l',
     'access_token_secret': 'NULMzSH41CSFTzJSv8rUu0fPf2VmiqqKX80SqF06LkqT0'}
]

geocodes = {
    'sydney': "-33.63,150.89,85km",
    'melbourne': "-37.80,145.11,75km",
    'brisbane': "-27.33,152.81,109km",
    'perth': "-32.12,115.68,75km"
}

coordinates = {
    'sydney': [150.00, -34.30, 151.62, -33.00],
    'melbourne': [144.36, -38.50, 145.88, -37.18],
    'brisbane': [152.10, -28.34, 153.55, -26.45],
    'perth': [115.40, -32.80, 116.41, -31.45]
}

search_appid = {
    'sydney': 0,
    'melbourne': 1,
    'brisbane': 2,
    'perth': 3
}

stream_appid = {
    'sydney': 4,
    'melbourne': 5,
    'brisbane': 6,
    'perth': 7
}

# city = 'Sydney'
# placeid = '0073b76548e5984f'
# centre = [-33.865143,151.209900]
# geocode = "-33.63,150.89,85km"
# coordinates = [150.00, -34.30, 151.62, -33.00]

# city = 'Melbourne'
# placeid = '01864a8a64df9dc4'
# centre = [-37.815338, 144.963226]
# geocode = "-37.80,145.11,75km"  # all covery
# coordinates = [144.36,-38.50,145.88,-37.18]  # greater

# city = 'Brisbane'
# placeid = '004ec16c62325149'
# centre = [-27.46207,153.01462]
# geocode = "-27.33,152.81,109km"
# coordinates = [152.10,-28.34,153.55,-26.45]

# city = 'Perth'
# placeid = '0118c71c0ed41109'
# centre = [-31.94475,115.86013]
# geocode = "-32.12,115.68,75km"
# coordinates = [115.40,-32.80,116.41,-31.45]
[ "xudongma@ua-unistudent-ten-9-131-202.uniaccess.unimelb.edu.au" ]
xudongma@ua-unistudent-ten-9-131-202.uniaccess.unimelb.edu.au
fefa4008d3c6a8622e01e84a315130f060863036
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
/.history/2-Python-Basics-part2/6-Logical-operators_20200414002000.py
7a4ee8fd3c96f8e57b7e41dc522b12fb81613bec
[]
no_license
CaptainStorm21/Python-Foundation
01b5fbaf7a913506518cf22e0339dd948e65cea1
a385adeda74f43dd7fb2d99d326b0be23db25024
refs/heads/master
2021-05-23T01:29:18.885239
2020-04-23T19:18:06
2020-04-23T19:18:06
253,171,611
0
0
null
null
null
null
UTF-8
Python
false
false
196
py
# Short-circuiting
is_Friend = True
is_User = True

# `or` only guarantees that at least one operand is true, so the message
# reflects that (the original printed "both are true" here as well).
if is_Friend or is_User:
    print("at least one is true")

if is_Friend and is_User:
    print("both are true")

age = 15
year = 2019
boy = "Vlad"
[ "tikana4@yahoo.com" ]
tikana4@yahoo.com
7e15d512ec3c87a9d4dc6de189623ab45646f041
efb3194a583cd79cc03dc91b9a96dfc0bdd3a344
/stm32f/json_pkt.py
8fab02dbeb225a6406222a1a16911d147abec342
[ "Apache-2.0" ]
permissive
andersy005/capstone
9227b0c19b4e16ea5e67a529937652408d0a35f2
b4301ebc7c1447f3ce2ff034add985c1f417f065
refs/heads/master
2021-09-13T07:42:52.359116
2018-04-26T17:58:05
2018-04-26T17:58:05
118,843,216
0
0
null
null
null
null
UTF-8
Python
false
false
3,517
py
# This code should run fine on MicroPython or CPython.
#
# It allows objects which can be represented as JSON objects to be sent
# between two python programs (running on the same or different computers).

import json

from dump_mem import dump_mem

SOH = 0x01
STX = 0x02
ETX = 0x03
EOT = 0x04

# <SOH><LenLow><LenHigh><STX><PAYLOAD><ETX><LRC><EOT>


def lrc(data):
    # (parameter renamed from `str`, which shadowed the builtin)
    sum = 0
    for b in data:
        sum = (sum + b) & 0xff
    return ((sum ^ 0xff) + 1) & 0xff


class JSON_Packet:
    STATE_SOH = 0
    STATE_LEN_0 = 1
    STATE_LEN_1 = 2
    STATE_STX = 3
    STATE_PAYLOAD = 4
    STATE_ETX = 5
    STATE_LRC = 6
    STATE_EOT = 7

    def __init__(self, serial_port, show_packets=False):
        self.serial_port = serial_port
        self.show_packets = show_packets
        self.pkt_len = 0
        self.pkt_idx = 0
        self.pkt = None
        self.lrc = 0
        self.state = JSON_Packet.STATE_SOH

    def send(self, obj):
        """Converts a python object into its json representation and then
           sends it using the 'serial_port' passed in the constructor.
        """
        j_str = json.dumps(obj).encode('ascii')
        j_len = len(j_str)
        j_lrc = lrc(j_str)
        hdr = bytearray((SOH, j_len & 0xff, j_len >> 8, STX))
        ftr = bytearray((ETX, j_lrc, EOT))
        if self.show_packets:
            data = hdr + j_str + ftr
            dump_mem(data, 'Send')
        self.serial_port.write(hdr)
        self.serial_port.write(j_str)
        self.serial_port.write(ftr)

    def process_byte(self, byte):
        """Processes a single byte. Returns a json object when one is
           successfully parsed, otherwise returns None.
        """
        if self.show_packets:
            if byte >= ord(' ') and byte <= ord('~'):
                print('Rcvd 0x%02x \'%c\'' % (byte, byte))
            else:
                print('Rcvd 0x%02x' % byte)
        if self.state == JSON_Packet.STATE_SOH:
            if byte == SOH:
                self.state = JSON_Packet.STATE_LEN_0
        elif self.state == JSON_Packet.STATE_LEN_0:
            self.pkt_len = byte
            self.state = JSON_Packet.STATE_LEN_1
        elif self.state == JSON_Packet.STATE_LEN_1:
            self.pkt_len += (byte << 8)
            self.state = JSON_Packet.STATE_STX
        elif self.state == JSON_Packet.STATE_STX:
            if byte == STX:
                self.state = JSON_Packet.STATE_PAYLOAD
                self.pkt_idx = 0
                self.pkt = bytearray(self.pkt_len)
                self.lrc = 0
            else:
                self.state = JSON_Packet.STATE_SOH
        elif self.state == JSON_Packet.STATE_PAYLOAD:
            self.pkt[self.pkt_idx] = byte
            self.lrc = (self.lrc + byte) & 0xff
            self.pkt_idx += 1
            if self.pkt_idx >= self.pkt_len:
                self.state = JSON_Packet.STATE_ETX
        elif self.state == JSON_Packet.STATE_ETX:
            if byte == ETX:
                self.state = JSON_Packet.STATE_LRC
            else:
                self.state = JSON_Packet.STATE_SOH
        elif self.state == JSON_Packet.STATE_LRC:
            self.lrc = ((self.lrc ^ 0xff) + 1) & 0xff
            if self.lrc == byte:
                self.state = JSON_Packet.STATE_EOT
            else:
                self.state = JSON_Packet.STATE_SOH
        elif self.state == JSON_Packet.STATE_EOT:
            self.state = JSON_Packet.STATE_SOH
            if byte == EOT:
                return json.loads(str(self.pkt, 'ascii'))
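
if __name__ == '__main__':
    # Appended loopback smoke test (not part of the original file): an
    # in-memory buffer stands in for the serial port, so the framed packet
    # produced by send() is fed back byte-by-byte into process_byte().
    import io
    buf = io.BytesIO()
    tx = JSON_Packet(buf)
    tx.send({'cmd': 'ping', 'seq': 1})

    rx = JSON_Packet(None)
    result = None
    for b in buf.getvalue():
        obj = rx.process_byte(b)
        if obj is not None:
            result = obj
    print(result)  # {'cmd': 'ping', 'seq': 1}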
[ "axbanihirwe@ualr.edu" ]
axbanihirwe@ualr.edu
820447248ef9ca3978c5079ad021994e1141f098
841cc95591bd1d5317310d8d809e2a2c0284be0b
/queries/query2.py
ed085ae4cbbe5c3c0497e79237223bc0b3f10d3b
[]
no_license
annwhoorma/DMD2_assignment1
2864e4b7a6380aa557bdd18b08a20f47212ec544
6934d842c3b3bbce89ea60469a3390f1922cdcfa
refs/heads/master
2022-06-02T22:39:43.364294
2020-04-29T08:44:52
2020-04-29T08:44:52
259,871,477
0
0
null
null
null
null
UTF-8
Python
false
false
1,249
py
from py2neo import Database
from py2neo import Graph, Node
import numpy as np

"""
Connection to Neo4j Database
"""
dvdrental = "bolt://localhost:7687"
db = Database(dvdrental)
graph = Graph(password='13579qwer')
db = graph.begin(autocommit=False)

actors_list = graph.run("match(a:Actor) with a.ID as ids return ids").to_ndarray()
number_of_ids = graph.run("match(a:Actor) with count(a.ID) as number_of_ids return number_of_ids").to_ndarray()[0][0]

maxID = graph.run('''match(a:Actor)
                     with max(a.ID) as max_actor_ID
                     return max_actor_ID''').to_ndarray()[0][0]

ret = graph.run('''match (a1:Actor)-[:ACTS_IN]->(f:Film)
                   match (a2:Actor)-[:ACTS_IN]->(f:Film)
                   with a1.ID as actor1_ID, a2.ID as actor2_ID, count(f) as together
                   where actor1_ID <> actor2_ID
                   return actor1_ID, actor2_ID, together''').to_ndarray()

# ret: 1st col - actor1_ID, 2nd col - actor2_ID, 3rd col - how many films actor1 and actor2 had together
print(ret[0][0], ret[0][1], ret[0][2], ret[20867][0], ret[20867][1], ret[20867][2])

table = np.zeros([maxID + 1, maxID + 1], dtype=int)
# iterate over every row; range(0, len(ret) - 1) would silently drop the
# last co-occurrence pair
for i in range(len(ret)):
    act1 = ret[i][0]
    act2 = ret[i][1]
    table[act1][act2] = ret[i][2]

print(table)
np.savetxt("queries_results/query2_result.csv", table, delimiter=",")
db.commit()
[ "a.boronina@innopolis.university" ]
a.boronina@innopolis.university
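A hedged miniature of the table-filling loop above, assuming numpy is available; the triples are made up:

import numpy as np

triples = [(1, 2, 3), (2, 1, 3), (1, 3, 1)]  # hypothetical (actor1, actor2, count) rows
max_id = max(max(a, b) for a, b, _ in triples)
table = np.zeros((max_id + 1, max_id + 1), dtype=int)
for a, b, n in triples:
    table[a][b] = n    # scatter each co-occurrence count into the dense matrix
print(table)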
69eeb01074d12cecc75f2289a18fdde829495e7f
03414057da375391470f28c8fa55bd8dfec751e3
/app/api_1_0/__init__.py
b14eb42e0236fe410134b2c9de2fae0883b0d457
[]
no_license
xingyuan1998/school
6fe817046e052fd47d610ef7a492327f20637ea2
1780b9246621beb4aa56e84c6fb4571521482d2d
refs/heads/master
2021-07-19T01:38:08.824341
2017-10-25T14:58:58
2017-10-25T14:58:58
107,509,754
5
0
null
null
null
null
UTF-8
Python
false
false
260
py
from flask import Blueprint api = Blueprint('api', __name__, url_prefix='/api') from . import ( appointment, auth, circles, news, profess, roommate, secondhand, tasks, user ) @api.route('/') def hello(): return 'dddd'
[ "xingyuan1998@163.com" ]
xingyuan1998@163.com
08dfeef07dc2184dd58ed15584e4a9d792be3383
3a8c2bd3b8df9054ed0c26f48616209859faa719
/Challenges/Hackerrank-DynamicArray.py
c63264cadb5c93066503209dd51764b1eaa68ce0
[]
no_license
AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges
684f1ca2f9ee3c49d0b17ecb1e80707efe305c82
98fb752c574a6ec5961a274e41a44275b56da194
refs/heads/master
2023-09-01T23:58:15.514231
2021-09-10T12:42:03
2021-09-10T12:42:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
528
py
#!/bin/python3

import os
import sys

#
# Complete the dynamicArray function below.
#
def dynamicArray(n, queries):
    # One standard solution to HackerRank's "Dynamic Array": keep n sequences
    # and a rolling lastAnswer; type-1 queries append, type-2 queries read back.
    arr = [[] for _ in range(n)]
    last_answer = 0
    result = []
    for q, x, y in queries:
        seq = arr[(x ^ last_answer) % n]
        if q == 1:
            seq.append(y)
        else:  # q == 2
            last_answer = seq[y % len(seq)]
            result.append(last_answer)
    return result

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    nq = input().split()

    n = int(nq[0])

    q = int(nq[1])

    queries = []

    for _ in range(q):
        queries.append(list(map(int, input().rstrip().split())))

    result = dynamicArray(n, queries)

    fptr.write('\n'.join(map(str, result)))
    fptr.write('\n')

    fptr.close()
[ "bennyhwangg@gmail.com" ]
bennyhwangg@gmail.com
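A hedged sample run of the dynamicArray solution above, using the classic n=2 example input; the expected output is [7, 3]:

print(dynamicArray(2, [[1, 0, 5], [1, 1, 7], [1, 0, 3], [2, 1, 0], [2, 1, 1]]))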
feb9eaf05f8f618e83419e8790d01f074d6592b0
6f1ae83ae56134f0ecd190e3c0d3d0947a08886f
/query_stanford_parser.py
6f97a39b2353fd6465ec478efa41209c135d659c
[ "BSD-3-Clause" ]
permissive
yukiyakiZ/questplusplus
60b2ea03bb32ba6cee99c222630cdae25e53ce8b
e30775f59ef827c318330febb84f8065fbcc3397
refs/heads/master
2020-05-07T14:29:21.651749
2019-05-05T11:55:04
2019-05-05T11:55:04
180,595,864
0
0
null
null
null
null
UTF-8
Python
false
false
4,076
py
# -*- coding:utf8 -*- # Query Stanford Parser online to obtain the mean dependency distance calculation import os import re import time from bs4 import BeautifulSoup import json import requests languageSet = ["English", "Chinese"] url = 'http://nlp.stanford.edu:8080/parser/index.jsp' def TimeIt(method): def Timed(*args, **kw): t1 = time.time() result = method(*args, **kw) t2 = time.time() t3 = t2 - t1 print( "{} spent {} seconds to run!".format( method.__name__, t3)) return result return Timed def ParseHtml(text): soup = BeautifulSoup(text, 'html.parser') tags = soup.find_all('pre') tag = tags[-1] lines = tag.text.splitlines() numTokens = len(lines) depDistanceSum = 0 for line in lines: # Excluding ROOT if line: if not re.search("ROOT",line): try: numStrs = re.findall("\d+", line) print(line) print(numStrs) index2 = int(numStrs[-1]) index1 = int(re.findall(r"-(.+?),", line)[0]) depDistanceSum += abs(index1-index2) except: pass print("Token number: {0}, dependency distance sum: {1}".format(numTokens, depDistanceSum)) return numTokens, depDistanceSum @TimeIt def FetchSentenceQueryRes(sentence, language="English"): payLoad = {'query':sentence, 'parserSelect':language} if language in languageSet: r = requests.post(url, data=payLoad) else: raise ValueError("Unsupported language!") numTokens, depDistanceSum = ParseHtml(r.text) return numTokens, depDistanceSum # print(r.encoding) print("=== Test samples ===") # FetchSentenceQueryRes("I love writing thesis very much .", "English") # FetchSentenceQueryRes("我 非常 爱 写 论文 .", "Chinese") FetchSentenceQueryRes("截至 2015 年 六月 底 , 全国 机动车 保有量 达 2.7 亿 余辆 , 其中 汽车 163 亿辆 . ", "Chinese") sourceDir = "/Users/liangchengyu/questplusplus/input/ch_text_ordered/" targetDir = "/Users/liangchengyu/questplusplus/input/en_text_ordered/" # print("=== Process source texts ===") # filenames = os.listdir(sourceDir) # unordered # filenames.sort() # with open('sourceDepDistance.txt', 'w', encoding="utf-8") as outfile: # for filename in filenames: # docDepDistanceSum = 0.0 # docNumTokens = 0.0 # numSentences = 0 # with open(sourceDir+filename, 'r', encoding='utf-8') as infile: # print(sourceDir+filename) # lines = infile.readlines() # for line in lines: # numSentences += 1 # numTokens, depDistanceSum = FetchSentenceQueryRes(line, "Chinese") # docDepDistanceSum += depDistanceSum # docNumTokens += numTokens # print("Doc {0} with {1} sentences, mean dependency distance: {2}".format(filename, numSentences, docDepDistanceSum/(docNumTokens-numSentences))) # outfile.write(str(docDepDistanceSum/(docNumTokens-numSentences))+"\n") print("=== Process target texts ===") filenames = os.listdir(targetDir) # unordered filenames.sort() with open('targetDepDistance.txt', 'w', encoding="utf-8") as outfile: for filename in filenames: docDepDistanceSum = 0.0 docNumTokens = 0.0 numSentences = 0 with open(targetDir+filename, 'r', encoding='utf-8') as infile: print(targetDir+filename) lines = infile.readlines() for line in lines: numSentences += 1 numTokens, depDistanceSum = FetchSentenceQueryRes(line, "English") docDepDistanceSum += depDistanceSum docNumTokens += numTokens print("Doc {0} with {1} sentences, mean dependency distance: {2}".format(filename, numSentences, docDepDistanceSum/(docNumTokens-numSentences))) outfile.write(str(docDepDistanceSum/(docNumTokens-numSentences))+"\n")
[ "liangcheng.yu46@gmail.com" ]
liangcheng.yu46@gmail.com
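A hedged worked example of the mean-dependency-distance formula used above, MDD = depDistanceSum / (numTokens - numSentences), where one ROOT arc per sentence is excluded from the denominator; the numbers are made up:

num_tokens, num_sentences = 12, 2
dep_distance_sum = 18   # hypothetical sum of |head - dependent| over non-ROOT arcs
print(dep_distance_sum / (num_tokens - num_sentences))  # -> 1.8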
e6bc5b628771881d5e791a8ae89ab73749fabf68
4cb0b3c4acf4e30dda0f814fab8232bf13617422
/Python/Django/survey_form/apps/surveys/urls.py
861117e318407c2eea5b49f8496698d1e86e5b31
[]
no_license
h0oper/DojoAssignments
d60336b3e67021be0e6a43c1f3693193f83b22d9
28472e7907a18725d702fc9617f27619fcc4fcfc
refs/heads/master
2020-05-09T23:38:27.916674
2018-10-06T20:46:02
2018-10-06T20:46:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
196
py
from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.index), url(r'^process$', views.process), url(r'^result$', views.result), url(r'^goback$', views.goback) ]
[ "ccasil@ucsc.edu" ]
ccasil@ucsc.edu
ef3c9320250f5176230f9473e05712b816609be3
1a9e6140d633c660360dbc543b4d7bcb7ad1c0d9
/tests/QueueTest/run.py
239b7b4710c2ceb58e626c4d8ade1bb87bcd117e
[ "Apache-2.0" ]
permissive
mjj29/apama-epl-containers
aa80c2e3806e196c8444b4ae97ae6e41ca662e82
d3f4d5f9ebd21481ab90a9386a733d394cd01f2a
refs/heads/master
2022-11-11T06:43:57.481269
2020-06-29T15:05:35
2020-06-29T15:05:35
275,584,334
0
0
null
null
null
null
UTF-8
Python
false
false
548
py
from pysys.constants import * from pysys.basetest import BaseTest from apama.correlator import CorrelatorHelper import os class PySysTest(BaseTest): def execute(self): corr = CorrelatorHelper(self, name='correlator') corr.start(logfile='correlator.log') corr.injectEPL('../../../Queue.mon') tests = os.listdir(self.input); tests.sort() for test in tests: if test.endswith('.mon'): corr.injectEPL(test) corr.flush() corr.shutdown() def validate(self): self.assertGrep('correlator.log', expr=' ERROR ', contains=False)
[ "github@matthew.ath.cx" ]
github@matthew.ath.cx
f3c3f92fb93dfc923bdd1d17ba4bdb315bab5d81
74cd45191c0f735acc80f13599fe72044da53c56
/Greedy_Algorithms_Minimum_Spanning_Trees_and_Dynamic_Programming/Assignment2/unionfind.py
75d3fca9b2c361e9c60bdce4e2136a2f6a1cf367
[]
no_license
shenghao001/algorithms
6c41041f5132ce775acc20d0f9d4487cd3e093b6
09410e0214be715949057b3edc0a037cb40680c9
refs/heads/master
2022-03-01T07:00:56.510327
2019-08-31T17:25:43
2019-08-31T17:25:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
764
py
""" Union Find basic structure """ class UnionFind(): def __init__(self, nodes): self.root = dict(zip(nodes, nodes)) self.subtree = dict(zip(nodes, [[node] for node in nodes])) def find(self, node): """ find the root of a node """ return self.root[node] def union(self, i, j): """ union two nodes i and j by merging a smaller tree to the larger one """ pi, pj = self.root[i], self.root[j] if pi != pj: if len(self.subtree[pj]) > len(self.subtree[pi]): pi, pj = pj, pi for node in self.subtree[pj]: self.root[node] = pi self.subtree[pi] += self.subtree[pj] del self.subtree[pj] else: return
[ "anmourchen@gmail.com" ]
anmourchen@gmail.com
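A hedged usage sketch for the UnionFind class above (assuming it is importable as unionfind.UnionFind):

from unionfind import UnionFind

uf = UnionFind(["a", "b", "c", "d"])
uf.union("a", "b")
uf.union("c", "d")
print(uf.find("a") == uf.find("b"))  # True: same component
print(uf.find("b") == uf.find("c"))  # False: still separate components
uf.union("b", "c")
print(uf.find("a") == uf.find("d"))  # True: everything merged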
91a07b21c96e78c49fbbef9d59d11dcb8579ac4f
4a77af523c9e319ba44c6ceaf821208f95c97d5c
/random_name.py
f4926d0b278f3403fe852324fc1f85b2655aeb45
[]
no_license
marteczkah/BAM_coding_resources
2de2a9585a505094431b047f718f1126fa0bb81d
b5cd143bfcafa8b0374b7a04b90db517b79c048a
refs/heads/main
2023-07-13T16:02:12.792447
2021-08-18T16:02:08
2021-08-18T16:02:08
387,182,158
0
0
null
null
null
null
UTF-8
Python
false
false
314
py
from random import randint names = ['Ariya', 'Chandini', 'Kaitlyn', 'Anjali', 'Josephine', 'Zachias', 'Joel', 'Jayden', 'Larriyah', 'Trevor', 'Kimiwa', 'Suvil', 'Walddy', 'Eladio', 'Drew', 'Sasha', 'Lauren', 'Neesh', 'Raisha', 'Faye'] random_index = randint(0, len(names) - 1) print(names[random_index])
[ "48037981+marteczkah@users.noreply.github.com" ]
48037981+marteczkah@users.noreply.github.com
a25a9a45abf6afeb485d96f23c00c3d70ff087dc
b8f9d2cafb8958cdb417f05156acb6aadf90f4dd
/MachineLearning/NetworkAnalysis/PageRank.py
5d5647d240c30f7abe41a25e7aa9ec6bbe87407e
[]
no_license
Anova07/Data-Science
8d14f78236de0053e2d31cc8cd85b9c70dfa2c8a
86dd24fb04a199536ae8f3f5f843aae3fc69c086
refs/heads/master
2021-12-08T10:35:35.512188
2016-03-06T19:08:58
2016-03-06T19:08:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,945
py
# Code from Data Science from Scratch - github

users = [
    { "id": 0, "name": "Hero" },
    { "id": 1, "name": "Dunn" },
    { "id": 2, "name": "Sue" },
    { "id": 3, "name": "Chi" },
    { "id": 4, "name": "Thor" },
    { "id": 5, "name": "Clive" },
    { "id": 6, "name": "Hicks" },
    { "id": 7, "name": "Devin" },
    { "id": 8, "name": "Kate" },
    { "id": 9, "name": "Klein" }
]

friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
               (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]

# give each user a friends list
for user in users:
    user["friends"] = []

# and fill it
for i, j in friendships:
    users[i]["friends"].append(users[j])  # add i as a friend of j
    users[j]["friends"].append(users[i])  # add j as a friend of i

endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3),
                (2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]

def PageRank(users, damping = 0.85, num_iters = 100):
    """
    A simplified version looks like this:
        1. There is a total of 1.0 (or 100%) PageRank in the network.
        2. Initially this PageRank is equally distributed among nodes.
        3. At each step, a large fraction of each node's PageRank is distributed
           evenly among its outgoing links.
        4. At each step, the remainder of each node's PageRank is distributed
           evenly among all nodes.
    """
    # initially distribute PageRank evenly
    num_users = len(users)
    pr = { user["id"] : 1 / num_users for user in users }

    # this is the small fraction of PageRank
    # that each node gets each iteration
    base_pr = (1 - damping) / num_users

    for __ in range(num_iters):
        next_pr = { user["id"] : base_pr for user in users }

        for user in users:
            # distribute PageRank to outgoing links
            links_pr = pr[user["id"]] * damping

            for endorsee in user["endorses"]:
                next_pr[endorsee["id"]] += links_pr / len(user["endorses"])

        pr = next_pr

    return pr

if __name__ == "__main__":
    for user in users:
        user["endorses"] = []        # add one list to track outgoing endorsements
        user["endorsed_by"] = []     # and another to track endorsements

    for source_id, target_id in endorsements:
        users[source_id]["endorses"].append(users[target_id])
        users[target_id]["endorsed_by"].append(users[source_id])

    endorsements_by_id = [(user["id"], len(user["endorsed_by"]))
                          for user in users]

    endorsements_by_id.sort(key=lambda pair: pair[1], reverse=True)

    print("PageRank")
    for user_id, pr in PageRank(users).items():
        print(user_id, pr)
[ "titu1994@gmail.com" ]
titu1994@gmail.com
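A hedged usage sketch for the PageRank function above, run in the same module after the __main__ block has filled in the endorsement lists. One caveat worth noting: a node with no outgoing endorsements (users 4 and 9 here) simply drops its damped share each iteration, so in this simplified variant the scores need not sum to exactly 1:

pr = PageRank(users, damping=0.85, num_iters=100)
for user_id, score in sorted(pr.items(), key=lambda kv: kv[1], reverse=True):
    print(user_id, round(score, 4))
print("total mass:", round(sum(pr.values()), 4))  # slightly below 1.0, see note above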
36fdbfe12bf2e030e562d801fd497d4834a91dc5
562504cf85d80c5e9a0a851a0a9d4f0db07b2b1f
/kt_web/schedule.py
cdf7b0f269ed256bc03d86b7bd247e8748507aa7
[ "MIT" ]
permissive
MrKiven/KT-Web
07cfe3c400b9059cf2cfd833f1caddf7e4ac7443
9256a4943ac5bb3c14fbec5faf17ef84ff0feaf4
refs/heads/master
2021-01-18T22:00:53.558305
2016-09-27T10:32:40
2016-09-27T10:32:40
51,062,361
17
6
null
2016-09-14T08:19:37
2016-02-04T08:28:33
Python
UTF-8
Python
false
false
2,555
py
# -*- coding: utf-8 -*-

import gevent
import gevent.event


class Schedule(object):
    """Schedule Object"""

    def __init__(self, func, stop_event):
        self.func = func
        self.stop_event = stop_event
        self.g = None

    @property
    def name(self):
        return self.func.__name__

    def trigger(self):
        if not self.g and callable(self.func):
            self.g = gevent.spawn(self.func, self.stop_event)

    def stop(self):
        if self.g:
            gevent.kill(self.g)


class ScheduleManager(object):
    """Schedules Manager

    Example:

        from kt_web.schedule import schedule_manager

        def task(stop_event):
            while not stop_event.is_set():
                do_stuff()

        schedule_manager.clear_schedules()  # reset the stop event so schedules can run
        schedule_manager.add(task)
        schedule_manager.trigger(task)  # or `schedule_manager.trigger_all()`

        # here is gunicorn's main loop...

        # After some condition, when you want to stop the *task* schedule, just do:
        schedule_manager.stop(task)  # or `schedule_manager.set_events()` to stop all schedules

    Generally, all schedules stop after the gunicorn master stops.

    Note:
        The *task* func must perform I/O or call `gevent.sleep()` explicitly,
        otherwise the greenlet never yields control.
    """

    def __init__(self):
        """Schedules Manager

        Initialize a global *gevent.event.Event* to manage all greenlets.
        Initialize a dict to store all schedules registered.
        """
        self.stop_event = gevent.event.Event()
        self.schedules = {}

    def add(self, func):
        """The same function is only registered once"""
        if func not in self.schedules:
            self.schedules[func] = Schedule(func, self.stop_event)

    def trigger(self, func):
        """Trigger given func to run as a greenlet"""
        self.schedules[func].trigger()

    def trigger_all(self):
        """Trigger all funcs in the manager to run"""
        for func in self.schedules:
            self.trigger(func)

    def clear(self):
        """Clear schedules in manager"""
        self.schedules.clear()

    def clear_schedules(self):
        """Reset the *gevent.event.Event* to False"""
        self.stop_event.clear()

    def stop(self, func):
        """Stop given func schedule"""
        self.schedules[func].stop()

    def set_events(self):
        """Stop all schedules"""
        if not self.stop_event.is_set():
            self.stop_event.set()  # set event True


schedule_manager = ScheduleManager()
[ "kiven.mr@gmail.com" ]
kiven.mr@gmail.com
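A hedged end-to-end sketch of driving the ScheduleManager above (assumes gevent is installed and the module is importable as kt_web.schedule; the task body is illustrative):

import gevent
from kt_web.schedule import schedule_manager

def heartbeat(stop_event):
    while not stop_event.is_set():
        print("tick")
        gevent.sleep(0.1)  # explicit sleep so other greenlets can run

schedule_manager.clear_schedules()  # make sure the stop event starts unset
schedule_manager.add(heartbeat)
schedule_manager.trigger_all()
gevent.sleep(0.35)                  # let the task tick a few times
schedule_manager.set_events()       # signal all schedules to stop
gevent.sleep(0.2)                   # give the greenlet time to exit its loop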
bd080db414250c7460293da72e2625c463127dcf
55a4d7ed3ad3bdf89e995eef2705719ecd989f25
/main/tensorflow_test/hmm_天气_活动理解.py
1318a13a359255ef5e47ef393f656642d7456de5
[]
no_license
ichoukou/Bigdata
31c1169ca742de5ab8c5671d88198338b79ab901
537d90ad24eff4742689eeaeabe48c6ffd9fae16
refs/heads/master
2020-04-17T04:58:15.532811
2018-12-11T08:56:42
2018-12-11T08:56:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,842
py
# coding:utf-8
states = ('Rainy', 'Sunny')
observations = ('walk', 'shop', 'clean')
start_probability = {'Rainy': 0.6, 'Sunny': 0.4}
transition_probability = {
    'Rainy': {'Rainy': 0.7, 'Sunny': 0.3},
    'Sunny': {'Rainy': 0.4, 'Sunny': 0.6},
}
emission_probability = {
    'Rainy': {'walk': 0.1, 'shop': 0.4, 'clean': 0.5},
    'Sunny': {'walk': 0.6, 'shop': 0.3, 'clean': 0.1},
}

# Print the path probability table
def print_dptable(V):
    print "    ",
    for i in range(len(V)): print "%7d" % i,
    print

    for y in V[0].keys():
        print "%.10s: " % y,
        for t in range(len(V)):
            print "%.12s" % ("%f" % V[t][y]),
        print

def viterbi(obs, states, start_p, trans_p, emit_p):
    """
    :param obs: the observation sequence
    :param states: the hidden states
    :param start_p: initial probabilities (of the hidden states)
    :param trans_p: transition probabilities (between hidden states)
    :param emit_p: emission probabilities (probability that a hidden state shows as an observed state)
    :return:
    """
    # Path probability table: V[time][hidden state] = probability
    V = [{}]
    # An intermediate variable recording which hidden state the current path ends in
    path = {}

    # Initialize the starting state (t == 0)
    for y in states:
        V[0][y] = start_p[y] * emit_p[y][obs[0]]
        path[y] = [y]

    # Run the Viterbi algorithm for t > 0
    for t in range(1, len(obs)):  # [1,2]
        V.append({})
        newpath = {}

        for y in states:
            # Probability of hidden state y =
            #   P(previous state is y0) * P(y0 -> y transition) * P(y emits the current observation)
            # print [(V[t - 1][y0] * trans_p[y0][y] * emit_p[y][obs[t]], y0) for y0 in states]
            # The probability that the weather is y at this step can come from either of the
            # two states the day before, but only the most probable predecessor is kept
            # for the current chain.
            (prob, state) = max([(V[t - 1][y0] * trans_p[y0][y] * emit_p[y][obs[t]], y0) for y0 in states])
            # Record the maximum probability
            V[t][y] = prob
            print V
            # Extend the best path that now ends in y (sunny or rainy)
            newpath[y] = path[state] + [y]
            # print newpath

        # The old paths are no longer needed
        path = newpath

    # Print the table: each day's best sunny/rainy probabilities are the inputs for the
    # next day; in other words, only one value per state is kept per day.
    print_dptable(V)
    (prob, state) = max([(V[len(obs) - 1][y], y) for y in states])
    return (prob, path[state])

def example():
    return viterbi(observations,
                   states,
                   start_probability,
                   transition_probability,
                   emission_probability)

# Note: max([(4,'hello'),(3,'hello'),(10,'hello')]) compares the first element of each tuple!
print example()
[ "985819225@qq.com" ]
985819225@qq.com
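A hedged illustration of the closing note above: Python's max() over tuples compares element by element starting with the first, so (probability, state) pairs are ordered by probability first:

candidates = [(0.024, 'Rainy'), (0.043, 'Sunny'), (0.010, 'Rainy')]
print(max(candidates))  # -> (0.043, 'Sunny'), the most probable predecessor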
1bc7f18b6e720d8b738fcb1a45352c500021c7e8
180c58986bfe06ff04a0d36bd6107aa9d2b27373
/tools/harness/harness.py
53c67266194582960bb561df6c2b0b09d77d7bed
[ "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-dco-1.1", "MIT" ]
permissive
mattweingarten/advanced-operating-systems
086d88ab25a03a32f3ade11ae34edc638428a369
603f9fd81e8246521f05a7aeefcbd3ad12f71657
refs/heads/master
2023-09-01T10:26:02.100704
2021-10-24T11:25:07
2021-10-24T11:25:07
420,660,000
1
0
null
null
null
null
UTF-8
Python
false
false
7,025
py
#
# Copyright (c) 2009-2011, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
#

import os
import types
import string
import datetime
import debug
import re

class Harness:
    RAW_FILE_NAME = 'raw.txt'
    MENU_LST_FILE_NAME = 'menu.lst'
    BOOT_FILE_NAME = 'bootlog.txt'

    TERM_FILTER = re.compile("\[\d\d?m")

    def _clean_line(self, line):
        # filter output line of control characters
        filtered_out = filter(lambda c: c in string.printable, line.rstrip())
        # Delete terminal color codes from output
        filtered_out = self.TERM_FILTER.sub('', filtered_out)
        return filtered_out

    def _write_menu_lst_debug(self, test, build, machine, path):
        # Ignore for tests that do not implement get_modules
        if hasattr(test, "get_modules"):
            menu_lst_file_name = os.path.join(path, self.MENU_LST_FILE_NAME)
            debug.verbose("harness: writing menu.lst to %s" % menu_lst_file_name)
            with open(menu_lst_file_name, "w") as menu:
                menu.write(
                    test.get_modules(build, machine).get_menu_data("/")
                )

    def run_test(self, build, machine, test, path):
        # Open files for raw output from the victim and log data from the test
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        debug.verbose('open %s for raw output' % raw_file_name)
        raw_file = open(raw_file_name, 'w')

        # run the test, dumping the output to the raw file as we go
        try:
            debug.verbose('harness: setup test')
            test.setup(build, machine, path)

            self._write_menu_lst_debug(test, build, machine, path)

            debug.verbose('harness: run test')
            starttime = datetime.datetime.now()
            for out in test.run(build, machine, path):
                # timedelta for the time this line was emitted from the start of the run
                timestamp = datetime.datetime.now() - starttime
                # format as string, discarding sub-second precision
                timestr = str(timestamp).split('.', 1)[0]
                debug.debug('[%s] %s' % (timestr, self._clean_line(out)))
                # log full raw line (without timestamp) to output file
                raw_file.write(out)
            debug.verbose('harness: output complete')
        except KeyboardInterrupt:
            # let the user know that we are on our way out
            debug.error('Interrupted! Performing cleanup...')
            raise
        finally:
            raw_file.close()
            debug.verbose('harness: cleanup test')
            test.cleanup(machine)

    def process_output(self, test, path):
        """Process raw.txt and return array of output lines that begins with grubs
        output, avoids having encoding issues when generating other report files"""
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        if os.path.exists(raw_file_name):
            idx = 0
            with open(raw_file_name, 'r') as rf:
                lines = rf.readlines()
                for idx, line in enumerate(lines):
                    if line.strip() == "root (nd)" or \
                       line.strip().startswith("Kernel starting at address") or \
                       "Barrelfish CPU driver starting on ARMv8" in line:
                        break

            if idx == len(lines)-1:
                debug.verbose('magic string "root (nd)" or "Kernel starting at address" not found, assuming no garbage in output')
                idx=0

            return [ unicode(self._clean_line(l), errors='replace') for l in lines[idx:] ]

        # file did not exist
        return ["could not open %s to process test output" % raw_file_name]

    def extract_errors(self, test, path):
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        debug.verbose('open %s for raw input' % raw_file_name)
        raw_file = open(raw_file_name, 'r')

        try:
            results = test.process_data(path, raw_file)
        finally:
            raw_file.close()

        errors = [results.reason()]
        try:
            errors += results.errors
        except:
            pass

        return errors

    def process_results(self, test, path):
        # open raw file for input processing
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        debug.verbose('open %s for raw input' % raw_file_name)
        raw_file = open(raw_file_name, 'r')

        try:
            results = test.process_data(path, raw_file)
        finally:
            raw_file.close()

        if not results:
            debug.verbose('no results')
            return True # no results, assume success

        retval = True # everything OK

        # Process raw.txt and make a bootlog.txt that begins with grubs or
        # Barrelfish's output, avoids having encoding issues when viewing logfiles
        boot_file_name = os.path.join(path, self.BOOT_FILE_NAME)
        if os.path.exists(raw_file_name):
            idx = 0
            with open(raw_file_name, 'r') as rf:
                lines = rf.readlines()
                for idx, line in enumerate(lines):
                    if line.strip() == "root (nd)" or \
                       "Barrelfish CPU driver starting" in line.strip():
                        break
            if idx > 0:
                with open(boot_file_name, 'w') as wf:
                    wf.writelines(lines[idx:])
            else:
                debug.verbose('Magic string root (nd) not found, do not write bootlog.txt')
        else:
            debug.verbose('No file named %s exists. Do not create bootlog.txt.' % raw_file_name)

        # if a single result, turn it into a list
        if not isinstance(results, types.ListType):
            results = [results]

        for result in results:
            # see if it passed
            try:
                passed = result.passed()
            except NotImplementedError:
                passed = None

            if passed is False:
                debug.log('Test %s FAILED %s' % (test.name, '(' + result.reason() + ')') )
                retval = False
            elif passed:
                debug.verbose('Test %s PASSED' % test.name)

            # write it to a file
            name = result.name if result.name else 'results'
            data_file_name = os.path.join(path, name + '.dat')
            debug.verbose('create %s for processed output' % data_file_name)
            data_file = open(data_file_name, 'w')

            try:
                result.to_file(data_file)
                data_file.close()
            except NotImplementedError:
                debug.verbose('no processed output, remove %s' % data_file_name)
                data_file.close()
                os.remove(data_file_name)

        return retval
[ "daniel.schwyn@inf.ethz.ch" ]
daniel.schwyn@inf.ethz.ch
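A hedged, self-contained illustration of the _clean_line() idea above: an ANSI color sequence such as '\x1b[32m' loses its non-printable ESC byte to the printable filter, and the TERM_FILTER regex then removes the leftover '[32m':

import re
import string

TERM_FILTER = re.compile(r"\[\d\d?m")
line = "\x1b[32mPASS\x1b[0m some_test\n"
printable = ''.join(c for c in line if c in string.printable)  # drops the ESC bytes
print(TERM_FILTER.sub('', printable).rstrip())                 # -> "PASS some_test"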
dd1e606bd18714221455e014139774e53a1e0be2
51c099284113773d0d46529443b171d3b871102a
/venv/lib/python3.6/site-packages/twilio/rest/preview/hosted_numbers/__init__.py
316f6a948e493ba2b10e134f95cb5ec7c11587a8
[]
no_license
ameerbadri/twilio-taskrouter-realtime-dashboard
a9b1582858ef1bb7bf6e2a1fae47349134cc6072
984ca897e53bb04cebba20d909b4c6977a7f306e
refs/heads/master
2023-02-24T08:05:36.340105
2022-09-28T21:10:44
2022-09-28T21:10:44
101,177,223
58
45
null
2023-02-15T21:33:47
2017-08-23T12:25:12
Python
UTF-8
Python
false
false
1,194
py
# coding=utf-8 """ This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base.version import Version from twilio.rest.preview.hosted_numbers.hosted_number_order import HostedNumberOrderList class HostedNumbers(Version): def __init__(self, domain): """ Initialize the HostedNumbers version of Preview :returns: HostedNumbers version of Preview :rtype: twilio.rest.preview.hosted_numbers.HostedNumbers.HostedNumbers """ super(HostedNumbers, self).__init__(domain) self.version = 'HostedNumbers' self._hosted_number_orders = None @property def hosted_number_orders(self): """ :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList """ if self._hosted_number_orders is None: self._hosted_number_orders = HostedNumberOrderList(self) return self._hosted_number_orders def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Preview.HostedNumbers>'
[ "ameerbadri@gmail.com" ]
ameerbadri@gmail.com
af9e8bb66e0e059bd3abada0e92af12d59469d4d
6e70b35111371cf082a3a9294f96f946a28128f7
/nets/densenet.py
70f28e644f743e844a3bf084f1996aae19f1f260
[]
no_license
SDMrFeng/quiz-w8-densenet
46a4dc5c398591d5dc52fa1bd9a478735aec6f63
6456a484e57a652cbf9dfbdb393cf545876ebec9
refs/heads/master
2020-03-28T15:49:38.090040
2018-09-16T07:00:59
2018-09-16T07:00:59
148,629,888
1
0
null
null
null
null
UTF-8
Python
false
false
6,130
py
"""Contains a variant of the densenet model definition.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf slim = tf.contrib.slim def trunc_normal(stddev): return tf.truncated_normal_initializer(stddev=stddev) def bn_act_conv_drp(current, num_outputs, kernel_size, scope='block'): current = slim.batch_norm(current, scope=scope + '_bn') current = tf.nn.relu(current) current = slim.conv2d(current, num_outputs, kernel_size, scope=scope + '_conv') current = slim.dropout(current, scope=scope + '_dropout') return current def block(net, layers, growth, scope='block'): for idx in range(layers): bottleneck = bn_act_conv_drp(net, 4 * growth, [1, 1], scope=scope + '_conv1x1' + str(idx)) tmp = bn_act_conv_drp(bottleneck, growth, [3, 3], scope=scope + '_conv3x3' + str(idx)) net = tf.concat(axis=3, values=[net, tmp]) return net def transition(net, num_outputs, scope='transition'): net = bn_act_conv_drp(net, num_outputs, [1, 1], scope=scope + '_conv1x1') net = slim.avg_pool2d(net, [2, 2], stride=2, scope=scope + '_avgpool2x2') return net def densenet(images, num_classes=1001, is_training=False, dropout_keep_prob=0.8, scope='densenet'): """Creates a variant of the densenet model. images: A batch of `Tensors` of size [batch_size, height, width, channels]. num_classes: the number of classes in the dataset. is_training: specifies whether or not we're currently training the model. This variable will determine the behaviour of the dropout layer. dropout_keep_prob: the percentage of activation values that are retained. prediction_fn: a function to get predictions out of logits. scope: Optional variable_scope. Returns: logits: the pre-softmax activations, a tensor of size [batch_size, `num_classes`] end_points: a dictionary from components of the network to the corresponding activation. """ growth = 24 compression_rate = 0.5 def reduce_dim(input_feature): return int(int(input_feature.shape[-1]) * compression_rate) end_points = {} with tf.variable_scope(scope, 'DenseNet', [images, num_classes]): with slim.arg_scope(bn_drp_scope(is_training=is_training, keep_prob=dropout_keep_prob)) as ssc: ############# My code start ############## #224 x 224 x 3 end_point = 'Conv2d_0' net = slim.conv2d(images, 2 * growth, [7, 7], stride=2, scope=end_point) end_points[end_point] = net #112 x 112 x 2g (g:growth) end_point = 'MaxPool_0' net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point) end_points[end_point] = net #56 x 56 x 2g end_point = 'DenseBlock_1' net = block(net, 6, growth, scope=end_point) end_points[end_point] = net #56 x 56 end_point = 'Transition_1' net = transition(net, reduce_dim(net), scope=end_point) end_points[end_point] = net #28 x 28 end_point = 'DenseBlock_2' net = block(net, 12, growth, scope=end_point) end_points[end_point] = net #28 x 28 end_point = 'Transition_2' net = transition(net, reduce_dim(net), scope=end_point) end_points[end_point] = net #14 x 14 end_point = 'DenseBlock_3' net = block(net, 24, growth, scope=end_point) end_points[end_point] = net #14 x 14 end_point = 'Transition_3' net = transition(net, reduce_dim(net), scope=end_point) end_points[end_point] = net #7 x 7 end_point = 'DenseBlock_4' net = block(net, 16, growth, scope=end_point) end_points[end_point] = net #7 x 7 end_point = 'last_bn_relu' net = slim.batch_norm(net, scope=end_point) net = tf.nn.relu(net) end_points[end_point] = net #7 x 7 # Global average pooling. 
end_point = 'global_avg_pool' net = slim.avg_pool2d(net, net.shape[1:3], scope=end_point) end_points[end_point] = net #1 x 1 # Fully-connected end_point = 'logits' biases_initializer = tf.constant_initializer(0.1) pre_logits = slim.conv2d(net, num_classes, [1, 1], biases_initializer=biases_initializer, scope=end_point) logits = tf.squeeze(pre_logits, [1, 2], name='SpatialSqueeze') end_points[end_point] = logits # Softmax prediction end_points['predictions'] = slim.softmax(logits, scope='predictions') ############### My code end ############# return logits, end_points def bn_drp_scope(is_training=True, keep_prob=0.8): keep_prob = keep_prob if is_training else 1 with slim.arg_scope( [slim.batch_norm], scale=True, is_training=is_training, updates_collections=None): with slim.arg_scope( [slim.dropout], is_training=is_training, keep_prob=keep_prob) as bsc: return bsc def densenet_arg_scope(weight_decay=0.004): """Defines the default densenet argument scope. Args: weight_decay: The weight decay to use for regularizing the model. Returns: An `arg_scope` to use for the inception v3 model. """ with slim.arg_scope( [slim.conv2d], weights_initializer=tf.contrib.layers.variance_scaling_initializer( factor=2.0, mode='FAN_IN', uniform=False), activation_fn=None, biases_initializer=None, padding='same', stride=1) as sc: return sc densenet.default_image_size = 224
[ "fengxuezhi@outlook.com" ]
fengxuezhi@outlook.com
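A hedged channel-count check for the DenseNet variant above (growth 24, compression 0.5, dense blocks of 6/12/24/16 layers); plain arithmetic, no TensorFlow needed:

growth, compression = 24, 0.5
channels = 2 * growth                      # the 7x7 stem outputs 2*growth = 48 channels
for layers in (6, 12, 24):                 # DenseBlock_1..3, each followed by a transition
    channels += layers * growth            # each dense layer concatenates `growth` channels
    channels = int(channels * compression) # the transition layer halves the width
channels += 16 * growth                    # DenseBlock_4 has no transition after it
print(channels)                            # -> 768 features entering global average pooling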
6a3f7a9e2c64be5b0255a25b57bce8436509b3bb
c89579b97327167bdf5322d9eb12e37ed1563189
/api/__init__.py
33c708794f8488e56c30b5caea6d30bf0b7164c4
[]
no_license
Alweezy/movies-read
bff9f4c592f6187b175c64faed62b28980069027
a5bde0df99ea648e9368ceba22b7ad0290bfa6f8
refs/heads/master
2020-05-21T21:50:01.604487
2019-05-12T11:58:07
2019-05-12T11:58:07
186,162,063
0
0
null
null
null
null
UTF-8
Python
false
false
78
py
import os from flask import Flask app = Flask(__name__) from . import movies
[ "alvin@Alvins-MacBook-Pro.local" ]
alvin@Alvins-MacBook-Pro.local
34e5301e4eb916f43547d87a83b4bf95c03b3885
5982e164eb9e4622a2adda8c166ef39af22a09ad
/model_conf.py
4c142f04ce06fe644cb36bff1be97eeb078b4aaa
[]
no_license
guy-amir/core
0f82cbef4c4c8ffe5be76118428a8353090f087b
d9f920a16e503e4db4efff53750143a89ee15bdd
refs/heads/master
2022-11-27T18:32:36.337902
2020-07-20T10:49:37
2020-07-20T10:49:37
266,842,228
0
0
null
null
null
null
UTF-8
Python
false
false
10,646
py
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np

class cifar_net(nn.Module):
    def __init__(self):
        super(cifar_net, self).__init__()

        self.conv_layer1 = nn.Sequential(
            # Conv Layer block 1
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.conv_layer2 = nn.Sequential(
            # Conv Layer block 2
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
        )
        self.conv_layer3 = nn.Sequential(
            # Conv Layer block 3
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        self.fc_layer1 = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Linear(4096, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(inplace=True),
        )

        # self.fc_layer2 = nn.Sequential(
        #     nn.Linear(1024, 512),
        #     nn.ReLU(inplace=True),
        #     nn.Dropout(p=0.1),
        #     nn.Linear(512, 10)
        # )

        self.fc_layer2 = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(512, 256)
        )

        # self.for_tree = nn.Sequential(
        #     nn.Linear(1024, 512),
        #     nn.ReLU(inplace=True),
        #     nn.Dropout(p=0.1),
        #     nn.Linear(512, 10)
        # )

        # self.softmax = nn.Sequential(
        #     nn.Softmax() #dim=1) #maybe add dim if necessarry
        # )

    def forward(self, x):
        cl1 = self.conv_layer1(x)
        cl2 = self.conv_layer2(cl1)
        cl3 = self.conv_layer3(cl2)

        # flatten
        cl3 = cl3.view(cl3.size(0), -1)

        # fc layer
        fc1 = self.fc_layer1(cl3)
        fc2 = self.fc_layer2(fc1)

        # softmax
        # sm = self.softmax(fc2)

        return x,cl1,cl2,cl3,fc1,fc2  # option a - smoothness testing
        # return fc2  # option b - no smoothness testing


class Forest(nn.Module):
    def __init__(self, prms):
        super(Forest, self).__init__()
        self.trees = nn.ModuleList()
        self.prms = prms
        self.y_hat_avg = []
        self.mu_list = []

        # The neural network that feeds into the trees:
        self.prenet = cifar_net()

        for _ in range(self.prms.n_trees):
            tree = Tree(prms)
            self.trees.append(tree)

    def forward(self, xb, yb=None, layer=None, save_flag = False):
        self.predictions = []

        if self.training:
            # convert yb from tensor to one_hot
            yb_onehot = torch.zeros(yb.size(0), int(yb.max()+1))
            yb = yb.view(-1,1)
            if yb.is_cuda:
                yb_onehot = yb_onehot.cuda()
            yb_onehot.scatter_(1, yb, 1)

        self.predictions = []

        if self.prms.use_prenet:
            self.pred_list = self.prenet(xb)
            xb = self.pred_list[-1]

        if (self.prms.use_tree == False):
            return xb

        for tree in self.trees:
            # construct routing probability tree:
            mu = tree(xb)

            # find the nodes that are leaves:
            mu_midpoint = int(mu.size(1)/2)
            mu_leaves = mu[:,mu_midpoint:]
            # NL = mu_leaves.sum(1)

            # create a normalizing factor for leaves:
            N = mu.sum(0)

            if self.training:
                if self.prms.classification:
                    self.y_hat = yb_onehot.t() @ mu/N
                    y_hat_leaves = self.y_hat[:,mu_midpoint:]
                    self.y_hat_batch_avg.append(self.y_hat.unsqueeze(2))
            ####################################################################
            else:
                y_hat_val_avg = torch.cat(self.y_hat_avg, dim=2)
                y_hat_val_avg = torch.sum(y_hat_val_avg, dim=2)/y_hat_val_avg.size(2)
                y_hat_leaves = y_hat_val_avg[:,mu_midpoint:]
            ####################################################################

            pred = (mu_leaves @ y_hat_leaves.t())

            if save_flag:
                self.mu_list.append(mu)
                self.y_hat_val_avg = y_hat_val_avg

            self.predictions.append(pred.unsqueeze(1))

        ####################################################
        # if self.training:
        #     self.y_hat_batch_avg = torch.cat(self.y_hat_batch_avg, dim=2)
        #     self.y_hat_batch_avg = torch.sum(self.y_hat_batch_avg, dim=2)/self.prms.n_trees
        #     self.y_hat_avg.append(self.y_hat_batch_avg.unsqueeze(2))
        #######################################################

        self.prediction = torch.cat(self.predictions, dim=1)
        self.prediction = torch.sum(self.prediction, dim=1)/self.prms.n_trees

        return self.prediction

    def forward_wavelets(self, xb, cutoff_nodes, yb=None, layer=None, save_flag = False):
        # convert yb from tensor to one_hot
        yb_onehot = torch.zeros(yb.size(0), int(yb.max()+1))
        yb = yb.view(-1,1)
        if yb.is_cuda:
            yb_onehot = yb_onehot.cuda()
        yb_onehot.scatter_(1, yb, 1)

        self.predictions = []

        if self.prms.use_prenet:
            self.pred_list = self.prenet(xb)
            xb = self.pred_list[-1]

        if (self.prms.use_tree == False):
            return xb

        for tree in self.trees:
            # construct routing probability tree:
            mu = tree(xb)
            nu = torch.zeros(mu.size())

            # find the nodes that are leaves:
            leaves = torch.zeros(mu.size(1))
            for j in cutoff_nodes:
                nu[:,j] = mu[:,j]
                if 2*j >= nu.size(1):
                    leaves[j] = 1
                else:
                    if not (cutoff_nodes==2*j).sum() and not (cutoff_nodes==(2*j+1)).sum():
                        leaves[j] = 1
            # print(f"leaves: {leaves}")

            # normalize leaf probabilities:
            nu_leaves = nu*leaves
            nu_normalize_factor = nu_leaves.sum(1)
            nu_normalized = (nu_leaves.t()/nu_normalize_factor).cuda()

            # N = mu.sum(0)
            eps = 1e-20
            self.y_hat = nu_normalized.cuda() @ yb_onehot
            self.y_hat = self.y_hat.t()/(self.y_hat.sum(1)+eps)

            pred = (self.y_hat @ nu_normalized.cuda()).t()

            if save_flag:
                self.mu_list.append(mu)
                self.y_hat_val_avg = y_hat_val_avg

            self.predictions.append(pred.unsqueeze(2))

        self.prediction = torch.cat(self.predictions, dim=2)
        self.prediction = torch.sum(self.prediction, dim=2)/self.prms.n_trees

        if self.prms.check_smoothness == True:
            self.pred_list = list(self.pred_list)
            self.pred_list.append(self.prediction)
            return self.pred_list
        else:
            return self.prediction


class Tree(nn.Module):
    def __init__(self, prms):
        super(Tree, self).__init__()
        self.depth = prms.tree_depth
        self.n_leaf = 2 ** prms.tree_depth
        self.n_nodes = self.n_leaf#-1
        self.n_features = prms.features4tree
        self.mu_cache = []
        self.prms = prms

        self.decision = nn.Sigmoid()

        #################################################################################################################
        onehot = np.eye(prms.feature_length)
        # randomly use some neurons in the feature layer to compute decision function
        self.using_idx = np.random.choice(prms.feature_length, self.n_leaf, replace=True)
        self.feature_mask = onehot[self.using_idx].T
        self.feature_mask = nn.parameter.Parameter(torch.from_numpy(self.feature_mask).type(torch.FloatTensor), requires_grad=False)
        #################################################################################################################

    def forward(self, x, save_flag = False):
        if x.is_cuda and not self.feature_mask.is_cuda:
            self.feature_mask = self.feature_mask.cuda()

        feats = torch.mm(x.view(-1,self.feature_mask.size(0)), self.feature_mask)
        decision = self.decision(feats)             # passed sigmoid->[batch_size,n_leaf]
        decision = torch.unsqueeze(decision,dim=2)  # ->[batch_size,n_leaf,1]
        decision_comp = 1-decision
        decision = torch.cat((decision,decision_comp),dim=2)  # -> [batch_size,n_leaf,2]

        mu = x.data.new(x.size(0),1,1).fill_(1.)
        big_mu = x.data.new(x.size(0),2,1).fill_(1.)
        begin_idx = 1
        end_idx = 2

        for n_layer in range(0, self.depth):
            # mu stores the probability a sample is routed at certain node
            # repeat it to be multiplied for left and right routing
            mu = mu.repeat(1, 1, 2)
            # the routing probability at n_layer
            _decision = decision[:, begin_idx:end_idx, :]  # -> [batch_size,2**n_layer,2]
            mu = mu*_decision  # -> [batch_size,2**n_layer,2]
            begin_idx = end_idx
            end_idx = begin_idx + 2 ** (n_layer+1)
            # merge left and right nodes to the same layer
            mu = mu.view(x.size(0), -1, 1)
            big_mu = torch.cat((big_mu,mu),1)

        big_mu = big_mu.view(x.size(0), -1)
        # self.mu_cache.append(big_mu)

        return big_mu  # -> [batch size,n_leaf]


def level2nodes(tree_level):
    return 2**(tree_level+1)

def level2node_delta(tree_level):
    start = level2nodes(tree_level-1)
    end = level2nodes(tree_level)
    return [start,end]
[ "guy.amir.tech@gmail.com" ]
guy.amir.tech@gmail.com
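A hedged, self-contained sketch of the routing idea in Tree.forward above: each internal node splits probability mass with a sigmoid gate, so the leaf probabilities of a soft decision tree always sum to 1 (requires torch; the gate layout here is a simplification, one gate per heap-ordered internal node):

import torch

torch.manual_seed(0)
batch, depth = 4, 3
gates = torch.sigmoid(torch.randn(batch, 2 ** depth - 1))  # one gate per internal node
leaf_probs = []
for leaf in range(2 ** depth):
    prob = torch.ones(batch)
    node = 1                                   # root of the 1-indexed heap
    for bit in format(leaf, '0{}b'.format(depth)):
        p = gates[:, node - 1]
        prob = prob * (p if bit == '0' else 1 - p)  # left takes p, right takes 1-p
        node = 2 * node + int(bit)
    leaf_probs.append(prob)
print(torch.stack(leaf_probs, dim=1).sum(dim=1))  # ~1.0 for every sample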
485514f8b755653e4f00af632730eb4f46e3e4bc
ed65f5edb7abbb4664f619626ea633deeeb9e571
/E.py
b0f7f9d99454c014cf36ba403fe3dced274d8eb5
[]
no_license
JorgeAndre12/Intelligent-drowsiness-monitor-for-safer-driving-through-vision.
2cab29248da799fec98a07affe41d7adcf761961
2a1e03bcb9d6243246fc180d9c15adb74f34752d
refs/heads/master
2020-05-09T20:30:57.442899
2019-06-01T18:10:05
2019-06-01T18:10:05
181,410,160
0
1
null
null
null
null
UTF-8
Python
false
false
3,323
py
# -*- coding: utf-8 -*- """ Created on Thu Apr 11 01:48:57 2019 @author: VAI """ import smbus import numpy as np import urllib.request import cv2 import pygame import time import os import math import requests class MMA7455(): bus = smbus.SMBus(1) def __init__(self): self.bus.write_byte_data(0x1D, 0x16, 0x55) # Setup the Mode self.bus.write_byte_data(0x1D, 0x10, 0) # Calibrate self.bus.write_byte_data(0x1D, 0x11, 0) # Calibrate self.bus.write_byte_data(0x1D, 0x12, 0) # Calibrate self.bus.write_byte_data(0x1D, 0x13, 0) # Calibrate self.bus.write_byte_data(0x1D, 0x14, 0) # Calibrate self.bus.write_byte_data(0x1D, 0x15, 0) # Calibrate def getValueX(self): return self.bus.read_byte_data(0x1D, 0x06) def getValueY(self): return self.bus.read_byte_data(0x1D, 0x07) def getValueZ(self): return self.bus.read_byte_data(0x1D, 0x08) file = 'b.mp3' pygame.init() pygame.mixer.init() # multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascade #https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml face_cascade = cv2.CascadeClassifier('haarcascade/haarcascade_frontalface_default.xml') #https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml eye_cascade = cv2.CascadeClassifier('haarcascade/haarcascade_eye.xml') cap = cv2.VideoCapture(0) url = 'http://funnel.soracom.io' payload = '{"deviceid" : "Car 0001", "lat" : 19.635, "lon" : -99.276}' headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'} nf=1 ne=1 count=0 mma = MMA7455() xmem=mma.getValueX() ymem=mma.getValueY() zmem=mma.getValueZ() if(xmem > 127): xmem=xmem-255 if(ymem > 127): ymem=ymem-255 if(zmem > 127): zmem=zmem-255 time1=time.time() time2=time.time() while 1: x = mma.getValueX() y = mma.getValueY() z = mma.getValueZ() if(x > 127): x=x-255 if(y > 127): y=y-255 if(z > 127): z=z-255 if(abs(xmem-x)>10): print('crash') r = requests.post(url, data=payload, headers=headers) exit() if(abs(ymem-y)>10): print('crash') r = requests.post(url, data=payload, headers=headers) exit() if(abs(zmem-z)>10): print('crash') r = requests.post(url, data=payload, headers=headers) exit() ret, img = cap.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (x,y,w,h) in faces: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) roi_gray = gray[y:y+h, x:x+w] roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray,1.3, 40) ne=len(eyes) for (ex,ey,ew,eh) in eyes: cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) nf=len(faces) if(nf>0 and ne<1): time1=time.time() print(time1-time2) if((time1-time2)>=3): pygame.mixer.music.load(file) pygame.mixer.music.play() else: pygame.mixer.music.stop() time1=time.time() time2=time1 cv2.imshow('img',img) k = cv2.waitKey(30) & 0xff if k == 27: break cv2.destroyAllWindows()
[ "noreply@github.com" ]
noreply@github.com
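A hedged distillation of the eye-closure timer in the main loop above (the original compares time1 and time2 and sounds the alarm after 3 seconds); all names are illustrative:

import time

def drowsiness_alert(face_found, eyes_found, closed_since, now, threshold=3.0):
    # Returns (new_closed_since, alarm) for one pass of the detection loop.
    if face_found and not eyes_found:
        if closed_since is None:
            closed_since = now          # eyes just closed: start the timer
        return closed_since, (now - closed_since) >= threshold
    return None, False                  # eyes visible (or no face): reset the timer

closed_since, t0 = None, time.time()
for dt, face, eyes in [(0.0, True, True), (1.0, True, False), (4.5, True, False)]:
    closed_since, alarm = drowsiness_alert(face, eyes, closed_since, t0 + dt)
    print(dt, alarm)  # the alarm only fires on the last sample (eyes closed 3.5 s)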
3a88ba0146dd6f13de5b231f22b57f1192ef083e
85b218e0e4456404e2bc6be6b13e3fec11c5e86b
/appointment/urls.py
69d25c665e5f1c461bbff5bcdf960904cafe5695
[]
no_license
Leviona/barbershop-site
6d6fddb0b0c4c7c011cf676b5ccdb6b5e713bbee
f932fa53a765da219bcff9fbac9904d6863ef1ce
refs/heads/master
2020-03-17T04:56:53.382824
2018-06-30T03:51:02
2018-06-30T03:51:02
133,295,856
0
1
null
null
null
null
UTF-8
Python
false
false
144
py
from django.urls import path from . import views app_name = "appointment" urlpatterns = [ path('', views.home_page, name="home_page"), ]
[ "svzef@outlook.com" ]
svzef@outlook.com
9afc659a83985ca5e7a34f87ceb3a5de075cc25b
5a3b070f39715f604a8bfc38888b6ee4382e54ac
/TalkTalk-Server/app.py
aa21f179f70f37f987a80665e81a7a672d8cc074
[]
no_license
aupaprunia/talktalk
717245ec0378559abf2dba0793822d19613faf57
895418aa25ad154449f4036362a77b615092b00b
refs/heads/main
2023-04-13T03:53:37.361677
2021-04-11T19:08:54
2021-04-11T19:08:54
356,480,824
0
0
null
null
null
null
UTF-8
Python
false
false
3,022
py
from flask import Flask, request
import pyrebase
import requests

choice_dict = {1: "Sad", 2: "Happy", 3: "Angry", 4: "Excited"}

config = {"apiKey": "AIzaSyBrey3ZZ5X74WrAQuj7HISWLl70PqP8dnA",
          "authDomain": "trialproject-55deb.firebaseapp.com",
          "databaseURL": "https://trialproject-55deb-default-rtdb.firebaseio.com",
          "projectId": "trialproject-55deb",
          "storageBucket": "trialproject-55deb.appspot.com",
          "messagingSenderId": "930590452475",
          "appId": "1:930590452475:web:d8857d9906874468fd5e5e"
          }

firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
db = firebase.database()

app = Flask(__name__)

# @app.route('/signup', methods =['GET'])
# def signup():
#     register = request.get_json()
#     email = register['email']
#     password = register['password']
#     auth.create_user_with_email_and_password(email, password)
#     return {"status": " success", "email": email, "password": password}

@app.route('/signin/<string:email>/<string:password>', methods=['GET'])
def signin(email, password):
    try:
        result = auth.sign_in_with_email_and_password(email, password)
        global userId
        userId = result['localId']
        get_token = db.child("Users").child(userId).get()
        global token
        token = get_token.val()['token']
        name = get_token.val()['name']
        return {"token": token, "status": 1, "name": name}
    except:
        return {"status": 0}

@app.route('/speaker/<int:choice>', methods=["GET"])
def speaker(choice):
    try:
        users = db.child("Online").child("Listener").child(choice_dict[choice]).get()
        uid = ""
        flag = True
        for key in users.val():
            if flag == True:
                uid = key
                flag = False
        db.child("Online").child("Listener").child(choice_dict[choice]).child(uid).child("status").set("1")
        db.child("Users").child(userId).child("token").set(token-1)
        url = "https://fcm.googleapis.com/fcm/send"
        payload = "{\r\n \"to\":\"/topics/"+userId+"\",\r\n \"data\": {\r\n \"title\": \"Alert\",\r\n \"body\": \"You have an incoming call...\"\r\n }\r\n}"
        headers = {'Authorization': 'key=AAAA2KuDavs:APA91bGCwqzJYQntRNVZU4WfjDh71D2kLvI4ei3iXr9BIlrz-lzp3HdzZWKAWghUwZK0i1rvC0RKFl2rdk1uyAf3RozvlPO1snRvwYpxJVz5qAH5keFgzygj8h16D0g-YDHrz6SoqJfh', 'Content-Type': 'application/json'}
        response = requests.request("POST", url, headers=headers, data=payload)
        print(response)
        return {"channel_name": uid, "status": 1}
    except:
        return {"message": "No Listener available. Try reconnecting later.", "status": 0}

@app.route('/listner/<int:choice>', methods=["GET"])
def push_listner(choice):
    db.child("Online").child("Listener").child(choice_dict[choice]).child(userId).child("status").set("0")
    db.child("Online").child("Listener").child(choice_dict[choice]).child(userId).child("uid").set(userId)
    db.child("Users").child(userId).child("token").set(token+1)
    return {"status": 1, "message": "You will be connected to a speaker shortly."}

if __name__ == '__main__':
    app.run(debug=True)
[ "=" ]
=
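A hedged alternative to the hand-escaped FCM payload string above: building the same JSON body with json.dumps avoids quoting mistakes (the variable value below is illustrative):

import json

user_id = "some-user-id"  # hypothetical topic suffix
payload = json.dumps({
    "to": "/topics/" + user_id,
    "data": {"title": "Alert", "body": "You have an incoming call..."},
})
print(payload)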
c918caa29cf672dc3e1c12dfb518316fb26e5be4
9d140aab8f296235f5e23befaef1b4be72a97a2b
/MazeRightDown.py
3a63d3f9b922c7d15f309b9025b0afd0b535a05e
[]
no_license
shreyal18ss/pyCodes
38224d6a1a6a14510ccb27100b12f661c6c42bee
82bd602d84eaaf253637206b7aadd90577abc5ba
refs/heads/master
2020-07-04T12:54:55.801195
2020-06-02T14:54:55
2020-06-02T14:54:55
202,291,874
0
0
null
null
null
null
UTF-8
Python
false
false
305
py
# Greedy walk through the grid: from each cell, step to whichever of the cell
# below or the cell to the right holds the smaller value, printing and summing
# the values along the way.
M = [[1, 8, 21, 7],
     [19, 17, 10, 20],
     [2, 18, 23, 22],
     [14, 25, 4, 13]]
x, y = 0, 0
t = M[0][0]
w = t
print(t)
while x <= 3:
    try:
        if M[x+1][y] < M[x][y+1]:
            x += 1
        else:           # move right otherwise (including ties, so the walk always advances)
            y += 1
    except IndexError:  # hit the last row or column: only one direction is left
        if x < 3:
            x += 1
        else:
            break
    t = M[x][y]
    print(t)
    w += t
print(w)
[ "noreply@github.com" ]
noreply@github.com
fb88ca66b76486a545fb6f24294c2de0f3b48ebc
8dde2278d17fa0aed506f99cc77abc0543bd6607
/client/bouquets.py
ce5133bf8d96fb0f274c4aebbee9e7b19253fe0a
[ "MIT" ]
permissive
thred/openwebif-client
0a672ca2894f16d4907671c0f905e591df9ecadc
0eb7711b47be95935ae69ec4acbec95357592205
refs/heads/master
2020-04-03T07:40:38.293985
2018-10-30T20:30:48
2018-10-30T20:30:48
155,109,909
0
0
null
null
null
null
UTF-8
Python
false
false
673
py
import commands import utils def getBRef(name): json = utils.requestJson("bouquets") for bouquet in json["bouquets"]: if bouquet[1] == name: return bouquet[0] else: raise ValueError("Bouquet not found: " + name) def getDefaultBRef(): json = utils.requestJson("bouquets") return json["bouquets"][0][0] def consume(): json = utils.requestJson("bouquets") for bouquet in json["bouquets"]: print(bouquet[1]) def help(): print("""\ Usage: owifc bouquets Lists all known bouquets.""") commands.register("bouquets", "Lists all known bouquets.", lambda: consume(), lambda: help())
[ "thred@users.noreply.github.com" ]
thred@users.noreply.github.com
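A hedged illustration of the for/else idiom used in getBRef above: the else branch runs only when the loop finishes without returning, which is what turns "not found" into an exception (the pairs below are made up):

def first_value(pairs, wanted):
    for key, value in pairs:
        if key == wanted:
            return value
    else:
        # only reached when the loop completed without returning
        raise ValueError("not found: " + repr(wanted))

print(first_value([("Favourites", "1:7:1:0"), ("News", "1:7:2:0")], "News"))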
e4b8698761a7e70c874c568c855a7858b383725f
815575ebe1fae77118dae5d9eabb4e749f657e70
/UserInteface/game_engine/RelativeSprite.py
3c0f0580b6f64606b181e4c169c71958ac7397a5
[]
no_license
BlackAndGoldAutonomousRacing/UserInterface
66171a31e9a722a92fc1cdcd94155dadd40d20c7
38535b18b6cb42afe882cc30b4018040ed267768
refs/heads/main
2023-09-02T19:01:25.769073
2021-11-05T00:01:56
2021-11-05T00:01:56
416,441,485
0
0
null
null
null
null
UTF-8
Python
false
false
6,659
py
'''
===============================================================================
ENGR 133 Fa 2020

Assignment Information
    Assignment:     Final Project
    Author:         Alec Pannunzio, afpannun@purdue.edu
    Team ID:        LC4-5
===============================================================================
'''
from game_engine.Object2D import Object2D
from game_engine.ObjectDraw import ObjectDraw
from game_engine.GameEngineToolbox import checkType
from game_engine.Sprite import Sprite
from math import atan2, sqrt, sin, cos, pi

# a more advanced sprite that can be painted in the perspective of another object. Basically a simple way of doing cameras
# NOTE: this object will act as a normal Sprite unless you set the camera with setCamera().
# NOTE: if it is a first person game it is customary to set all of the gameobjects' cameras to the RelativeSprite representing the character, INCLUDING setting the camera of the character to itself.
'''
class members:
    hasCamera - bool - whether the RelativeSprite has a camera assigned to it. If this is false this object will be painted the same as a normal Sprite.
    camera - Object2D - the object that we will paint relative to
    zeroXPosition - float - offsets the sprite's x position by this amount
    zeroYPosition - float - offsets the sprite's y position by this amount
    zeroRotation - float - offsets the sprite's rotation by this amount
    displayXPosition - float - the x position that we will display (relative to the camera)
    displayYPosition - float - the y position that we will display (relative to the camera)
    displayRotation - float - the rotation that we will display (relative to the camera)
'''
class RelativeSprite(Sprite):
    def __init__(self,name,xPosition,yPosition,scaling,imgSource,objectDraw):
        super(RelativeSprite,self).__init__(name,xPosition,yPosition,scaling,imgSource);

        checkType(objectDraw,ObjectDraw,"objectDraw must be an ObjectDraw");

        self.hasCamera = False;
        self.objectDraw = objectDraw;
        self.camera = None;
        self.zeroXPosition = self.objectDraw.screenSizeX/2;
        self.zeroYPosition = self.objectDraw.screenSizeY/2;
        self.zeroRotation = 0;
        self.displayXPosition = xPosition;
        self.displayYPosition = yPosition;
        self.displayRotation = 0;

    # sets the camera of this object to the passed Object2D
    def setCamera(self,camera):
        assert issubclass(camera.__class__, Object2D); # make sure the object we are adding is a child of Object2D
        self.camera = camera;
        self.hasCamera = True;

    # removes the camera from the object so it paints like a normal Sprite
    def removeCamera(self):
        self.camera = None;
        self.hasCamera = False;

    # calls the update method of superclasses and updates the displayimage to be in the perspective of the camera
    def update(self):
        if not self.hasCamera:
            super(RelativeSprite,self).update(); # update like a normal Sprite
            if (self.zeroRotation != 0):
                self.rotation += self.zeroRotation;
                super(RelativeSprite,self).updateDisplayImage(); # update the display image with the relative values
                self.rotation -= self.zeroRotation;
        else:
            super(Sprite,self).update(); # call the update method of Object2D, NOT Sprite

            # save current position
            prevRotation = self.rotation;

            self.displayXPosition = self.xPosition;
            self.displayYPosition = self.yPosition;

            cameraPosition = self.camera.getPosition();

            # translate relative to the camera
            self.displayXPosition -= cameraPosition[0];
            self.displayYPosition -= cameraPosition[1];

            ''' rotate around camera '''
            # convert to polar
            angle = atan2(self.displayYPosition,self.displayXPosition); # the angle of this object relative to the camera
            radius = sqrt(self.displayXPosition**2 + self.displayYPosition**2) # the distance this object is from the camera

            # rotate the object around the camera by <the camera's rotation>
            angle -= pi*self.camera.getRotation()/180;

            # convert back to rectangular and assign to displayPosition
            self.displayXPosition = radius * cos(angle);
            self.displayYPosition = radius * sin(angle);

            self.rotation -= self.camera.getRotation(); # subtract the camera's rotation so our displayed rotation is relative to it

            # add zero offsets
            self.rotation += self.zeroRotation;
            self.displayXPosition += self.zeroXPosition;
            self.displayYPosition += self.zeroYPosition;

            # set displayRotation
            self.displayRotation = self.rotation;

            ''' update the display image with the relative values '''
            super(RelativeSprite,self).updateDisplayImage(); # update the display image with the relative values

            ''' reset rotation back to where it was before '''
            self.rotation = prevRotation;
            # we don't have to reset position since we used displayPosition rather than directly changing the object's position

    # paint relative to the camera
    def paint(self,screen):
        if self.hasCamera: # if we have a camera paint using the relative values
            screen.blit(self.displayImg,[self.displayXPosition-self.showSizeX/2, self.displayYPosition-self.showSizeY/2]);
        else: # otherwise just use Sprite's paint method
            super(RelativeSprite,self).paint(screen);

    # set the zero position of the relativeSprite
    def setZeroPosition(self,zeroX,zeroY):
        checkType(zeroX,(int,float),"zero position must be a number");
        checkType(zeroY,(int,float),"zero position must be a number");
        self.zeroXPosition = zeroX;
        self.zeroYPosition = zeroY;

    # set the zero rotation of the relativeSprite
    def setZeroRotation(self,zeroRot):
        checkType(zeroRot,(int,float),"zeroRotation must be a number");
        self.zeroRotation = zeroRot;

'''
===============================================================================
ACADEMIC INTEGRITY STATEMENT

I have not used source code obtained from any other unauthorized
source, either modified or unmodified. Neither have I provided
access to my code to another. The project I am submitting
is my own original work.
===============================================================================
'''
[ "35432488+samiam567@users.noreply.github.com" ]
35432488+samiam567@users.noreply.github.com
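A hedged numeric check of the rotate-around-camera math in update() above: rotating the point (3, 4) for a 90-degree camera rotation via polar coordinates gives (4, -3), which matches the standard rotation matrix for -90 degrees:

from math import atan2, sqrt, sin, cos, pi

x, y, camera_deg = 3.0, 4.0, 90.0
angle = atan2(y, x) - pi * camera_deg / 180   # same subtraction as in update()
r = sqrt(x*x + y*y)
print(r * cos(angle), r * sin(angle))         # -> approximately (4.0, -3.0)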
c7ef812fb6b1c0a1bcbf2e8e463e19da84748944
6b265b404d74b09e1b1e3710e8ea872cd50f4263
/Python/Exercises/TreeChecker/check_tree_2.0.py
857bec02ba2b491a4a9f7d5ad9e1b2461082a30e
[ "CC-BY-4.0" ]
permissive
gjbex/training-material
cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae
e748466a2af9f3388a8b0ed091aa061dbfc752d6
refs/heads/master
2023-08-17T11:02:27.322865
2023-04-27T14:42:55
2023-04-27T14:42:55
18,587,808
130
60
CC-BY-4.0
2023-08-03T07:07:25
2014-04-09T06:35:58
Jupyter Notebook
UTF-8
Python
false
false
1,828
py
#!/usr/bin/env python

import sys


class BaseError(Exception):

    def __init__(self, position):
        super().__init__()
        self._position = position

    @property
    def position(self):
        return self._position

    def __str__(self):
        return self.message


class MissingRBError(BaseError):

    def __init__(self, position):
        super().__init__(position)
        msg = 'missing right bracket for bracket at {0}'
        self.message = msg.format(position)


class MissingLBError(BaseError):

    def __init__(self, position):
        super().__init__(position)
        msg = 'missing left bracket for bracket at {0}'
        self.message = msg.format(position)


class TrailingCharsError(BaseError):

    def __init__(self, position):
        super().__init__(position)
        self.message = 'trailing characters at position {0}'.format(position)


def check_tree(tree):
    bracket_positions = []
    position = 1
    for character in tree:
        if character == '(':
            bracket_positions.append(position)
        elif character == ')':
            if bracket_positions:
                bracket_positions.pop()
            else:
                raise MissingLBError(position)
        if len(bracket_positions) == 0:
            break
        position += 1
    # anything after the bracket that balanced the tree is a trailing character
    if len(bracket_positions) == 0 and position < len(tree):
        raise TrailingCharsError(position + 1)
    elif len(bracket_positions) > 0:
        raise MissingRBError(bracket_positions.pop())


def main():
    tree = ''.join([line.strip() for line in sys.stdin.readlines()])
    try:
        check_tree(tree)
    except BaseError as error:
        sys.stderr.write('### error: {0}\n'.format(str(error)))
        return 1
    else:
        return 0

if __name__ == '__main__':
    status = main()
    sys.exit(status)
[ "geertjan.bex@uhasselt.be" ]
geertjan.bex@uhasselt.be
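A hedged usage sketch for check_tree above, run with the functions in scope (the file name contains dots, so a plain import needs care):

for tree in ["((A,B),C)", "((A,B),C", "(A,B)extra"]:
    try:
        check_tree(tree)
        print(tree, '-> balanced')
    except BaseError as error:
        print(tree, '->', error)  # missing right bracket / trailing characters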
fa96549438a9bdc21019bf9d66d468f439d20122
7517ac3ca1a77a23f75bb774a39cfe826c77f687
/run_carver_on_2018_07_02_using_simple_for_loop.py
d0d2731f001743b2b78a15607075f801acc68db3
[]
no_license
carshadi/carver
7f24859a3becaeaf41046912490c54d31f805163
6f568e1254d024139769950142e139909c2c1c9d
refs/heads/master
2022-08-03T06:10:43.880192
2019-11-15T22:46:53
2019-11-15T22:46:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
334
py
#! /usr/bin/env python import navigator navigator.main(['-i', '/nrs/mouselight/SAMPLES/2018-07-02', '-s', '/groups/mousebrainmicro/mousebrainmicro/scripts/gt/2018-07-02/consensus-neurons-with-machine-centerpoints-labelled-as-swcs', '-o', '/nrs/funke/mouselight-v2/2018-07-02', '-f'])
[ "taylora@janelia.hhmi.org" ]
taylora@janelia.hhmi.org
a8518cb7746c3200f3217bba2498fb3fe7e3c877
878eb4b539d77051dd7330389b90d988d9aef8f3
/CAPITULO 7/Exercicio R.py
af3ef2eb5c93a5dc7848b95c4831130451504429
[ "MIT" ]
permissive
LarmIg/Algoritmos-Python
a031badc9f607cbbc109ee4ca8bfe60d5636d867
f2c9889705cacac007833f6ab9a413b06213f882
refs/heads/master
2022-11-25T05:36:26.313469
2020-07-25T11:59:36
2020-07-25T11:59:36
282,436,201
0
0
MIT
2020-07-25T12:08:13
2020-07-25T12:08:12
null
UTF-8
Python
false
false
767
py
# Write a program that reads data into two one-dimensional arrays (A and B),
# where array A has ten elements and array B has five elements. The elements
# stored in the arrays must be strings. Build an array C with capacity for a
# total of 15 elements and merge arrays A and B into C. Display the data of
# array C in descending alphabetical order.

A = []
B = []
C = []

for i in range(10):
    A.append(str(input('Informe um valor para a Matriz A[{}]'.format(i))))

for i in range(5):
    B.append(str(input('Informe um valor para a Matriz B[{}]'.format(i))))

C = A + B
C.sort(reverse=True)

for i in range(len(C)):
    print('C[{}] = {}'.format(i, C[i]))
[ "noreply@github.com" ]
noreply@github.com
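The merge-and-sort step in the exercise above can also be written with sorted(), which leaves A and B untouched; a small sketch with made-up sample data:

A = ['banana', 'apple', 'cherry', 'date', 'fig', 'grape', 'kiwi', 'lemon', 'mango', 'pear']
B = ['plum', 'melon', 'lime', 'peach', 'guava']

C = sorted(A + B, reverse=True)  # descending alphabetical order

for i, value in enumerate(C):
    print('C[{}] = {}'.format(i, value))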
6f4a8621cbc7c24e77d06254fbc9e3d8cf13db78
843ed91ada6131fceee495e3e398d426db182457
/core/views.py
d42b2c5459c63c5fe8d3d8358de24af2bfad3970
[]
no_license
meydson/Aula_Django_DIO
67ae1d889e4927dcb44b25ddbb220b79c518b6a5
12d4a646e26fd0dd879255754bba76acc1e608fb
refs/heads/master
2022-11-07T20:25:57.276238
2020-06-30T00:51:25
2020-06-30T00:51:25
275,959,490
0
0
null
null
null
null
UTF-8
Python
false
false
162
py
from django.shortcuts import render, HttpResponse

# Create your views here.


def hello(requests, nome):
    return HttpResponse('<h1>Hello {}</h1>'.format(nome))
[ "meydsonbaracho@gmail.com" ]
meydsonbaracho@gmail.com
de171643d720ac13e8a745fb6fe61a49ef535492
431f9d1f7a84ee40520fd88fa6aa4e7b0d235047
/geometric_controller/src/ss/trajectory_simulation.py
1b5ab398a461b4320bbd39442869d76e517a8ecc
[]
no_license
indsy123/Quadrotor-Navigation-using-Receding-Horizon-planning
aa0571457292ea2b1eefcd2119332430083c7c42
255e573a42660420fa0d3ce6dac252df8d737c8c
refs/heads/master
2020-07-26T13:14:34.814707
2020-03-24T21:38:16
2020-03-24T21:38:16
208,655,725
3
2
null
null
null
null
UTF-8
Python
false
false
4,758
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 17:28:19 2017

This initial script generates a dummy trajectory. This should be replaced
eventually with what you want the trajectory to be, or with your own method to
generate the trajectory. Basically I made my own message file called
"Desired_trajectory" that is a message type having position, velocity,
acceleration and direction. Velocity and acceleration need not be here, as
Lee's paper says the trajectory is an (x,y,z) position of the CoG and a
direction. The current trajectory is the one used in example (1) in the paper
"Geometric tracking control of a quadrotor in SE(3)" by T. Lee. You can change
it the way you want: just pick a curve [x(t),y(t),z(t)] and a direction
[cos(pi*t), sin(pi*t), 0], or as you fancy. Differentiate the x, y and z to
get velocities and accelerations. While it is possible to get rid of
velocities and accelerations here and calculate them in the controller script,
I found it was not resulting in much saving in terms of time. You may also
need to change queue_size and the publishing frequency in "r = rospy.Rate(n)".
With this function my laptop can generate at most 155 Hz.
"""
__author__ = 'Indrajeet yadav'
__version__ = '0.1'
__license__ = 'Nil'

import numpy as np
import rospy
from isy_geometric_controller.msg import Desired_Trajectory
from isy_geometric_controller.msg import modifiedodometry
from nav_msgs.msg import Odometry
import time
import scipy
from scipy import special


class trajectory(object):
    "calculates desired position, linear velocity, linear acceleration and direction"
    def __init__(self, name_of_uav, time_instance):
        self.time = time.time()
        self.counter = 0
        self.uav = name_of_uav
        self.pub = rospy.Publisher('/desired_trajectory', Desired_Trajectory, queue_size=10, tcp_nodelay=True)
        self.T = 12
        self.w = 2*np.pi/self.T
        try:
            #rospy.Subscriber('/'+self.uav+'/odom', Odometry, self.callback, queue_size=10, tcp_nodelay=True)
            rospy.Subscriber('/'+self.uav+'/odometry_sensor1/odometry', Odometry, self.callback, queue_size=100, tcp_nodelay=True)
            #rospy.Subscriber('/'+self.uav+'/odom', Odometry, self.callback, queue_size=100, tcp_nodelay=True)
        except:
            print('problem subscribing to odometry topic')

    def callback(self, data):
        #print self.time
        msg = Desired_Trajectory()
        msg.header.stamp = data.header.stamp
        #msg.header.stamp = rospy.Time.now()
        t = time.time()
        tt = t - self.time
        if tt <= 3:
            #msg.desired_position.x = 1.0 * np.cos(self.w*tt)
            #msg.desired_position.y = 1.0 * 0.5 * np.sin(2*self.w*tt)
            #msg.desired_position.z = 0.75 + 0.25*np.sin(self.w*tt)
            msg.desired_velocity.x = 0  # -1.0 * (self.w) * np.sin(self.w*tt)
            msg.desired_velocity.y = 0  # 1.0*0.5 * (2*self.w) * np.cos(2*self.w*tt)
            msg.desired_velocity.z = 0.5  # 0.25*self.w*np.cos(self.w*tt)
            msg.desired_acceleration.x = 0  # -1.0 * (self.w)**2 * np.cos(self.w*tt)
            msg.desired_acceleration.y = 0  # -1.0*0.5 * (2*self.w)**2 * np.sin(2*self.w*tt)
            msg.desired_acceleration.z = 0  # -0.25*self.w**2*np.sin(self.w*tt)
            msg.desired_direction.x = 1  # np.cos(2*self.w*tt)
            msg.desired_direction.y = 0  # np.sin(2*self.w*tt)
            msg.desired_direction.z = 0
        else:
            #msg.desired_position.x = 1.0
            #msg.desired_position.y = 0.0
            #msg.desired_position.z = 0.15
            msg.desired_velocity.x = 0.0
            msg.desired_velocity.y = 0.0
            msg.desired_velocity.z = 0
            msg.desired_acceleration.x = 0.0
            msg.desired_acceleration.y = 0.0
            msg.desired_acceleration.z = 0
            msg.desired_direction.x = 1
            msg.desired_direction.y = 0
            msg.desired_direction.z = 0
        msg.controller = 1  # position controller
        self.pub.publish(msg)


# may get rid of the code below eventually when the trajectory topic will be
# subscribed in the main controller script. Remember to initialize the
# "Trajectory" node in the controller script eventually.
if __name__ == '__main__':
    name = 'firefly'
    #name = rospy.get_param('~vehicle_name')
    rospy.init_node('Trajectory', anonymous=False, log_level=rospy.DEBUG)
    r = rospy.Rate(200)
    start_time = time.time()
    try:
        while not rospy.is_shutdown():
            current_time = time.time()
            t = current_time - start_time
            #print t
            traj = trajectory(name, current_time)
            rospy.spin()
            #print 'a', time.time()-a
            r.sleep()
    except rospy.ROSInterruptException:  # fixed: catch the exception class, not a call to it
        pass
[ "indragt@udel.edu" ]
indragt@udel.edu
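The docstring's recipe — pick a curve [x(t), y(t), z(t)] and differentiate it for the velocity and acceleration fields — can be checked symbolically. A sketch using sympy (not a dependency of the original script) on the commented-out x component above:

import sympy as sp

t, w = sp.symbols('t w')

x = sp.cos(w * t)      # desired position, x component
vx = sp.diff(x, t)     # velocity:     -w*sin(w*t)
ax = sp.diff(x, t, 2)  # acceleration: -w**2*cos(w*t)

print(vx, ax)  # matches the commented-out expressions in the callback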
65199f5c83020b074cf08c024357136753dc811f
cbde70bf9eb6ee3d8b26b23a509298f3f199b29b
/tensorflow_binding/transducer_tensorflow/__init__.py
8b21e359f752eb88a4464c39fd2fdb07a2183fcb
[ "MIT" ]
permissive
pkuVanilla1207/warp-rnnt
64aeab173fa2dfec13355b564aba6e792df62e80
6de0527c5e23021a6a914c0826dff5c5ec8fe3d5
refs/heads/master
2023-03-18T07:10:41.867541
2021-03-09T20:06:57
2021-03-09T20:06:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,757
py
import imp

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops.nn_grad import _BroadcastMul
from typing import Optional, AnyStr

lib_file = imp.find_module('kernels', __path__)[1]
_warp_transducer = tf.load_op_library(lib_file)


def transducer_loss(
        log_probs,
        labels,
        frames_lengths,
        labels_lengths,
        average_frames: bool = False,
        reduction: Optional[AnyStr] = None,
        blank: int = 0):
    """The CUDA-Warp Transducer loss.

    Args:
        log_probs (FloatTensor): Input tensor with shape (N, T, U, V)
            where N is the minibatch size, T is the maximum number of
            input frames, U is the maximum number of output labels and V is
            the vocabulary of labels (including the blank).
        labels (IntTensor): Tensor with shape (N, U-1) representing the
            reference labels for all samples in the minibatch.
        frames_lengths (IntTensor): Tensor with shape (N,) representing the
            number of frames for each sample in the minibatch.
        labels_lengths (IntTensor): Tensor with shape (N,) representing the
            length of the transcription for each sample in the minibatch.
        average_frames (bool, optional): Specifies whether the loss of each
            sample should be divided by its number of frames.
            Default: False.
        reduction (string, optional): Specifies the type of reduction.
            Default: None.
        blank (int, optional): label used to represent the blank symbol.
            Default: 0.
    """
    assert average_frames is None or isinstance(average_frames, bool)
    assert reduction is None or reduction in ("none", "mean", "sum")
    assert isinstance(blank, int)

    costs, _ = _warp_transducer.transducer_loss(
        log_probs, labels,
        frames_lengths, labels_lengths,
        blank)

    if average_frames:
        costs = costs / frames_lengths  # (N,)

    if reduction == "sum":
        return tf.reduce_sum(costs)
    elif reduction == "mean":
        return tf.reduce_mean(costs)
    return costs


@ops.RegisterGradient("TransducerLoss")
def _TransducerLossGrad(op, grad_loss, _):
    """The derivative provided by Transducer Loss.

    Args:
        op: the TransducerLoss op.
        grad_loss: The backprop for cost.

    Returns:
        The Transducer Loss gradient.
    """
    grad = op.outputs[1]
    # NOTE since here we are batch first, cannot use _BroadcastMul
    grad_loss = tf.reshape(grad_loss, (-1, 1, 1, 1))
    return [grad_loss * grad, None, None, None]


@ops.RegisterShape("TransducerLoss")
def _TransducerLossShape(op):
    inputs_shape = op.inputs[0].get_shape().with_rank(4)
    batch_size = inputs_shape[0]
    return [batch_size, inputs_shape]
[ "lekai.huang@gmail.com" ]
lekai.huang@gmail.com
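A minimal call-site sketch for transducer_loss above. It assumes the package is importable as transducer_tensorflow with its compiled kernels library built; the shapes are made up, and in practice log_probs would be log-softmax outputs of a joint network:

import numpy as np
import tensorflow as tf
from transducer_tensorflow import transducer_loss

N, T, U, V = 2, 10, 4, 5  # batch, max frames, max labels + 1, vocabulary (incl. blank)
log_probs = tf.constant(np.random.randn(N, T, U, V).astype(np.float32))
labels = tf.constant(np.random.randint(1, V, size=(N, U - 1)), dtype=tf.int32)
frames_lengths = tf.constant([T, T], dtype=tf.int32)
labels_lengths = tf.constant([U - 1, U - 1], dtype=tf.int32)

loss = transducer_loss(log_probs, labels, frames_lengths, labels_lengths,
                       reduction="mean", blank=0)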
235af1bbc670e956e37e472b363d092d53a2e10f
7927424f1983eecc7c7b2f0ebaf61ad552d2a7e7
/zigzag.py
1e4ea4b1030d84d3446c45f2f19960e1f1f9aafc
[]
no_license
6reg/automate
295931d3ecf0e69e01921cc45d452fadfd1e6581
11e5de461ece3d8d111f3dc13de088788baf19a2
refs/heads/main
2023-03-08T18:39:42.991280
2021-02-22T20:53:13
2021-02-22T20:53:13
334,780,031
0
1
null
null
null
null
UTF-8
Python
false
false
746
py
import time, sys

indent = 0  # How many spaces to indent
indentIncreasing = True  # Whether the indentation is increasing or not

try:
    while True:  # The main program loop.
        print(' ' * indent, end='')
        print('********')
        time.sleep(0.1)  # Pause for 1/10 of a second.

        if indentIncreasing:
            # Increase the number of spaces:
            indent = indent + 1
            if indent == 20:
                # Change direction:
                indentIncreasing = False
        else:
            # Decrease the number of spaces:
            indent = indent - 1
            if indent == 0:
                # Change direction:
                indentIncreasing = True
except KeyboardInterrupt:
    sys.exit()
[ "mathiasgreg@gmail.com" ]
mathiasgreg@gmail.com
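The increasing/decreasing flag above traces a triangle wave, so the same indent sequence can be produced in closed form; a sketch of that equivalence (the step counter is an illustrative addition):

import itertools, time

for step in itertools.count():
    indent = 20 - abs(20 - step % 40)  # triangle wave 0..20..0 with period 40
    print(' ' * indent + '********')
    time.sleep(0.1)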
4ad2bde6bdca921a681ab2a19739f7ec109f0855
72f55f2b9899af5cf60789b935a69f2e5ddfc814
/src/analytics/migrations/0007_auto_20161212_0735.py
030a645dc7873b36a13eae12fd481f3aeaae826b
[]
no_license
apapatp/svrup-learning-no-rest
cf4ab52de0a060207844a8fb3c58455b7142ff35
554d26245a9db2e22f4ed22928f4815b186e486b
refs/heads/master
2021-01-12T09:32:15.866093
2016-12-30T09:47:43
2016-12-30T09:47:43
76,186,449
0
0
null
null
null
null
UTF-8
Python
false
false
559
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import datetime
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('analytics', '0006_auto_20161212_0624'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pageview',
            name='timestamp',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 12, 7, 35, 14, 118666, tzinfo=utc)),
            preserve_default=True,
        ),
    ]
[ "tolu@Tolus-MacBook-Pro.local" ]
tolu@Tolus-MacBook-Pro.local
354649abe3c11f6cdb1d9b5428f93f1147c33e86
f593d8878681406d5f8a9c76d124f7dc4e056342
/ml/xcc.py
8bbf0ef512822b005329f4d6c8754e70446755f9
[ "Apache-2.0" ]
permissive
paxtonhare/MarkLogic-Sublime
3a6adc692825b8325bcbaeb68034bb0a1c1c13c1
75a9c8e8d1e2f53f2939644b46dff682df0b2703
refs/heads/master
2021-08-07T21:56:55.784555
2021-01-26T17:20:34
2021-01-26T17:20:34
16,205,265
15
5
null
2016-05-22T21:05:24
2014-01-24T13:30:48
Python
UTF-8
Python
false
false
6,381
py
import sys
import re

if sys.version_info >= (3,):
    import http.client
    import urllib.parse
    import urllib.request
    from urllib.error import HTTPError
else:
    import httplib
    import urllib
    import urllib2
    from urllib2 import HTTPError

import socket

from .ml_utils import MlUtils
from .ml_settings import MlSettings


class Xcc():
    def __init__(self):
        self.settings = {
            "ml_host": "localhost",
            "xcc_port": "8000",
            "content_database": "Documents",
            "modules_database": "Modules",
            "user": "admin",
            "password": "admin",
            "timeout": "1",
            "use_https": False
        }

        mlSettings = MlSettings()
        for setting in ["ml_host", "xcc_port", "use_https", "content_database",
                        "modules_database", "user", "password", "timeout", "output_options"]:
            value = mlSettings.get_xcc_pref(setting)
            if value == None:
                continue
            self.settings[setting] = value

        if MlSettings.debug():
            for k in self.settings:
                MlUtils.log("%s => %s" % (k, self.settings[k]))

        self.base_url = "http"
        if (self.settings["use_https"] == True):
            self.base_url = self.base_url + "s"
        self.base_url = self.base_url + "://" + self.settings["ml_host"] + ":" + self.settings["xcc_port"] + "/"
        MlUtils.log("base_url: " + self.base_url)

    def encode_params(self, params):
        if sys.version_info >= (3,):
            parse = urllib.parse
        else:
            parse = urllib
        return parse.urlencode(params)

    def http(self, url, user, password, params, verb, headers, realm="public"):
        # configure the timeout for http
        timeout = float(self.settings['timeout'])
        socket.setdefaulttimeout(timeout)

        if sys.version_info >= (3,):
            client = urllib.request
        else:
            client = urllib2

        passwdmngr = client.HTTPPasswordMgrWithDefaultRealm()
        passwdmngr.add_password(realm, url, user, password)
        digest_authhandler = client.HTTPDigestAuthHandler(passwdmngr)
        basic_authhandler = client.HTTPBasicAuthHandler(passwdmngr)
        opener = client.build_opener(basic_authhandler, digest_authhandler)
        client.install_opener(opener)

        if (verb == "PUT" and self.is_string(params)):
            params = params.encode('utf-8')

        if sys.version_info >= (3,):
            req = client.Request(url=url, headers=headers, method=verb, data=params)
        else:
            req = client.Request(url=url, headers=headers, data=params)
            req.get_method = lambda: verb

        return client.urlopen(req)

    def is_string(self, input):
        if sys.version_info >= (3,):
            return isinstance(input, str)
        else:
            return isinstance(input, basestring)

    def get_header(self, response, header):
        if sys.version_info >= (3,):
            return response.getheader(header)
        else:
            return response.info().getheader(header)

    def fix_entity_refs(self, query):
        return '&amp;'.join(query.split('&'))

    def run_query(self, query, query_type="xquery", check=False, skip_dbs=False):
        if (skip_dbs == False and "content_database" in self.settings):
            content_db = self.settings["content_database"]
        else:
            content_db = None

        if (skip_dbs == False and "modules_database" in self.settings):
            modules_db = self.settings["modules_database"]
        else:
            modules_db = None

        query = self.fix_entity_refs(query)
        query = query.replace('"', '""')

        eval_func = "xdmp:eval"
        if query_type == "javascript":
            eval_func = "xdmp:javascript-eval"

        new_query = """
%s(
    "%s",
    (),
    <options xmlns="xdmp:eval">
        <isolation>different-transaction</isolation>
""" % (eval_func, query)  # .format(query, eval_func)

        if (content_db != None):
            new_query = new_query + '<database>{{xdmp:database("{0}")}}</database>'.format(content_db)

        if (modules_db != None):
            new_query = new_query + '<modules>{{xdmp:database("{0}")}}</modules>'.format(modules_db)

        if (check == True):
            new_query = new_query + '<static-check>true</static-check>'

        new_query = new_query + """
    </options>)
"""

        if (check == True):
            new_query = "try {" + new_query + "} catch($ex) { $ex[error:code != ('XDMP-MODNOTFOUND')] }"

        output_options = ""
        if "output_options" in self.settings:
            for option in self.settings["output_options"]:
                output_options = """%sdeclare option xdmp:output "%s";\n""" % (output_options, option)

        new_query = output_options + new_query

        p = {
            "xquery": new_query
        }
        params = self.encode_params(p)
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept-Encoding": "gzip,deflate,sdch",
            "Accept": "*/*"
        }

        url = self.base_url + "eval"
        MlUtils.log("url: " + url)

        try:
            response = self.http(url, self.settings["user"], self.settings["password"], str.encode(params), "POST", headers)
            MlUtils.log(response)

            content_length = self.get_header(response, "Content-Length")
            if content_length != "0":
                content_type = self.get_header(response, "Content-Type")
                if content_type:
                    boundary = re.sub("^.*boundary=(.*)$", "\\1", content_type)
                    body = response.read()
                    if boundary:
                        # remove the last
                        content = re.sub(r"[\r\n]+--%s--[\r\n]+$" % boundary, "", body.decode())
                        # remove the first
                        content = re.compile(r"^[\r\n]+--%s.+?[\r\n]+" % boundary, re.M | re.DOTALL).sub("", content)
                        # split on the boundaries
                        regex_str = r"[\r\n]+--%s.+?[\r\n]+" % boundary
                        prog = re.compile(regex_str, re.M | re.DOTALL)
                        parts = []
                        partSplitter = re.compile(r"[\r\n][\r\n]", re.M | re.DOTALL)
                        for part in prog.split(content):
                            splits = partSplitter.split(part)
                            parts.append(splits[len(splits) - 1])
                        result = "\n\n".join(parts)
                    else:
                        result = body.decode()
                    MlUtils.log(result)
                    return result
            else:
                return ""
        except HTTPError as e:
            raise Exception(e.read().decode("utf-8"))

    def insert_file(self, uri, file_contents):
        if ("modules_database" in self.settings):
            modules_db = self.settings["modules_database"]
        else:
            raise Exception('No modules database configured')

        params = {}
        params["uri"] = uri
        params["format"] = "text"
        params["dbname"] = modules_db

        headers = {
            'Content-Type': "text/xml",
            'Accept': "text/html, text/xml, image/gif, image/jpeg, application/vnd.marklogic.sequence, application/vnd.marklogic.document, */*"
        }

        url = self.base_url + "insert?" + self.encode_params(params)
        try:
            response = self.http(url, self.settings["user"], self.settings["password"], file_contents, "PUT", headers)
        except HTTPError as e:
            return e.read().decode("utf-8")
[ "paxton@greenllama.com" ]
paxton@greenllama.com
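A minimal usage sketch for the Xcc client above. It only makes sense inside the plugin package (hence the relative import) and assumes a reachable MarkLogic server matching the configured credentials; the query itself is illustrative:

from .xcc import Xcc

client = Xcc()  # host, port and credentials come from MlSettings, with the defaults above as fallback
result = client.run_query('xdmp:database-name(xdmp:database())')
print(result)   # name of the content database the query ran against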
da51eb2e09b9061ead9e3fc0c508dcf164e120ad
6fb3449a8c38e37a279e01268358dd062445f458
/mysite/urls.py
2d49f38d3201317df024e7ec1a900383551ca778
[]
no_license
mikalail/SportsStats
b756563e6291ffc9d2d7104329dd0a8277048464
44c0a4e012273c27143683e4774cf76964145936
refs/heads/master
2020-12-24T10:57:25.636835
2016-11-08T23:28:53
2016-11-08T23:28:53
73,224,263
0
0
null
null
null
null
UTF-8
Python
false
false
810
py
"""mysite URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import include, url from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'', include('sports.urls')), ]
[ "mikalail@yahoo.com" ]
mikalail@yahoo.com
f661b97983d5da36c5d8f23356b77bb41fdbff71
dd05972a3bf9d15f332fbff420f10afe1977c0d8
/competition/base_example/aliceTest.py
76fec14b823615e7488647e1a92bf8e51c2b7006
[ "BSD-2-Clause" ]
permissive
StephanieWehner/QI-Competition2018
b70df8c5bb343c534c2c0bd8fc0e7d6bb6183f25
cc1139c81e39f66b77c046414bcac8de45807557
refs/heads/master
2020-03-23T05:45:09.885955
2018-08-08T20:03:29
2018-08-08T20:03:29
141,164,280
1
3
null
null
null
null
UTF-8
Python
false
false
3,108
py
#
# Copyright (c) 2017, Stephanie Wehner and Axel Dahlberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
#    must display the following acknowledgement:
#    This product includes software developed by Stephanie Wehner, QuTech.
# 4. Neither the name of the QuTech organization nor the
#    names of its contributors may be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from SimulaQron.general.hostConfig import *
from SimulaQron.cqc.backend.cqcHeader import *
from SimulaQron.cqc.pythonLib.cqc import *
from SimulaQron.toolbox.measurements import parity_meas

import random


#####################################################################################################
#
# main
#
def main():

    # Initialize the connection
    Alice = CQCConnection("Alice")

    # Create EPR pairs
    q1 = Alice.createEPR("Bob")
    q2 = Alice.createEPR("Bob")

    # Make sure we order the qubits consistently with Bob
    # Get entanglement IDs
    q1_ID = q1.get_entInfo().id_AB
    q2_ID = q2.get_entInfo().id_AB

    if q1_ID < q2_ID:
        qa = q1
        qc = q2
    else:
        qa = q2
        qc = q1

    # Get row
    row = 0

    # Perform the three measurements
    if row == 0:
        m0 = parity_meas([qa, qc], "XI", Alice)
        m1 = parity_meas([qa, qc], "XX", Alice)
        m2 = parity_meas([qa, qc], "IX", Alice)
    else:
        m0 = 0
        m1 = 0
        m2 = 0

    print("\n")
    print("==========================")
    print("App {}: row is:".format(Alice.name))
    for _ in range(row):
        print("(___)")
    print("({}{}{})".format(m0, m1, m2))
    for _ in range(2 - row):
        print("(___)")
    print("==========================")
    print("\n")

    # Clear qubits
    qa.measure()
    qc.measure()

    # Stop the connections
    Alice.close()


##################################################################################################
main()
[ "axel.dahlberg12@gmail.com" ]
axel.dahlberg12@gmail.com
290b82503d5a09f87feff4d7c52eaa5bb272622d
848fad01ed3f55e4c9a47d227f7cbabfe4f4df73
/utils/oss.py
86ba32fb2bafa32bb59bdb18e0650710b4b7c800
[]
no_license
cx2c/ali_sdk
51934a7b941fdb710b849ee6d61ce0ea9a88474f
9edb3ff58a71feb5917982d68a57854c10c26e15
refs/heads/master
2020-03-21T06:49:26.720386
2018-06-22T02:34:32
2018-06-22T02:34:32
138,244,060
1
0
null
null
null
null
UTF-8
Python
false
false
122
py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "w.z"
# Date: 2018/3/21


class Oss(object):
    pass
[ "w.z@zhangweideMBPX.lan" ]
w.z@zhangweideMBPX.lan
35d3072fb03715d46ecb63d4005ca431e4838776
b42850bc3e36bbd1683070393582617f2b3cd8e6
/Inheritance/players_and_monsters/muse_elf.py
a9582d2cc187778ca11c8be953479c42fb935ab3
[]
no_license
marianidchenko/Python_OOP
aecca18be6df3850c0efbf2fa6d25bf3ff53ae96
547c12cbdad5b8c16fa55bba6c03b71db181ad2b
refs/heads/main
2023-07-09T05:42:43.863681
2021-08-14T14:55:51
2021-08-14T14:55:51
381,572,168
0
0
null
null
null
null
UTF-8
Python
false
false
84
py
from Inheritance.players_and_monsters.elf import Elf


class MuseElf(Elf):
    pass
[ "marianidchenko@gmail.com" ]
marianidchenko@gmail.com
658b34c8593e518f6e856b6afb5c1d107b89f6bc
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
/examples/pytorch/stgcn_wave/model.py
2463721f1b38ea34e09db1c8e3b064a7db69e439
[ "Apache-2.0" ]
permissive
dmlc/dgl
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
refs/heads/master
2023-08-31T16:33:21.139163
2023-08-31T07:49:22
2023-08-31T07:49:22
130,375,797
12,631
3,482
Apache-2.0
2023-09-14T15:48:24
2018-04-20T14:49:09
Python
UTF-8
Python
false
false
3,480
py
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

from dgl.nn.pytorch import GraphConv
from dgl.nn.pytorch.conv import ChebConv


class TemporalConvLayer(nn.Module):
    """Temporal convolution layer.

    arguments
    ---------
    c_in : int
        The number of input channels (features)
    c_out : int
        The number of output channels (features)
    dia : int
        The dilation size
    """

    def __init__(self, c_in, c_out, dia=1):
        super(TemporalConvLayer, self).__init__()
        self.c_out = c_out
        self.c_in = c_in
        self.conv = nn.Conv2d(
            c_in, c_out, (2, 1), 1, dilation=dia, padding=(0, 0)
        )

    def forward(self, x):
        return torch.relu(self.conv(x))


class SpatioConvLayer(nn.Module):
    def __init__(self, c, Lk):  # c : hidden dimension Lk: graph matrix
        super(SpatioConvLayer, self).__init__()
        self.g = Lk
        self.gc = GraphConv(c, c, activation=F.relu)
        # self.gc = ChebConv(c, c, 3)

    def init(self):
        stdv = 1.0 / math.sqrt(self.W.weight.size(1))
        self.W.weight.data.uniform_(-stdv, stdv)

    def forward(self, x):
        x = x.transpose(0, 3)
        x = x.transpose(1, 3)
        output = self.gc(self.g, x)
        output = output.transpose(1, 3)
        output = output.transpose(0, 3)
        return torch.relu(output)


class FullyConvLayer(nn.Module):
    def __init__(self, c):
        super(FullyConvLayer, self).__init__()
        self.conv = nn.Conv2d(c, 1, 1)

    def forward(self, x):
        return self.conv(x)


class OutputLayer(nn.Module):
    def __init__(self, c, T, n):
        super(OutputLayer, self).__init__()
        self.tconv1 = nn.Conv2d(c, c, (T, 1), 1, dilation=1, padding=(0, 0))
        self.ln = nn.LayerNorm([n, c])
        self.tconv2 = nn.Conv2d(c, c, (1, 1), 1, dilation=1, padding=(0, 0))
        self.fc = FullyConvLayer(c)

    def forward(self, x):
        x_t1 = self.tconv1(x)
        x_ln = self.ln(x_t1.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        x_t2 = self.tconv2(x_ln)
        return self.fc(x_t2)


class STGCN_WAVE(nn.Module):
    def __init__(
        self, c, T, n, Lk, p, num_layers, device, control_str="TNTSTNTST"
    ):
        super(STGCN_WAVE, self).__init__()
        self.control_str = control_str  # model structure controller
        self.num_layers = len(control_str)
        self.layers = nn.ModuleList([])
        cnt = 0
        diapower = 0
        for i in range(self.num_layers):
            i_layer = control_str[i]
            if i_layer == "T":  # Temporal Layer
                self.layers.append(
                    TemporalConvLayer(c[cnt], c[cnt + 1], dia=2**diapower)
                )
                diapower += 1
                cnt += 1
            if i_layer == "S":  # Spatio Layer
                self.layers.append(SpatioConvLayer(c[cnt], Lk))
            if i_layer == "N":  # Norm Layer
                self.layers.append(nn.LayerNorm([n, c[cnt]]))
        self.output = OutputLayer(c[cnt], T + 1 - 2 ** (diapower), n)
        for layer in self.layers:
            layer = layer.to(device)

    def forward(self, x):
        for i in range(self.num_layers):
            i_layer = self.control_str[i]
            if i_layer == "N":
                x = self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
            else:
                x = self.layers[i](x)
        return self.output(x)
[ "noreply@github.com" ]
noreply@github.com
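A shape-level sketch of wiring up STGCN_WAVE above, assuming the file is importable as model. The channel list needs one more entry than the number of 'T' layers in control_str (five in the default "TNTSTNTST"), and the window T must exceed the 31-frame receptive field of the dilated temporal convolutions; the graph and all sizes are made up:

import dgl
import torch
from model import STGCN_WAVE

n, T, batch = 50, 144, 8                       # nodes, input window, batch size
g = dgl.add_self_loop(dgl.rand_graph(n, 200))  # random graph; self-loops avoid zero in-degree nodes

channels = [1, 16, 32, 64, 32, 128]            # c[0] input features, then one entry per 'T' layer
net = STGCN_WAVE(channels, T, n, g, 0.0, 9, "cpu")  # p and num_layers are accepted but recomputed internally

x = torch.randn(batch, 1, T, n)                # (batch, features, time, nodes)
y = net(x)                                     # (batch, 1, 1, n): one prediction per node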
b6e8f2be226188fbb1defabbcc1d134f8fc8e070
3570f2e7b8d5666cbd2d29a4c75965a75699a3e2
/pyodbc/run_test.py
1b0460f4bd5adc94625a5a8b380978050e9a9c4a
[]
no_license
ilanschnell/recipes
7876225db2eb08b21d4d1ab426d40f94650192fd
c946b446a002d55ecffff6ce789cf9dcb57a65a6
refs/heads/master
2023-08-19T19:40:17.750037
2022-01-21T00:27:38
2022-01-21T00:27:38
119,077,116
0
0
null
null
null
null
UTF-8
Python
false
false
1,181
py
import sys
from os.path import isfile

print(sys.version)
print(sys.executable)

import pyodbc
print(pyodbc)

if sys.platform == 'darwin':
    driver_path = '/Users/ilan/a/envs/py38/lib/libsqlite3odbc.dylib'
elif sys.platform.startswith('linux'):
    driver_path = '/home/osboxes/bin/libsqlite3odbc-0.9996.so'

if not isfile(driver_path):
    raise Exception('no such file: %r' % driver_path)

connect_string = (
    "DRIVER={%s};SERVER=localhost;DATABASE=./test.sqlite;Trusted_connection=yes"
    % driver_path
)
cnxn = pyodbc.connect(connect_string)
cursor = cnxn.cursor()

try:
    cursor.execute('drop table foo')
except:
    pass
cursor.execute('create table foo (symbol varchar(5), price float)')

N = 1000
for i in range(N):
    cursor.execute("insert into foo (symbol, price) values (?, ?)",
                   (str(i), float(i)))
cursor.execute("commit")

cursor.execute("select * from foo")
dictarray = cursor.fetchdictarray()
cursor.close()

for i in range(N):
    assert dictarray['symbol'][i] == str(i)
    assert (dictarray['price'][i] - float(i)) < 1E-10

# tab completion fails in ipython for pyodbc.Cursor
assert pyodbc.Cursor.fetchdictarray.__doc__

print("Done.")
[ "ilanschnell@gmail.com" ]
ilanschnell@gmail.com
b11a8a7651e0f8dc115584ee90faf956ed6a1f89
997449072baa8e50a143ae1152fd4fa83c8e1068
/devel/.private/rrtplanner/lib/python2.7/dist-packages/rrtplanner/msg/_rrtResult.py
7672fe8883172dee48ff70b467d5d95c919942d0
[]
no_license
idrissahil/catkin_ws
c547a6f7be812cc0bb1a93042026f746d34e7e70
b5d8b60c882b60bb19b8d4529257ca513b8256e3
refs/heads/master
2022-01-24T12:51:28.038620
2019-06-02T16:05:45
2019-06-02T16:05:45
175,048,655
1
0
null
null
null
null
UTF-8
Python
false
false
11,030
py
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rrtplanner/rrtResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct

import geometry_msgs.msg
import nav_msgs.msg
import std_msgs.msg

class rrtResult(genpy.Message):
  _md5sum = "58d6f138c7de7ef47c75d4b7e5df5472"
  _type = "rrtplanner/rrtResult"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Define the result
nav_msgs/Path path

================================================================================
MSG: nav_msgs/Path
#An array of poses that represents a Path for a robot to follow
Header header
geometry_msgs/PoseStamped[] poses

================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id

================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose

================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation

================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z

================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.

float64 x
float64 y
float64 z
float64 w
"""
  __slots__ = ['path']
  _slot_types = ['nav_msgs/Path']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       path

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(rrtResult, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.path is None:
        self.path = nav_msgs.msg.Path()
    else:
      self.path = nav_msgs.msg.Path()

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
      _x = self.path.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.path.poses)
      buff.write(_struct_I.pack(length))
      for val1 in self.path.poses:
        _v1 = val1.header
        buff.write(_get_struct_I().pack(_v1.seq))
        _v2 = _v1.stamp
        _x = _v2
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = _v1.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _v3 = val1.pose
        _v4 = _v3.position
        _x = _v4
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v5 = _v3.orientation
        _x = _v5
        buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.path is None:
        self.path = nav_msgs.msg.Path()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.path.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.path.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.path.poses = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.PoseStamped()
        _v6 = val1.header
        start = end
        end += 4
        (_v6.seq,) = _get_struct_I().unpack(str[start:end])
        _v7 = _v6.stamp
        _x = _v7
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          _v6.frame_id = str[start:end].decode('utf-8')
        else:
          _v6.frame_id = str[start:end]
        _v8 = val1.pose
        _v9 = _v8.position
        _x = _v9
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
        _v10 = _v8.orientation
        _x = _v10
        start = end
        end += 32
        (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
        self.path.poses.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
      _x = self.path.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.path.poses)
      buff.write(_struct_I.pack(length))
      for val1 in self.path.poses:
        _v11 = val1.header
        buff.write(_get_struct_I().pack(_v11.seq))
        _v12 = _v11.stamp
        _x = _v12
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = _v11.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _v13 = val1.pose
        _v14 = _v13.position
        _x = _v14
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v15 = _v13.orientation
        _x = _v15
        buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.path is None:
        self.path = nav_msgs.msg.Path()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.path.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.path.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.path.poses = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.PoseStamped()
        _v16 = val1.header
        start = end
        end += 4
        (_v16.seq,) = _get_struct_I().unpack(str[start:end])
        _v17 = _v16.stamp
        _x = _v17
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          _v16.frame_id = str[start:end].decode('utf-8')
        else:
          _v16.frame_id = str[start:end]
        _v18 = val1.pose
        _v19 = _v18.position
        _x = _v19
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
        _v20 = _v18.orientation
        _x = _v20
        start = end
        end += 32
        (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
        self.path.poses.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_4d = None
def _get_struct_4d():
    global _struct_4d
    if _struct_4d is None:
        _struct_4d = struct.Struct("<4d")
    return _struct_4d
_struct_3I = None
def _get_struct_3I():
    global _struct_3I
    if _struct_3I is None:
        _struct_3I = struct.Struct("<3I")
    return _struct_3I
_struct_2I = None
def _get_struct_2I():
    global _struct_2I
    if _struct_2I is None:
        _struct_2I = struct.Struct("<2I")
    return _struct_2I
_struct_3d = None
def _get_struct_3d():
    global _struct_3d
    if _struct_3d is None:
        _struct_3d = struct.Struct("<3d")
    return _struct_3d
[ "idrissahil3@gmail.com" ]
idrissahil3@gmail.com
caebf84579717f9af88612898b4b4390d7755b86
f62be83925849ab2841565ab264dedf1ee74a689
/S&PTimeTest.py
3e69a50e2aaa5ea747781acb9fb522f9a524ad58
[]
no_license
evy555/Stock-day-of-week-return-analysis
043501581615bfe1979878ad01cada990eb9cb08
28cee2a04475db6801a4fe8c8f0a51c9a5f2959b
refs/heads/master
2020-04-01T20:59:05.613334
2016-06-08T22:46:08
2016-06-08T22:46:08
60,735,677
0
0
null
null
null
null
UTF-8
Python
false
false
4,653
py
import pandas as pd
import numpy as np
import os
import pandas.io.data
from pandas import Series, DataFrame
from pandas import ExcelWriter
from pandas import read_csv
import matplotlib.pyplot as plt
import datetime
from scipy.stats import ttest_1samp
from random import randint

now = datetime.datetime.now()

list = '^GSPC'

# pick a random valid start date between 1950 and 2015
start = None
while start is None:
    try:
        start = datetime.datetime(randint(1950, 2015), randint(1, 12), randint(1, 31))
    except:
        pass

end = datetime.datetime(now.year, now.month, now.day)

df = pd.io.data.get_data_yahoo(list, start, end)['Adj Close']
df = DataFrame(df)
df['Returns'] = df.pct_change()
df['Date'] = df.index
df['Date'] = [time.date() for time in df['Date']]

l = df.index.values
for i in range(0, len(l)):
    df.loc[l[i], 'DayoftheWeek'] = datetime.datetime.strptime(str(df.loc[l[i], 'Date']), '%Y-%m-%d').strftime('%A')

days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']

Monday = 0
MonCount = 0
Mon = []
Tuesday = 0
TueCount = 0
Tue = []
Wednesday = 0
WedCount = 0
Wed = []
Thursday = 0
ThuCount = 0
Thu = []
Friday = 0
FriCount = 0
Fri = []

# Loop through the frame, summing up all returns per weekday while also
# keeping the per-day counts; the averages are computed afterwards.
for i in range(1, len(l)):
    dump = 0
    if df.loc[l[i], 'DayoftheWeek'] == 'Monday':
        Monday = Monday + df.loc[l[i], "Returns"]
        MonCount = MonCount + 1
        Mon.append(df.loc[l[i], 'Returns'])
    if df.loc[l[i], 'DayoftheWeek'] == 'Tuesday':
        Tuesday = Tuesday + df.loc[l[i], "Returns"]
        TueCount = TueCount + 1
        Tue.append(df.loc[l[i], 'Returns'])
    if df.loc[l[i], 'DayoftheWeek'] == 'Wednesday':
        Wednesday = Wednesday + df.loc[l[i], "Returns"]
        WedCount = WedCount + 1
        Wed.append(df.loc[l[i], 'Returns'])
    if df.loc[l[i], 'DayoftheWeek'] == 'Thursday':
        Thursday = Thursday + df.loc[l[i], "Returns"]
        ThuCount = ThuCount + 1
        Thu.append(df.loc[l[i], 'Returns'])
    if df.loc[l[i], 'DayoftheWeek'] == 'Friday':
        Friday = Friday + df.loc[l[i], "Returns"]
        FriCount = FriCount + 1
        Fri.append(df.loc[l[i], 'Returns'])
    else:
        dump = dump + df.loc[l[i], 'Returns']

dict = {'Monday': Monday/MonCount, 'Tuesday': Tuesday/TueCount, 'Wednesday': Wednesday/WedCount,
        'Thursday': Thursday/ThuCount, 'Friday': Friday/FriCount}
dg = pd.Series(dict, name='DailyValue')
dff = DataFrame(dg)
dff['Day'] = dff.index
dff['Sorter'] = [5, 1, 4, 2, 3]
dff.sort_values(by=['Sorter'], inplace=True)
#dff.sort(['Day'], ascending = True)
#dff.plot(kind='bar', grid = True, y = ['DailyValue'])
plt.show()

# Buy/Sell decision
for i in range(1, len(l)):
    if df.loc[l[i], 'DayoftheWeek'] == 'Friday':
        df.loc[l[i], "Signal"] = "Sell"
        df.loc[l[i], "Market"] = 1
    elif df.loc[l[i], 'DayoftheWeek'] == 'Monday':
        df.loc[l[i], "Signal"] = "Buy"
        df.loc[l[i], "Market"] = 0
    else:
        df.loc[l[i], 'Signal'] = "Hold"
        df.loc[l[i], "Market"] = 1

# Investment calculations
df['Investment'] = ""
df['S&P500 Investment'] = ''
df['Investment'][0] = 10000
df['S&P500 Investment'][0] = 10000
for i in range(1, len(l)):
    df.loc[l[i], 'S&P500 Investment'] = df.loc[l[i-1], 'S&P500 Investment'] * (1 + df.loc[l[i], 'Returns'])
    if df.loc[l[i], "Signal"] == "Sell":
        df.loc[l[i], "Investment"] = df.loc[l[i-1], 'Investment'] * (1 + df.loc[l[i], "Returns"])
    elif df.loc[l[i], "Signal"] == "Buy":
        df.loc[l[i], "Investment"] = df.loc[l[i-1], 'Investment']
    elif df.loc[l[i], 'Signal'] == "Hold":
        df.loc[l[i], 'Investment'] = df.loc[l[i-1], 'Investment'] * (1 + df.loc[l[i], "Returns"])

print(df.head())

#Excess Return over S&P500 Column
#for i in range(1,len(l)):
#    df.loc[l[i], 'Excess Return'] = df.loc[l[i], 'Investment'] - df.loc[l[i], 'S&P500 Investment']

file = ExcelWriter('Time1.xlsx')
df.to_excel(file, 'Data')
file.close()
os.startfile('Time1.xlsx')

df.plot(y=['Investment', 'S&P500 Investment'])
plt.show()

print("Average Monday return: %s" % (Monday/MonCount))
print("Average Tuesday return: %s" % (Tuesday/TueCount))
print("Average Wednesday return: %s" % (Wednesday/WedCount))
print("Average Thursday return: %s" % (Thursday/ThuCount))
print("Average Friday return: %s" % (Friday/FriCount))
print("1 sample t-tests for each day to test significance of daily returns against 0 are as follows:")
print(ttest_1samp(Mon, 0))
print(ttest_1samp(Tue, 0))
print(ttest_1samp(Wed, 0))
print(ttest_1samp(Thu, 0))
print(ttest_1samp(Fri, 0))
[ "evansj556@yahoo.com" ]
evansj556@yahoo.com
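The five per-weekday accumulators in the script above can be collapsed into a single pandas groupby; a sketch that assumes df already carries the Returns and DayoftheWeek columns built earlier in the script:

from scipy.stats import ttest_1samp

daily = df.groupby('DayoftheWeek')['Returns']
print(daily.mean())   # average return per weekday, matching the manual sums
print(daily.count())  # number of observations per weekday

for day, returns in daily:
    print(day, ttest_1samp(returns.dropna(), 0))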
2a77ffc8692138609c559a9df4f3206508debd09
0c153f489e523afdc33b950a6b9ee21af09e968e
/cpp/run_scripts/run_fom_basis.py
57249f1dc5a1dc4ca8146c3f4d9c3f9afc8a949f
[]
no_license
Pressio/pressio-sisc-burgers1d
86f1acb31d40d1aefa83b61bb4e8a7d70621cf1a
671f45b7abd5dc59d574b6d26cc4a5f23ee90306
refs/heads/master
2021-01-26T01:04:20.594259
2020-04-26T11:32:00
2020-04-26T11:32:00
243,249,905
0
0
null
null
null
null
UTF-8
Python
false
false
1,870
py
#!/usr/bin/env python

import sys, os, time
import subprocess
import numpy as np
import os.path
from argparse import ArgumentParser
import re

import myutils, constants


def main(exeName):
    # args for the executable
    args = ("./" + exeName, "input.txt")

    print("Starting basis runs")

    # loop over mesh sizes
    for iMesh in range(0, constants.num_meshes):
        currentMeshSize = constants.mesh_sizes[iMesh]
        print("Current currentMeshSize = ", currentMeshSize)

        # create folder
        parentDir = 'meshSize' + str(currentMeshSize)
        if not os.path.exists(parentDir):
            os.system('mkdir ' + parentDir)

        # loop over various basis size
        for i in range(0, constants.num_rom_sizes):
            romSize = constants.rom_sizes[i]
            print("Current romSize = ", romSize)

            # based on the size of rom and number of ode steps,
            # compute the sampling frequency
            assert(constants.numStepsBasis % romSize == 0)
            samplingFreq = int(constants.numStepsBasis / romSize)

            # create input file
            myutils.createInputFileFomForBasis(currentMeshSize, samplingFreq)
            os.system("./" + exeName + " input.txt")
            #popen = subprocess.Popen(args, stdout=subprocess.PIPE)
            #popen.wait()
            #output = popen.stdout.read()

            # create dir for this number of basis
            childDir = parentDir + '/basis' + str(romSize)
            if not os.path.exists(childDir):
                os.system('mkdir ' + childDir)

            # copy files there
            os.system('mv input.txt ' + childDir)
            os.system('mv basis.txt ' + childDir)
            os.system('mv snapshots.txt ' + childDir)
            os.system('mv yFom.txt ' + childDir)

    print("Done with basis runs")


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("-exe", "--exe", dest="exeName", help="generate basis for fom")
    args = parser.parse_args()
    main(args.exeName)
[ "fnrizzi@sandia.gov" ]
fnrizzi@sandia.gov
ebc3ed1ffe0e2caca9b9f1ca118b77aa614a399c
04a0ff31201c67a0e6a9654369ddd3f712303584
/module_5/pages/basket_page.py
9458b62174897720b13b36a5b9c95bcc2af01498
[]
no_license
titun9/stepik_lessons
58c73e136a7be2950f7071d637a495e182a1291f
26515d9edb2a2b8f7a09a598405d5cecb6a94f7d
refs/heads/master
2023-06-09T07:21:20.696296
2021-06-30T14:00:25
2021-06-30T14:00:25
366,700,404
0
0
null
null
null
null
UTF-8
Python
false
false
1,190
py
from .base_page import BasePage
from .locators import BasketPageLocators
from .locators import BasePageLocators


class BasketPage(BasePage):
    def get_correct_message_empty_basket(self):
        language = self.browser.find_element(*BasePageLocators.LANGUAGE_PAGE).get_attribute("lang")
        dictionary_message = {"ru": "Ваша корзина пуста", "en-gb": "Your basket is empty",
                              "es": "Tu carrito esta vacío", "fr": "Votre panier est vide"}
        message = dictionary_message[language]
        return message

    def should_be_message_empty_basket(self):
        message = self.get_correct_message_empty_basket()
        message_basket_empty = self.browser.find_element(*BasketPageLocators.MESSAGE_EMPTY_BASKET).text
        assert message in message_basket_empty, "No message that basket is empty"

    def should_be_product_in_basket_page(self):
        assert self.is_element_present(*BasketPageLocators.TABLE_ADDED_PRODUCT), \
            "Basket is empty"

    def should_not_be_product_in_basket_page(self):
        assert self.is_not_element_present(*BasketPageLocators.TABLE_ADDED_PRODUCT), \
            "Basket is not empty"
[ "butkevichas@cheops-edu.ru" ]
butkevichas@cheops-edu.ru
696193e4863c900c995b49d8854b2fd947ef2ebd
9dc21ebb553fd116826c7cbae7d8c5eba47423d1
/cloneGraph.py
81681ac2a31cf11b69ac78e24d755d692f4aee77
[]
no_license
KJSui/leetcode-2020
a475a8b8481231757222c5afaad2856a92572f89
37cf89e7fb1351b1deff09271d9bb5852395054e
refs/heads/main
2023-04-05T19:46:25.647605
2021-05-06T20:40:06
2021-05-06T20:40:06
365,031,592
0
0
null
null
null
null
UTF-8
Python
false
false
426
py
class Solution:
    def __init__(self):
        self.copy = {}  # maps original node -> its clone, so cycles terminate

    def cloneGraph(self, node):
        if not node:
            return None
        if node in self.copy:  # already cloned: reuse the existing copy
            return self.copy[node]
        newNode = Node(node.val)
        self.copy[node] = newNode  # register before recursing to break cycles
        neighbors = []
        for i in node.neighbors:  # fixed: iterate the original's neighbors, not the empty list itself
            neighbors.append(self.cloneGraph(i))
        newNode.neighbors = neighbors
        return newNode
[ "jsui@digitalocean.com" ]
jsui@digitalocean.com
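The Node class in cloneGraph above is supplied by the problem environment; a self-contained sketch that defines one and clones a two-node cycle, showing that the memo in self.copy both preserves the cycle and terminates the recursion:

class Node:
    def __init__(self, val=0, neighbors=None):
        self.val = val
        self.neighbors = neighbors if neighbors is not None else []

a, b = Node(1), Node(2)
a.neighbors, b.neighbors = [b], [a]  # a two-node cycle

clone = Solution().cloneGraph(a)
print(clone.val, clone.neighbors[0].val)         # 1 2
print(clone.neighbors[0].neighbors[0] is clone)  # True: the cycle is preserved
print(clone is not a)                            # True: a deep copy, not the original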
7e1915a371be95b8b39560a1b5d338aaa86da5fe
d0e26d18017b825dd18919b0f87d0c99c15e1247
/twitterclone/urls.py
f38223bc8a2886dc69db15f43acff166caa1a2ab
[]
no_license
davidstewy/twitterclone
9e202f852514e72141c39e83f6d47e44f095be28
7e4395f9415854ca1edc3b81232f1303badb9543
refs/heads/master
2020-04-30T05:55:22.162908
2019-03-20T02:54:13
2019-03-20T02:54:13
176,638,200
0
0
null
null
null
null
UTF-8
Python
false
false
1,083
py
"""twitterclone URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from twitterclone.views import homepage from twitteruser.urls import urlpatterns as userurls from notification.urls import urlpatterns as notificationsurls from tweet.urls import urlpatterns as tweeturls urlpatterns = [ path('admin/', admin.site.urls), path('', homepage, name='homepage'), ] urlpatterns += userurls urlpatterns += tweeturls urlpatterns += notificationsurls
[ "davidstewy@gmail.com" ]
davidstewy@gmail.com