Dataset schema (column: dtype, observed bounds):

blob_id: string (length 40-40)
directory_id: string (length 40-40)
path: string (length 2-616)
content_id: string (length 40-40)
detected_licenses: list (length 0-69)
license_type: string (2 values)
repo_name: string (length 5-118)
snapshot_id: string (length 40-40)
revision_id: string (length 40-40)
branch_name: string (length 4-63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (range 2.91k-686M)
star_events_count: int64 (range 0-209k)
fork_events_count: int64 (range 0-110k)
gha_license_id: string (23 values)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 values)
src_encoding: string (30 values)
language: string (1 value)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (range 2-10.3M)
extension: string (246 values)
content: string (length 2-10.3M)
authors: list (length 1-1)
author_id: string (length 0-212)
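Each record below follows this schema in column order. As a minimal sketch of how such a dataset might be consumed with the Hugging Face datasets library (the dataset path "org/python-source-files" is a hypothetical placeholder, not the real identifier):

from datasets import load_dataset

# Hypothetical dataset path; substitute the real one.
ds = load_dataset("org/python-source-files", split="train", streaming=True)
for row in ds:
    # Each row pairs repository metadata with the raw file contents.
    print(row["repo_name"], row["path"], row["length_bytes"], row["license_type"])
    print(row["content"][:120])  # first 120 characters of the source file
    break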
8b968103f0b9abceb9934710a91e091091a816f6
1ab794d1368ad5b6770c68d6efba85f095ecdfe1
/getter_setter2.py
8157abac58cdc29759d58dc1d077e9c34861b354
[]
no_license
ssavann/PY-POO
d94303dd0d754c3aed4e9c7c32c9018acad04e65
b16f4ab35deca89e1dad0007e7b5285c3a3cca3c
refs/heads/master
2023-01-13T21:36:16.718070
2020-11-25T02:11:27
2020-11-25T02:11:27
315,803,120
0
0
null
null
null
null
UTF-8
Python
false
false
677
py
'''
OOP: properties -> modify/control attributes (encapsulation)
<getter>  Accessor -> retrieve the attribute's value
<setter>  Mutator  -> set the attribute's value
<property> (<getter>, <setter>)
'''

class Humain:
    # constructor
    def __init__(self, nom, prenom):
        self._nom = nom
        self.prenom = prenom

    def get_nom(self):
        return self._nom

    def set_nom(self, nouveauNom):
        if nouveauNom == "":
            print("Empty string not allowed")
        else:
            self._nom = nouveauNom

    LeNom = property(get_nom, set_nom)

# program
h1 = Humain("Tremblay", "David")
#h1.LeNom = "Desjardins"
h1.LeNom = ""
print("Object h1 is named", h1._nom, h1.prenom)
[ "sousavann@gmail.com" ]
sousavann@gmail.com
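The getter_setter2.py sample above uses the older property(getter, setter) form; the same encapsulation is usually written with the @property decorator in modern Python. A small equivalent sketch reusing the sample's names:

class Humain:
    def __init__(self, nom, prenom):
        self._nom = nom
        self.prenom = prenom

    @property
    def LeNom(self):
        # getter: retrieve the attribute's value
        return self._nom

    @LeNom.setter
    def LeNom(self, nouveauNom):
        # setter: same empty-string guard as in the sample
        if nouveauNom == "":
            print("Empty string not allowed")
        else:
            self._nom = nouveauNom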
b2909cd9124676debb2c51fd41fb133e1e0cd503
da522791a92ca6627b25c0fe46032c533a7d3029
/client/server.py
f1e98f98ed5afcd1ffc5beec5a7df0c1cbe0c431
[]
no_license
Shreyas097/P2P-CI-communication
3cc2fe0e8e67fddbc1b67104b773145f2bf7ea42
beff05c51527998ef6a98638b55d7bceac869cde
refs/heads/master
2020-05-25T14:35:56.013912
2019-05-21T14:02:37
2019-05-21T14:02:37
187,848,460
0
0
null
null
null
null
UTF-8
Python
false
false
5,283
py
import socket
import threading
import os
import shlex

count = 0
list_peers = list()
list_idx = list()
list_rfc = list()
SERVER_PORT = 7734


class RFCRecord:
    def __init__(self, rfc_no=-1, rfc_title='None', peer_hostname='None', peer_id=-1):
        self.rfc_no = rfc_no
        self.rfc_title = rfc_title
        self.peer_hostname = peer_hostname
        self.peer_id = peer_id

    def __str__(self):
        return str(self.rfc_no)+' '+str(self.rfc_title)+' '+str(self.peer_hostname)+' '+str(self.peer_id)


class PeerRecord:
    def __init__(self, peer_hostname='None', peer_postno=10000, peer_id=-1):
        self.peer_hostname = peer_hostname
        self.peer_postno = peer_postno
        self.peer_id = peer_id

    def __str__(self):
        return str(self.peer_hostname)+' '+str(self.peer_postno)+' '+str(self.peer_id)


def port_acquire(peer_id):
    for peer_record in list_peers:
        if peer_record.peer_id == peer_id:
            return peer_record.peer_postno


def REGISTER(data, clientsocket):
    global count
    count = count+1
    rlist = shlex.split(data)
    temp = list()
    a = list()
    b = list()
    rfc_list = str(data).rsplit(':', 1)
    c = shlex.split(rfc_list[1])
    list_peers.insert(0, PeerRecord(rlist[3], rlist[5], count))
    for i, j in zip(c[::2], c[1::2]):
        list_idx.insert(0, RFCRecord(i, j, rlist[3], count))
    reply = "CLIENT REGISTRATION SUCCESSFUL"
    clientsocket.send(reply)


def LIST_ALL(clientsocket):
    global status
    status = 0
    global phrase
    phrase = ''
    reply = list()
    if list_idx:
        for x in list_idx:
            peer_port = port_acquire(x.peer_id)
            reply.append(RFCRecord(x.rfc_no, x.rfc_title, x.peer_hostname, peer_port))
        status = 200
        phrase = 'OK'
    else:
        status = 400
        phrase = 'BAD REQUEST'
    response = "P2P-CI/1.0 "+str(status)+" "+str(phrase)+" - LIST_ALL"+"\n"
    for i in reply:
        reply_list = shlex.split(str(i))
        response = response+"File name: "+str(reply_list[1])+"_"+reply_list[0]+" HOST: "+reply_list[2]+" PORT: "+str(reply_list[3])+"\n"
    clientsocket.send(response)


def LOOKUP(clientsocket, rlist):
    reply = list()
    flag = 0
    for x in list_idx:
        if int(x.rfc_no) == int(rlist[1]):
            reply.append(RFCRecord(x.rfc_no, x.rfc_title, x.peer_hostname, x.peer_id))
            code = 200
            phrase = 'OK'
            flag = 1
    if (flag == 0):
        code = 404
        phrase = 'FILE NOT FOUND'
    response = "P2P-CI/1.0 "+str(code)+" "+str(phrase)+" - LOOKUP"+"\n"
    for i in reply:
        reply_list = shlex.split(str(i))
        response = response+"File found: "+str(reply_list[1])+"_"+reply_list[0]+" HOST: "+reply_list[2]+" CLIENT NUM: "+str(reply_list[3])+"\n"
    clientsocket.send(response)


def ADD(clientsocket, rlist, count, data):
    list_idx.insert(0, RFCRecord(rlist[1], rlist[8], rlist[4], count))
    code = 200
    phrase = 'OK'
    a = data.splitlines()
    title = a[3].split(":")
    response = "P2P-CI/1.0 "+str(code)+" "+str(phrase)+" - ADD"+"\n"
    response = response+"File added: RFC_"+rlist[1]+" HOST: "+rlist[4]+" PORT: "+rlist[6]
    clientsocket.send(response)


def REMOVE(rlist, count):
    rfc_pos = 0
    phostname = rlist[4]
    peerport = rlist[6]
    rfc_title = rlist[8]
    for q in list_idx:
        if q.rfc_no == rlist[1] and q.peer_hostname == phostname and q.peer_id == count:
            del list_idx[rfc_pos]
        rfc_pos = rfc_pos + 1
    clientsocket.send("REMOVAL SUCCESS")


def EXIT(rlist, count):
    global peerhost
    templ = list()
    temil = list()
    phostname = rlist[3]
    peerport = rlist[5]
    for q in list_peers:
        if q.peer_postno == peerport:
            peerhost = q.peer_hostname
    idx2 = [x for x, y in enumerate(list_idx) if y.peer_hostname == str(peerhost)]
    for i in sorted(idx2, reverse=True):
        del list_idx[i]
    idx = [x for x, y in enumerate(list_peers) if y.peer_postno == peerport]
    for i in idx:
        del list_peers[i]
    clientsocket.send("CLIENT CLOSED - SHUTDOWN SUCCESSFUL")


def req_processing(clientsocket, clientaddr):
    data = clientsocket.recv(1024)
    global count
    print "*"*37
    print "REQUEST FROM CLIENT: "
    print data
    print "*"*37
    rlist = shlex.split(data)
    if rlist[0] == 'REGISTER':
        REGISTER(data, clientsocket)
    elif rlist[0] == 'LISTALL':
        LIST_ALL(clientsocket)
    elif rlist[0] == 'LOOKUP':
        LOOKUP(clientsocket, rlist)
    elif rlist[0] == 'ADD':
        ADD(clientsocket, rlist, count, data)
    elif rlist[0] == 'EXIT':
        EXIT(rlist, count)
    elif rlist[0] == 'REMOVE':
        REMOVE(rlist, count)


if __name__ == "__main__":
    HOST = socket.gethostname()
    PORT = SERVER_PORT
    IP = socket.gethostbyname(HOST)
    print "SERVER NAME - HOST NAME: "+HOST+" PORT:"+str(PORT)+" IP:"+IP
    serversocket = socket.socket()
    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversocket.bind((HOST, PORT))
    serversocket.listen(5)
    print "SERVER ACTIVELY LISTENING FOR CONNECTIONS \n"
    while (1):
        clientsocket, clientaddr = serversocket.accept()
        serverThread = threading.Thread(target=req_processing, args=(clientsocket, clientaddr))
        serverThread.start()
    serversocket.close()
[ "noreply@github.com" ]
Shreyas097.noreply@github.com
e72622090d163499f2589915a6bbfb4e5ab08d70
daf2da00f5c5000ab63697a16a2af9027bfdb521
/CLI/src/f4i_del_vthing_endpoint.py
9244f871f63e7e3f5aa5abc6ae27a1f5d61aa91e
[ "BSD-4-Clause" ]
permissive
fed4iot/VirIoT
b4b781ca419faaf50858118b1bc8c80cea6c011a
0301a908554f31748985b82b9f310ce9b9af1f9b
refs/heads/master
2023-01-29T06:41:27.379050
2023-01-25T08:56:51
2023-01-25T08:56:51
242,765,526
12
11
NOASSERTION
2021-12-07T18:53:03
2020-02-24T15:02:23
C
UTF-8
Python
false
false
1,664
py
#!/usr/bin/python3
import argparse
import requests
import json
import os
from pathlib import Path

viriot_dir = str(Path.home()) + "/.viriot"
token_file = viriot_dir + "/token"


def get_token():
    if not os.path.isfile(token_file):
        print("Token not found")
        return None
    with open(token_file, 'r') as file:
        data = json.load(file)
        token = data["access_token"]
        return token


def printj(msg):
    print("\n")
    print(json.dumps(json.loads(msg), indent=4, sort_keys=True))
    print("\n")


def init_args(parser):
    parser.add_argument('-c', action='store', dest='controllerUrl',
                        help='Controller url (default: http://127.0.0.1:8090)',
                        default='http://127.0.0.1:8090')
    parser.add_argument('-v', action='store', dest='vThingID',
                        help='vThingID (default: helloWorld/hello)',
                        default='helloWorld/hello')
    parser.set_defaults(func=run)


def run(args):
    url = args.controllerUrl + "/delVThingEndpoint"
    print("Setting vThing endpoint, please wait ....")

    msg = {}
    msg['vThingID'] = args.vThingID
    payload = json.dumps(msg)
    printj(payload)

    token = get_token()
    if not token:
        return
    headers = {
        'Authorization': "Bearer " + token,
        'accept': "application/json",
        'content-type': "application/json",
        'cache-control': "no-cache",
    }
    response = requests.request("POST", url, data=payload, headers=headers)
    print(response.json().get('message') + "\n")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    init_args(parser)
    args = parser.parse_args()
    args.func(args)
[ "andreaD" ]
andreaD
52b6869373b921dfebae787ad8e70c1b548dc106
c7cebec6209866b02ee654cffeafe0f2cf0646f1
/samsung sw/level2/mode.py
3fe4aa5dac038c280907fab6877799aea5700ff4
[]
no_license
dondon17/algorithm
5492cf039a96ecf5a944816bdca9b5755e5a2623
da4d6ca1c21c31c6521a62b38855e0b9cf4b0d91
refs/heads/master
2023-05-02T14:54:35.185914
2021-05-30T07:31:40
2021-05-30T07:31:40
323,802,402
0
0
null
null
null
null
UTF-8
Python
false
false
426
py
import collections


def solution(scores):
    data = collections.Counter(scores)
    data_list = dict(data)
    max_value = max(list(data.values()))
    mode_val = [num for num, freq in data_list.items() if freq == max_value]
    return max(mode_val)


t = int(input())
for i in range(1, t+1):
    n = int(input())
    scores = list(map(int, input().split()))
    answer = solution(scores)
    print("#{} {}".format(i, answer))
[ "qwerqw889@ajou.ac.kr" ]
qwerqw889@ajou.ac.kr
bec8e741e255e39fa4861665362abc28d22d5f28
5cc204e2ecb9a756127e7c71633a1edcdb3e989b
/Archive_sand/MOF_plus/ff_gen/ff_gen/objectives/energy_fit.py
59b2452633a8988fb134b6502d0a377336b29ac0
[]
no_license
hopefulp/sandbox
1a1d518cf7b5e6bca2b2776be1cac3d27fc4bcf8
4d26767f287be6abc88dc74374003b04d509bebf
refs/heads/master
2023-06-27T17:50:16.637851
2023-06-15T03:53:39
2023-06-15T03:53:39
218,209,112
1
0
null
2022-09-13T13:22:34
2019-10-29T05:14:02
C++
UTF-8
Python
false
false
3,911
py
""" This file implements a force_fit class. It handles the reference force arrays and calculates the msd. In addition, a weight matrix is held to evaluate various kinds of weighted mean square deviations to be used as ingredients to fittness values """ import string import numpy as np import copy import os import refclass import matplotlib.pyplot as plt import pickle class energy_fit: """ class to compute the msd between the reference forces and the model forces. """ def __init__(self, tag, skip_bonded = False, ref_diff = 0.0, set_cell = False, verbose = False): self.tag = tag self.ref_diff = ref_diff self.skip_bonded = skip_bonded self.set_cell = set_cell self.verbose = verbose self.geomparams = None return def initialize(self, pd, ref): self.pd = pd self.ref = ref self.generate_reference() self.fact_energy = 1.0 #self.get_weights() self.cycle = 0 self.fdiagnostics = open('force_energy_%s.punch' % self.tag, 'w') return def set_geomparams(self, geomparams): self.geomparams = geomparams return def generate_reference(self): self.structures = copy.deepcopy(self.ref(info = 'structures', branch = 'forcematch', tag = self.tag)) self.geomvectors = copy.deepcopy(self.ref(info = 'vectors', branch = 'forcematch', tag = self.tag)) self.nstruc = np.shape(self.structures)[0] self.natoms = np.shape(self.structures)[1] self.energy = np.zeros((self.nstruc)) energies = copy.deepcopy(self.ref(info = 'energies', branch = 'forcematch', tag = self.tag)[:,0]) self.indmin = np.argsort(energies)[0] self.min_e = energies[self.indmin] # self.ref_energy = energies - self.min_e self.ref_energy = energies - self.ref_diff if self.set_cell == True: self.cells = - copy.deepcopy(self.ref(info = 'cells', branch = 'forcematch', tag = self.tag)) return def __call__(self): for i in range(self.nstruc): if self.set_cell == True: self.pd.set_cell(self.cells[i,:,:]) if self.geomparams != None: structure = self.structures[i,:,:] + (self.geomparams * \ self.geomvectors[i,:,:]) self.pd.set_xyz(structure) else: self.pd.set_xyz(self.structures[i,:,:]) self.pd.set_atoms_moved() if self.skip_bonded: self.pd.skip_bonded() e, f = self.pd.calc_energy_force() self.energy[i] = e #self.energy = self.energy-self.energy[self.indmin] self.calc_msd() self.cycle += 1 if self.verbose: self.print_energies() return self.msd, [self.a_msd] def calc_msd(self): assert np.shape(self.energy) == np.shape(self.ref_energy) delt = (self.energy - self.ref_energy)*self.fact_energy self.msd = (delt*delt).sum()/self.nstruc self.a_msd = [self.msd] self.fdiagnostics.write("%s %6.6f\n" % (self.cycle, self.msd)) return def print_energies(self, element = 100): lref = [] lfit = [] for i in xrange(self.nstruc): if i % 500 == 0: #lref.append(np.sum(self.ref_energy[i:i+500]+self.min_e)/self.nstruc) #lfit.append(np.sum(self.energy[i:i+500]+self.energy[self.indmin])/self.nstruc) lref.append(np.sum(self.ref_energy[i:i+500])/500.0) lfit.append(np.sum(self.energy[i:i+500])/500.0) print i, self.ref_energy[i], self.energy[i], self.ref_energy[i] -self.energy[i] pickle.dump(lref, open('ref.pickle', 'wb')) pickle.dump(lfit, open('fit.pickle', 'wb')) return
[ "hopefulp@gmail.com" ]
hopefulp@gmail.com
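Written out, the quantity computed in energy_fit.calc_msd above is

\[ \mathrm{MSD} = \frac{1}{N_{\mathrm{struc}}} \sum_{i=1}^{N_{\mathrm{struc}}} \bigl( f_E \, (E_i - E_i^{\mathrm{ref}}) \bigr)^2 \]

where \(f_E\) is fact_energy (1.0 by default) and \(E_i^{\mathrm{ref}}\) are the reference energies shifted by ref_diff.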
b2238d921ad8dbc4f7739b6e6a2c56d29ecb57bd
08eee2aa3767a94e14f2fb8c1ab8044d69d2ef37
/BacData/BacItem.py
be5152851d5298d2dae70fbacfb8669671d07734
[]
no_license
msorins/Bacalaureat2017-Crawler
fd74c40709cbf9da60dfbff58e14b0c7279c5279
af18b50b8b642bec81dcbfc05c2341add7d950ea
refs/heads/master
2020-12-02T17:58:42.357631
2017-07-07T10:03:12
2017-07-07T10:03:12
96,457,865
1
0
null
null
null
null
UTF-8
Python
false
false
1,319
py
import scrapy


class BacItem(scrapy.Item):
    # define the fields for your item here like:
    nr = scrapy.Field()
    nume = scrapy.Field()
    posIerarhieJudet = scrapy.Field()
    posIerarhieTara = scrapy.Field()
    unitInvatamant = scrapy.Field()
    judet = scrapy.Field()
    promotieAnterioara = scrapy.Field()
    formaEducatie = scrapy.Field()
    specializare = scrapy.Field()
    examenOralRomana = scrapy.Field()
    notaScrisaRomana = scrapy.Field()
    notaContestatieRomana = scrapy.Field()
    notaFinalaRomana = scrapy.Field()
    limbaMaterna = scrapy.Field()
    limbaModerna = scrapy.Field()
    notaLimbaModerna = scrapy.Field()
    disciplinaObligatorie = scrapy.Field()
    disciplinaAlegere = scrapy.Field()
    competenteDigitale = scrapy.Field()
    medie = scrapy.Field()
    rezultatFinal = scrapy.Field()
    competenteMaterna = scrapy.Field()
    notaScrisaMaterna = scrapy.Field()
    notaContestatieMaterna = scrapy.Field()
    notaFinalaMaterna = scrapy.Field()
    notaDisciplinaObligatorie = scrapy.Field()
    notaContestatieDisciplinaObligatorie = scrapy.Field()
    notaFinalaDisciplinaObligatorie = scrapy.Field()
    notaDisciplinaAlegere = scrapy.Field()
    notaContestatieDisciplinaAlegere = scrapy.Field()
    notaFinalaDisciplinaAlegere = scrapy.Field()
[ "sorynsoo@gmail.com" ]
sorynsoo@gmail.com
28c2e084211d7511caea9b3e9b12e41899b03a51
3c48cb09c8925108284a5a074f2ce45614f974e1
/GroupClientAudio.py
76e90510aa302f76f6aefd2c427a4e5fd557858b
[]
no_license
HsimWong/PythonChat
fe0269f6a1cb252ea0cfe14f4245d076c50402bf
0dbe53959a69fb4a14240d60a0cc8532a98c9588
refs/heads/master
2020-06-12T03:15:29.709148
2019-07-01T00:07:33
2019-07-01T00:07:33
194,178,846
4
0
null
null
null
null
UTF-8
Python
false
false
1,215
py
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
import pyaudio
from array import array

HOST = input("Enter Server IP\n")
PORT = 4000
BufferSize = 4096
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 1024


def SendAudio():
    while True:
        data = stream.read(CHUNK)
        dataChunk = array('h', data)
        vol = max(dataChunk)
        if (vol > 500):
            print("Recording Sound...")
        else:
            print("Silence..")
        client.sendall(data)


def RecieveAudio():
    while True:
        data = recvall(BufferSize)
        stream.write(data)


def recvall(size):
    databytes = b''
    while len(databytes) != size:
        to_read = size - len(databytes)
        if to_read > (4 * CHUNK):
            databytes += client.recv(4 * CHUNK)
        else:
            databytes += client.recv(to_read)
    return databytes


client = socket(family=AF_INET, type=SOCK_STREAM)
client.connect((HOST, PORT))

audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                    input=True, output=True, frames_per_buffer=CHUNK)

RecieveAudioThread = Thread(target=RecieveAudio).start()
SendAudioThread = Thread(target=SendAudio).start()
[ "16074101@emails.bjut.edu.cn" ]
16074101@emails.bjut.edu.cn
7760f4ddb317f0829c74cbe78d8c30155f2a7ae2
2baf3b1d757ee94cbac55f15ca847a2eaf85ebb0
/code/batch_driver.py
b917168f9d867023213d7641c60331cb34b246ab
[]
no_license
gatech-cse6730/proj2-code
e96a932ebdec19aa5d6946ce4af3c0260deac780
a8453dd56107b142f4c313dc6dd26d7ced1b4f78
refs/heads/master
2021-01-01T04:35:38.666339
2016-05-02T18:55:20
2016-05-02T18:55:20
56,277,428
0
8
null
2016-05-02T13:59:19
2016-04-14T23:59:16
Python
UTF-8
Python
false
false
928
py
from driver import Driver
import numpy as np


class BatchDriver(object):
    """ Performs a batch of simulation runs. Only for use by analyzer.py. """

    def __init__(self, num_sims=20):
        self.num_sims = num_sims

    def drive(self, initial_pop=50):
        random_seed = 0
        results = []

        print('-'*50)
        print('---> Initializing batch for initial_pop %d.' % initial_pop)

        for i in xrange(self.num_sims):
            print('---> Initializing simulation run %d for initial pop %d.' % (i, initial_pop))
            driver = Driver(vis=False)
            result = driver.drive(max_iterations=1500,
                                  random_seed=random_seed,
                                  initial_pop=initial_pop)
            results.append(result)
            random_seed += 2

        return results


if __name__ == '__main__':
    driver = BatchDriver()
    driver.drive()
[ "matthewbentonmay@gmail.com" ]
matthewbentonmay@gmail.com
7b5135db6ba20d4dd0d5ac8f2cb1c8c27a76fff4
dba3e87b75a782e2083d59957d79b4448854a8b4
/settings.py
d8eb8ebb7727327530297a1513d028b6773ebf14
[]
no_license
mountainhound/roan-trader
803f8eaccd6c950dcecdd14a18e45a25bfbb02e9
47ef5ef50e1a7fd84ffcc7c5372fb96e041c573d
refs/heads/master
2020-03-07T07:53:58.567099
2018-04-25T18:46:00
2018-04-25T18:46:00
127,361,676
0
0
null
null
null
null
UTF-8
Python
false
false
460
py
#GDAX Credentials
GDAX_API_KEY = "CHANGE_ME"
GDAX_PRIVATE_KEY = "CHANGE_ME"
GDAX_PASSPHRASE = "CHANGE_ME"

#BANDWIDTH API Credentials
BANDWIDTH_USER = "CHANGE_ME"
BANDWIDTH_TOKEN = "CHANGE_ME"
BANDWIDTH_SECRET = "CHANGE_ME"
ROOT_NUMBER = "CHANGE_ME"
ORIGIN_NUMBER = "CHANGE_ME"

#MAILGUN API Credentials
MAILGUN_API_KEY = "CHANGE_ME"
MAILGUN_API_URL = "CHANGE_ME"

#COIN Settings
COIN_LIST = ["LTC","ETH","BTC"]

#APP Settings
APP_URL = "http://localhost:5000"
[ "parker@fopark.com" ]
parker@fopark.com
0ccd2913d0c8ddb2a1b22ebbba253486884ded42
64e6d36fe639bdf37773a1d1a2ec5664538d325e
/sdk/python/tests/dsl/metric_utils_test.py
5898561f61e31b7da4c33a30e21d956c70ff804d
[ "Apache-2.0" ]
permissive
JohanWork/pipelines
5c4ba6bba30ace675df80761dd483612a79c3f29
f32915e88f501dd9256eb259288a00e0e6f9c9e0
refs/heads/master
2021-07-16T00:11:46.065197
2021-04-10T16:20:04
2021-04-10T16:20:04
244,719,809
1
0
Apache-2.0
2020-03-03T19:10:35
2020-03-03T19:10:35
null
UTF-8
Python
false
false
2,235
py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.dsl.metrics_utils."""

import os
import unittest
import yaml
import json
import jsonschema

from google.protobuf import json_format
from google.protobuf.struct_pb2 import Struct

from kfp.dsl import metrics_utils


class MetricsUtilsTest(unittest.TestCase):

    def test_confusion_matrix(self):
        conf_matrix = metrics_utils.ConfusionMatrix()
        conf_matrix.set_categories(['dog', 'cat', 'horses'])
        conf_matrix.log_row('dog', [2, 6, 0])
        conf_matrix.log_cell('cat', 'dog', 3)
        with open(os.path.join(os.path.dirname(__file__), 'test_data',
                               'expected_confusion_matrix.json')) as json_file:
            expected_json = json.load(json_file)
            self.assertEqual(expected_json, conf_matrix.get_metrics())

    def test_bulkload_confusion_matrix(self):
        conf_matrix = metrics_utils.ConfusionMatrix()
        conf_matrix.load_matrix(['dog', 'cat', 'horses'],
                                [[2, 6, 0], [3, 5, 6], [5, 7, 8]])
        with open(os.path.join(os.path.dirname(__file__), 'test_data',
                               'expected_bulk_loaded_confusion_matrix.json')) as json_file:
            expected_json = json.load(json_file)
            self.assertEqual(expected_json, conf_matrix.get_metrics())

    def test_confidence_metrics(self):
        confid_metrics = metrics_utils.ConfidenceMetrics()
        confid_metrics.confidenceThreshold = 24.3
        confid_metrics.recall = 24.5
        confid_metrics.falsePositiveRate = 98.4
        expected_dict = {
            'confidenceThreshold': 24.3,
            'recall': 24.5,
            'falsePositiveRate': 98.4
        }
        self.assertEqual(expected_dict, confid_metrics.get_metrics())


if __name__ == '__main__':
    unittest.main()
[ "noreply@github.com" ]
JohanWork.noreply@github.com
43e5286bb463c6cdd9fb51b5ad5f5f618582a702
74b310d13d317cd29a5b5a8f22fcbeeb7cb0f07d
/gallery/ising-square-tc/plot.py
1c0142441ac43d25ecc9bd0532d5c10a52964ebb
[ "Apache-2.0" ]
permissive
todo-group/exact
1bb146ee9a616a36a0f2b4f4b2c0ce1db3e7c39e
ee76421fab9b2b1eaf77d6b01830a18e66f7180a
refs/heads/master
2021-12-15T15:16:54.043117
2021-12-14T11:27:41
2021-12-14T11:27:41
60,764,380
3
0
null
null
null
null
UTF-8
Python
false
false
945
py
import math
import matplotlib.pyplot as plt

filename = "result-p15.dat"

with open(filename, 'r') as f:
    for line in f:
        data = line.split()
        if (data[0] == "inf"):
            free_energy_inf = float(data[6])
            energy_inf = float(data[7])

L = []
free_energy = []
energy = []
with open(filename, 'r') as f:
    for line in f:
        data = line.split()
        if (data[0] != '#' and data[0] != "inf"):
            L.append(int(data[0]))
            free_energy.append(math.fabs(float(data[6]) - free_energy_inf))
            energy.append(math.fabs(float(data[7]) - energy_inf))

plt.plot(L, free_energy, marker='o', label='finite-size error of free energy density')
plt.plot(L, energy, marker='v', label='finite-size error of energy density')
plt.xlabel('L')
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.legend()
plt.savefig('plot.pdf')
plt.show()

print(L)
print(free_energy)
print(energy)
[ "wistaria@phys.s.u-tokyo.ac.jp" ]
wistaria@phys.s.u-tokyo.ac.jp
f63531c28c4ac3e571b51097354a358deee7dda5
baa39ba9fce126d012b3161231890eaf193a7d4c
/code/projects/word_count/main.py
ef24d67bededa99c3e480b89265d595c408a932d
[]
no_license
pmayd/python-complete
fa0e3e7ad35ce7b8fba9effa7868d206e8901ff6
6c5d35cb88452898e6798ab9914139151228318b
refs/heads/main
2021-12-23T06:22:31.366878
2021-11-29T07:25:26
2021-11-29T07:25:26
233,419,063
1
0
null
null
null
null
UTF-8
Python
false
false
596
py
#!/usr/bin/env python3
"""
Reads a file and returns the number of lines, words, and characters - similar
to the UNIX wc utility
"""
from pathlib import Path


def word_count():
    """
    To give you a better sense of how a Python program works, this section
    looks at a small sample that roughly replicates the UNIX wc utility and
    reports the number of lines, words, and characters in a file.

    The sample in this listing is deliberately written to be clear to
    programmers who are new to Python and to be as simple as possible.
    """
    pass


if __name__ == "__main__":
    word_count()
[ "michael.aydinbas@gmail.com" ]
michael.aydinbas@gmail.com
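word_count() in the sample above is deliberately left as a stub; a minimal completion consistent with its docstring might look like this (the input file name "word_count.tst" is an assumption, not taken from the listing):

from pathlib import Path

def word_count():
    # Hedged sketch: counts lines, words, and characters like Unix wc.
    # "word_count.tst" is a placeholder file name.
    text = Path("word_count.tst").read_text()
    print(f"{len(text.splitlines())} lines, {len(text.split())} words, {len(text)} characters")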
ab3c213726a9356b82943980533101b0cf5a99d5
cdb05e62e14623f18f84797c4b6780a9d7032478
/perfkitbenchmarker/linux_benchmarks/messaging_service_benchmark.py
1da35bc6a27cbc1ceeb88f397306207334ae391a
[ "BSD-3-Clause", "MIT", "Classpath-exception-2.0", "AGPL-3.0-only", "GPL-2.0-only", "Apache-2.0", "LicenseRef-scancode-public-domain" ]
permissive
pdeyhim/PerfKitBenchmarker
eece17148862f8c902d7404f9740abefb305d43c
0e1f6caaceb771437e0e8d8d7724b41be040df0d
refs/heads/master
2023-07-20T06:13:42.021733
2023-07-19T03:47:04
2023-07-19T03:47:04
67,570,369
0
0
Apache-2.0
2023-07-19T03:47:05
2016-09-07T03:56:58
Python
UTF-8
Python
false
false
5,514
py
# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messaging Service benchmark.

This benchmark runs in a client VM and benchmarks messaging services from
different cloud providers. It measures latency to publish/pull messages from
the client VM. This benchmark first sends a command to the client VM to
publish messages. When that completes it sends commands to pull the messages.

Measuring latency of single message publish/pull in each scenario:
- publish: it publishes N messages of size X (N and X can be specified with
  the number_of_messages and message_size FLAGS respectively). It measures the
  latency between each call to publish the message and the message being
  successfully published.
- pull: it pulls N messages 1 by 1. It measures the latency of:
  - A call to pull the message and the message being received.
  - A call to pull the message and the message being received and
    acknowledged.
"""

from typing import Any, Dict, List

from absl import flags
from perfkitbenchmarker import benchmark_spec as bm_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample

BENCHMARK_NAME = 'messaging_service'
BENCHMARK_CONFIG = """
messaging_service:
  description: messaging_service benchmark
  vm_groups:
    default:
      os_type: debian10
      vm_spec:
        AWS:
          machine_type: m5.2xlarge
          zone: us-east-1a
        Azure:
          machine_type: Standard_D8d_v4
          zone: eastus
        GCP:
          machine_type: n2-standard-8
          zone: us-central1-a
  messaging_service:
    delivery: pull
"""

SINGLE_OP = 'single_op'
END_TO_END = 'end_to_end'
MEASUREMENT_CHOICES = [SINGLE_OP, END_TO_END]

FLAGS = flags.FLAGS

_MEASUREMENT = flags.DEFINE_enum(
    'messaging_service_measurement', 'single_op', MEASUREMENT_CHOICES,
    help='Way to measure latency.')
_NUMBER_OF_MESSAGES = flags.DEFINE_integer(
    'messaging_service_number_of_messages', 100,
    help='Number of messages to use on benchmark.')
_MESSAGE_SIZE = flags.DEFINE_integer(
    'messaging_service_message_size', 10,
    help='Number of characters to have in a message. '
    "Ex: 1: 'A', 2: 'AA', ...")


def GetConfig(user_config: Dict[Any, Any]) -> Dict[Any, Any]:
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)


def _CreateSamples(results: Dict[str, Any], number_of_messages: int,
                   message_size: int, cloud: str) -> List[sample.Sample]:
  """Handles sample creation from benchmark_scenario results."""
  samples = []
  common_metadata = {
      'number_of_messages': number_of_messages,
      'message_size': message_size,
      'cloud': cloud
  }
  for metric_name in results:
    metric_value = results[metric_name]['value']
    metric_unit = results[metric_name]['unit']
    metric_metadata = results[metric_name]['metadata']
    metric_metadata.update(common_metadata)
    # aggregated metrics, such as: mean, p50, p99...
    samples.append(
        sample.Sample(metric_name, metric_value, metric_unit, metric_metadata))
  return samples


def Prepare(benchmark_spec: bm_spec.BenchmarkSpec):
  """Prepares the client VM.

  Runs the prepare function from get_instance. It prepares the cloud
  environment with resource creation (for GCP Cloud Pub/Sub it creates topic
  and subscription) and prepares the client VM with packages and files needed
  to run the benchmark.

  Args:
    benchmark_spec: The benchmark specification.
  """
  benchmark_spec.messaging_service.PrepareClientVm()


def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:
  """Measure the latency to publish, pull, or publish and pull messages.

  Runs the run function from get_instance. It runs the benchmark specified
  with the flag: 'messaging_service_benchmark' from the client VM.

  Args:
    benchmark_spec: The benchmark specification.

  Returns:
    List of samples. Produced when running the benchmark from client VM
    (on 'messaging_service.Run()' call).
  """
  service = benchmark_spec.messaging_service

  if _MEASUREMENT.value == SINGLE_OP:
    publish_results = service.Run(service.PUBLISH_LATENCY,
                                  int(_NUMBER_OF_MESSAGES.value),
                                  int(_MESSAGE_SIZE.value))
    pull_results = service.Run(service.PULL_LATENCY,
                               int(_NUMBER_OF_MESSAGES.value),
                               int(_MESSAGE_SIZE.value))
    publish_results.update(pull_results)
    results = publish_results
  elif _MEASUREMENT.value == END_TO_END:
    results = service.Run(service.END_TO_END_LATENCY,
                          int(_NUMBER_OF_MESSAGES.value),
                          int(_MESSAGE_SIZE.value))

  # Creating samples from results
  samples = _CreateSamples(results, int(_NUMBER_OF_MESSAGES.value),
                           int(_MESSAGE_SIZE.value), FLAGS.cloud)
  return samples


def Cleanup(_: bm_spec.BenchmarkSpec):
  pass
[ "copybara-worker@google.com" ]
copybara-worker@google.com
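For reference, _CreateSamples in the benchmark above iterates a mapping from metric name to value/unit/metadata; a made-up example of the shape it expects (the metric name and numbers are illustrative only, not produced by this file):

# Illustrative only: metric name and values are invented.
results = {
    'publish_latency_mean': {
        'value': 12.3,                        # read as results[name]['value']
        'unit': 'ms',                         # read as results[name]['unit']
        'metadata': {'scenario': 'publish'},  # merged with common_metadata
    },
}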
0bf0a1ab2006bc6c08a47a99e365f27567f909c5
24057bbc039c4cd39000c09b5d8954dc080d90a8
/Clase 2/Ejemplos/02 - funciones/02 - argumentos/b_args_con_clave.py
1fd88e08f39777b41b4d2e82ee63e2c98edf4772
[ "MIT" ]
permissive
Python-Academy-Argentina/Fundamentals
9c2e551a38729bd58f54f0b0071fe4f704b6ed19
000b7c75269b99dfe3abfe01062bf17115bab2c3
refs/heads/master
2023-06-08T04:55:34.884583
2021-06-21T15:26:46
2021-06-21T15:26:46
364,298,252
11
4
MIT
2021-06-08T20:27:32
2021-05-04T15:15:57
Python
UTF-8
Python
false
false
1,764
py
# b_args_con_clave.py
# shows examples of functions with keyword arguments

# in the <operaciones> function from a_args_posicionales.py we see that two
# arguments are accepted: <a> and <b>
# these arguments have a name (keyword), although they can also be treated as
# positional as long as they are not referenced by name:


def operaciones(a, b):
    """
    Returns the result of several arithmetic operations
    between <a> and <b>.
    """
    print(f'\nOrder does not matter for addition:')
    print(f'{a} + {b} is {a + b} and {b} + {a} is also {b + a}')

    print(f'\nNor for multiplication:')
    print(f'{a} * {b} is {a * b} and {b} * {a} is also {b * a}')

    print('\nHowever, subtraction and division are not so simple...')
    print(f'{a} - {b} is {a - b}, but {b} - {a} is {b - a}')
    print(f'{a} / {b} is {a / b}, but {b} / {a} is {b / a}')


print('\nPassing a = 5 and b = 2 by name:')
operaciones(b=2, a=5)  # the values are swapped, but the result is the same

print('\nPassing a=7 and b=1 by name:')
operaciones(b=1, a=7)

# if we wanted to forbid the use of keywords for certain arguments, Python
# lets us delimit them by passing / as if it were a variable:
print('\n\nNow we try passing positional arguments by keyword:')


def suma_posicionales(a, b, /):  # after / we may keep adding keyword arguments
    """
    This one does not allow something like 'suma_posicionales(a=1, b=0)'.
    """
    print(f'\n{a} + {b} is {a + b}')


print('\nPassing <a> and <b> without keywords:')
suma_posicionales(5, 2)  # this does not fail

print('\nPassing a=5 and b=2 by keyword:')
suma_posicionales(a=5, b=2)  # this raises a TypeError
print()
[ "67435760+xthehatterx@users.noreply.github.com" ]
67435760+xthehatterx@users.noreply.github.com
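The lesson above covers positional-only parameters marked with /; Python's mirror-image marker * makes parameters keyword-only. A small complementary sketch (not part of the original lesson file):

def resta(*, a, b):
    # a and b can only be passed by keyword: resta(5, 2) raises TypeError
    return a - b

print(resta(a=5, b=2))  # prints 3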
455626e28d9feee7af502b9f88ffba687191d61e
637f82f7803b639178c1a961abcf357039ddc0ef
/src/azure-cli-core/azure/cli/core/auth/identity.py
729f01a64f559e99d4cd9bb8a131ccd11c22f8b7
[ "MIT", "BSD-3-Clause", "LGPL-2.0-or-later", "GPL-1.0-or-later", "MPL-2.0", "Apache-2.0", "BSD-2-Clause", "LGPL-2.1-only", "LGPL-2.1-or-later" ]
permissive
vadeveka/azure-cli
44a6986b9a4c1765a2c305a4edf07152f55012f9
abb232d7105118500fa0fb1255e5183132806725
refs/heads/dev
2022-07-06T15:48:48.766428
2021-12-17T09:05:52
2021-12-17T09:05:52
197,075,859
0
0
MIT
2022-06-27T12:55:19
2019-07-15T21:37:08
Python
UTF-8
Python
false
false
15,362
py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import json
import os
import re

from azure.cli.core._environment import get_config_dir
from msal import PublicClientApplication

from knack.log import get_logger
from knack.util import CLIError

# Service principal entry properties
from .msal_authentication import _CLIENT_ID, _TENANT, _CLIENT_SECRET, _CERTIFICATE, _CLIENT_ASSERTION, \
    _USE_CERT_SN_ISSUER
from .msal_authentication import UserCredential, ServicePrincipalCredential
from .persistence import load_persisted_token_cache, file_extensions, load_secret_store
from .util import check_result

AZURE_CLI_CLIENT_ID = '04b07795-8ddb-461a-bbee-02f9e1bf7b46'

logger = get_logger(__name__)


class Identity:  # pylint: disable=too-many-instance-attributes
    """Class to manage identities:
        - user
        - service principal
        - TODO: managed identity
    """
    # MSAL token cache.
    # It follows singleton pattern so that all MSAL app instances share the same token cache.
    _msal_token_cache = None

    # MSAL HTTP cache for MSAL's tenant discovery, retry-after error cache, etc.
    # It *must* follow singleton pattern so that all MSAL app instances share the same HTTP cache.
    # https://github.com/AzureAD/microsoft-authentication-library-for-python/pull/407
    _msal_http_cache = None

    # Instance of ServicePrincipalStore.
    # It follows singleton pattern so that _secret_file is read only once.
    _service_principal_store_instance = None

    def __init__(self, authority, tenant_id=None, client_id=None, encrypt=False):
        """
        :param authority: Authentication authority endpoint. For example,
            - AAD: https://login.microsoftonline.com
            - ADFS: https://adfs.redmond.azurestack.corp.microsoft.com/adfs
        :param tenant_id: Tenant GUID, like 00000000-0000-0000-0000-000000000000.
            If unspecified, default to 'organizations'.
        :param client_id: Client ID of the CLI application.
        :param encrypt: Whether to encrypt MSAL token cache and service principal entries.
        """
        self.authority = authority
        self.tenant_id = tenant_id
        self.client_id = client_id or AZURE_CLI_CLIENT_ID
        self.encrypt = encrypt

        # Build the authority in MSAL style
        self._msal_authority, self._is_adfs = _get_authority_url(authority, tenant_id)

        config_dir = get_config_dir()
        self._token_cache_file = os.path.join(config_dir, "msal_token_cache")
        self._secret_file = os.path.join(config_dir, "service_principal_entries")
        self._http_cache_file = os.path.join(config_dir, "msal_http_cache.bin")

        # We make _msal_app_instance an instance attribute, instead of a class attribute,
        # because MSAL apps can have different tenant IDs.
        self._msal_app_instance = None

    @property
    def _msal_app_kwargs(self):
        """kwargs for creating UserCredential or ServicePrincipalCredential.
        MSAL token cache and HTTP cache are lazily created.
        """
        if not Identity._msal_token_cache:
            Identity._msal_token_cache = self._load_msal_token_cache()
        if not Identity._msal_http_cache:
            Identity._msal_http_cache = self._load_msal_http_cache()
        return {
            "authority": self._msal_authority,
            "token_cache": Identity._msal_token_cache,
            "http_cache": Identity._msal_http_cache
        }

    @property
    def _msal_app(self):
        """A PublicClientApplication instance for user login/logout.
        The instance is lazily created.
        """
        if not self._msal_app_instance:
            self._msal_app_instance = PublicClientApplication(self.client_id, **self._msal_app_kwargs)
        return self._msal_app_instance

    def _load_msal_token_cache(self):
        # Store for user token persistence
        cache = load_persisted_token_cache(self._token_cache_file, self.encrypt)
        return cache

    def _load_msal_http_cache(self):
        import atexit
        import pickle

        logger.debug("_load_msal_http_cache: %s", self._http_cache_file)

        try:
            with open(self._http_cache_file, 'rb') as f:
                persisted_http_cache = pickle.load(f)
        except (pickle.UnpicklingError, FileNotFoundError) as ex:
            logger.debug("Failed to load MSAL HTTP cache: %s", ex)
            persisted_http_cache = {}  # Ignore a non-exist or corrupted http_cache

        atexit.register(lambda: pickle.dump(
            # When exit, flush it back to the file.
            # If 2 processes write at the same time, the cache will be corrupted,
            # but that is fine. Subsequent runs would reach eventual consistency.
            persisted_http_cache, open(self._http_cache_file, 'wb')))

        return persisted_http_cache

    @property
    def _service_principal_store(self):
        """A ServicePrincipalStore instance for service principal entries persistence.
        The instance is lazily created.
        """
        if not Identity._service_principal_store_instance:
            store = load_secret_store(self._secret_file, self.encrypt)
            Identity._service_principal_store_instance = ServicePrincipalStore(store)
        return Identity._service_principal_store_instance

    def login_with_auth_code(self, scopes, **kwargs):
        # Emit a warning to inform that a browser is opened.
        # Only show the path part of the URL and hide the query string.
        logger.warning("The default web browser has been opened at %s. Please continue the login in the web browser. "
                       "If no web browser is available or if the web browser fails to open, use device code flow "
                       "with `az login --use-device-code`.", self._msal_app.authority.authorization_endpoint)

        from .util import read_response_templates
        success_template, error_template = read_response_templates()

        # For AAD, use port 0 to let the system choose arbitrary unused ephemeral port to avoid port collision
        # on port 8400 from the old design. However, ADFS only allows port 8400.
        result = self._msal_app.acquire_token_interactive(
            scopes, prompt='select_account', port=8400 if self._is_adfs else None,
            success_template=success_template, error_template=error_template, **kwargs)
        return check_result(result)

    def login_with_device_code(self, scopes, **kwargs):
        flow = self._msal_app.initiate_device_flow(scopes, **kwargs)
        if "user_code" not in flow:
            raise ValueError(
                "Fail to create device flow. Err: %s" % json.dumps(flow, indent=4))
        logger.warning(flow["message"])
        result = self._msal_app.acquire_token_by_device_flow(flow, **kwargs)  # By default it will block
        return check_result(result)

    def login_with_username_password(self, username, password, scopes, **kwargs):
        result = self._msal_app.acquire_token_by_username_password(username, password, scopes, **kwargs)
        return check_result(result)

    def login_with_service_principal(self, client_id, credential, scopes):
        """
        `credential` is a dict returned by ServicePrincipalAuth.build_credential
        """
        sp_auth = ServicePrincipalAuth.build_from_credential(self.tenant_id, client_id, credential)

        # This cred means SDK credential object
        cred = ServicePrincipalCredential(sp_auth, **self._msal_app_kwargs)
        result = cred.acquire_token_for_client(scopes)
        check_result(result)

        # Only persist the service principal after a successful login
        entry = sp_auth.get_entry_to_persist()
        self._service_principal_store.save_entry(entry)

    def login_with_managed_identity(self, scopes, identity_id=None):  # pylint: disable=too-many-statements
        raise NotImplementedError

    def login_in_cloud_shell(self, scopes):
        raise NotImplementedError

    def logout_user(self, user):
        accounts = self._msal_app.get_accounts(user)
        for account in accounts:
            self._msal_app.remove_account(account)

    def logout_all_users(self):
        for e in file_extensions.values():
            _try_remove(self._token_cache_file + e)

    def logout_service_principal(self, sp):
        # remove service principal secrets
        self._service_principal_store.remove_entry(sp)

    def logout_all_service_principal(self):
        # remove service principal secrets
        for e in file_extensions.values():
            _try_remove(self._secret_file + e)

    def get_user(self, user=None):
        accounts = self._msal_app.get_accounts(user) if user else self._msal_app.get_accounts()
        return accounts

    def get_user_credential(self, username):
        return UserCredential(self.client_id, username, **self._msal_app_kwargs)

    def get_service_principal_credential(self, client_id):
        entry = self._service_principal_store.load_entry(client_id, self.tenant_id)
        sp_auth = ServicePrincipalAuth(entry)
        return ServicePrincipalCredential(sp_auth, **self._msal_app_kwargs)

    def get_service_principal_entry(self, client_id):
        """This method is only used by --sdk-auth. DO NOT use it elsewhere."""
        return self._service_principal_store.load_entry(client_id, self.tenant_id)

    def get_managed_identity_credential(self, client_id=None):
        raise NotImplementedError


class ServicePrincipalAuth:

    def __init__(self, entry):
        self.__dict__.update(entry)
        if _CERTIFICATE in entry:
            from OpenSSL.crypto import load_certificate, FILETYPE_PEM, Error
            self.public_certificate = None
            try:
                with open(self.certificate, 'r') as file_reader:
                    self.certificate_string = file_reader.read()
                    cert = load_certificate(FILETYPE_PEM, self.certificate_string)
                    self.thumbprint = cert.digest("sha1").decode().replace(':', '')
                    if entry.get(_USE_CERT_SN_ISSUER):
                        # low-tech but safe parsing based on
                        # https://github.com/libressl-portable/openbsd/blob/master/src/lib/libcrypto/pem/pem.h
                        match = re.search(r'-----BEGIN CERTIFICATE-----(?P<cert_value>[^-]+)-----END CERTIFICATE-----',
                                          self.certificate_string, re.I)
                        self.public_certificate = match.group()
            except (UnicodeDecodeError, Error) as ex:
                raise CLIError('Invalid certificate, please use a valid PEM file. Error detail: {}'.format(ex))

    @classmethod
    def build_from_credential(cls, tenant_id, client_id, credential):
        entry = {
            _TENANT: tenant_id,
            _CLIENT_ID: client_id
        }
        entry.update(credential)
        return ServicePrincipalAuth(entry)

    @classmethod
    def build_credential(cls, secret_or_certificate=None, client_assertion=None, use_cert_sn_issuer=None):
        """Build credential from user input. The credential looks like below, but only one key can exist.
        {
            'client_secret': 'my_secret',
            'certificate': '/path/to/cert.pem',
            'client_assertion': 'my_federated_token'
        }
        """
        entry = {}
        if secret_or_certificate:
            if os.path.isfile(secret_or_certificate):
                entry[_CERTIFICATE] = secret_or_certificate
                if use_cert_sn_issuer:
                    entry[_USE_CERT_SN_ISSUER] = use_cert_sn_issuer
            else:
                entry[_CLIENT_SECRET] = secret_or_certificate
        elif client_assertion:
            entry[_CLIENT_ASSERTION] = client_assertion
        return entry

    def get_entry_to_persist(self):
        persisted_keys = [_CLIENT_ID, _TENANT, _CLIENT_SECRET, _CERTIFICATE, _USE_CERT_SN_ISSUER, _CLIENT_ASSERTION]
        return {k: v for k, v in self.__dict__.items() if k in persisted_keys}


class ServicePrincipalStore:
    """Save secrets in MSAL custom secret store for Service Principal authentication."""

    def __init__(self, secret_store):
        self._secret_store = secret_store
        self._entries = []

    def load_entry(self, sp_id, tenant):
        self._load_persistence()
        matched = [x for x in self._entries if sp_id == x[_CLIENT_ID]]

        if not matched:
            raise CLIError("Could not retrieve credential from local cache for service principal {}. "
                           "Run `az login` for this service principal."
                           .format(sp_id))

        matched_with_tenant = [x for x in matched if tenant == x[_TENANT]]
        if matched_with_tenant:
            cred = matched_with_tenant[0]
        else:
            logger.warning("Could not retrieve credential from local cache for service principal %s under tenant %s. "
                           "Trying credential under tenant %s, assuming that is an app credential.",
                           sp_id, tenant, matched[0][_TENANT])
            cred = matched[0]

        return cred

    def save_entry(self, sp_entry):
        self._load_persistence()

        self._entries = [
            x for x in self._entries
            if not (sp_entry[_CLIENT_ID] == x[_CLIENT_ID] and
                    sp_entry[_TENANT] == x[_TENANT])]

        self._entries.append(sp_entry)
        self._save_persistence()

    def remove_entry(self, sp_id):
        self._load_persistence()

        state_changed = False

        # clear service principal creds
        matched = [x for x in self._entries if x[_CLIENT_ID] == sp_id]
        if matched:
            state_changed = True
            self._entries = [x for x in self._entries if x not in matched]

        if state_changed:
            self._save_persistence()

    def _save_persistence(self):
        self._secret_store.save(self._entries)

    def _load_persistence(self):
        self._entries = self._secret_store.load()


def _get_authority_url(authority_endpoint, tenant):
    """Convert authority endpoint (active_directory) to MSAL authority:
        - AAD: https://login.microsoftonline.com/your_tenant
        - ADFS: https://adfs.redmond.azurestack.corp.microsoft.com/adfs
    For ADFS, tenant is discarded.
    """
    # Some Azure Stack (bellevue)'s metadata returns
    # "loginEndpoint": "https://login.microsoftonline.com/"
    # Normalize it by removing the trailing /, so that authority_url won't become
    # "https://login.microsoftonline.com//tenant_id".
    authority_endpoint = authority_endpoint.rstrip('/').lower()
    is_adfs = authority_endpoint.endswith('adfs')
    if is_adfs:
        authority_url = authority_endpoint
    else:
        authority_url = '{}/{}'.format(authority_endpoint, tenant or "organizations")
    return authority_url, is_adfs


def _try_remove(path):
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
[ "noreply@github.com" ]
vadeveka.noreply@github.com
5bd57c08061cf6dac76186a8312d874462cbc7c0
5d8585f6518fa96dc7e60ba8c6d5cecac010433d
/qsbk/venv/bin/tkconch
798588e3ec42e70107afd355071f5b90666d335e
[]
no_license
Fessible/PythonLearning
25218cdd38f708f62e7e50990015713ca88541ec
8a07610a8548ba8b006da61e51f5fcdac6788c21
refs/heads/master
2022-12-10T03:27:47.177447
2019-07-26T07:44:47
2019-07-26T07:44:47
191,890,736
0
0
null
2022-12-08T05:55:28
2019-06-14T06:48:35
Python
UTF-8
Python
false
false
422
#!/Users/rhm/github/PythonLearning/qsbk/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.2.1','console_scripts','tkconch'
__requires__ = 'Twisted==19.2.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('Twisted==19.2.1', 'console_scripts', 'tkconch')()
    )
[ "499953902@qq.com" ]
499953902@qq.com
1de2aad3e1648f8a0ece58926c2bb7625469dbd9
b588428c3e115cc638808762b058b633a9495ca8
/ModelPlotter/migrations/0006_xmlfile.py
5f4da49c9159814e145a20d7b7f028583bf19e2b
[ "MIT" ]
permissive
gw-vis/VISapps
a5a8b3f20bd7256bc21ee254cee475d53aa8597b
42978cc7747b05fbcd3325b15ded4be08e9da45e
refs/heads/main
2023-03-05T05:58:58.452626
2021-02-20T05:35:08
2021-02-20T05:35:08
340,561,510
0
0
null
null
null
null
UTF-8
Python
false
false
572
py
# Generated by Django 2.0.13 on 2019-07-31 06:30

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('ModelPlotter', '0005_optics'),
    ]

    operations = [
        migrations.CreateModel(
            name='XMLfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('filename', models.CharField(max_length=50)),
                ('t_file', models.FileField(upload_to='')),
            ],
        ),
    ]
[ "miyo@icrr.u-tokyo.ac.jp" ]
miyo@icrr.u-tokyo.ac.jp
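For orientation, the model this migration creates would look roughly like the following in ModelPlotter/models.py (reconstructed from the migration fields; the repository's actual models.py may differ):

from django.db import models

class XMLfile(models.Model):
    # the id primary key is added automatically by Django
    filename = models.CharField(max_length=50)
    t_file = models.FileField(upload_to='')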
0bbcb49bcd2d54187bbc70f78e4838c2bfc15499
768faa9e30b7879e68f1c4f45ad305c45909fe48
/views.py
309bc10c5396c9bb2e537225c0982b2803701edf
[]
no_license
jptree/bond-project
9d539b4d595d763741932fa6aa995f19d839a513
6cab9789b6667250104a39888ff26932cc423b48
refs/heads/master
2023-07-01T18:19:09.151985
2021-08-11T03:33:14
2021-08-11T03:33:14
393,229,834
1
0
null
null
null
null
UTF-8
Python
false
false
12,318
py
def intro():
    import streamlit as st

    st.sidebar.success("Select a view above.")

    st.markdown(
        """
        This application was developed by Jason F. Petri. There are various views
        to select from--all of which help in the analysis of the securities provided.

        *August 10, 2021*
        """
    )


def pnl_cusip_attribution():
    import streamlit as st
    import numpy as np
    import pandas as pd
    from datetime import timedelta
    from main import cumulative_pnl, cumulative_change_window_gains_losses, cumulative_winners_losers_new

    df = pd.read_csv('ctap_analytics_pnl_decomp_sample_day.csv')
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    # df['cumulative_pnl'] = cumulative_pnl(df).reset_index()
    cumulative = df.groupby(['cusip', 'timestamp'])['spread_pnl'].sum().groupby(level=0).cumsum().reset_index()
    cumulative.columns = ['cusip', 'timestamp', 'cumulative_pnl']
    df = pd.merge(df, cumulative, on=['timestamp', 'cusip'])

    # Add time frequency?
    time_window = st.slider(
        "Select chart time window range:",
        value=(df['timestamp'].min().to_pydatetime(), df['timestamp'].max().to_pydatetime()),
        min_value=df['timestamp'].min().to_pydatetime(),
        max_value=df['timestamp'].max().to_pydatetime(),
        step=timedelta(0, 60 * 5),
        format='D-M-YYYY H:m'
    )

    start_time = time_window[0]
    end_time = time_window[1]
    data = df[(df['timestamp'] >= start_time) & (df['timestamp'] <= end_time)]

    chart = st.line_chart(cumulative_pnl(data))

    n_cusip_input = st.text_input(
        label='Enter number of CUSIPS desired in table below',
        value='5'
    )

    try:
        n_cusip = int(n_cusip_input)
    except ValueError:
        st.error('Please enter a valid integer')
        n_cusip = 5

    winners, losers = cumulative_winners_losers_new(data, n_cusip)
    winners = winners.reset_index()
    winners.index = np.arange(1, len(winners) + 1)
    winners.columns = ['CUSIP', 'Profits']
    losers = losers.reset_index()
    losers.index = np.arange(1, len(losers) + 1)
    losers.columns = ['CUSIP', 'Losses']

    st.header('Total P&L from Beginning to End of Period')
    col1, spacing, col2 = st.columns([5, 1, 5])
    with col1:
        st.table(winners)
        st.text('List of all CUSIPS')
        st.text(' '.join(list(winners['CUSIP'])))
    with col2:
        st.table(losers)
        st.text('List of all CUSIPS')
        st.text(' '.join(list(losers['CUSIP'])))


def individual_cusip_analysis():
    import streamlit as st
    import pandas as pd
    from datetime import timedelta
    import numpy as np
    import matplotlib.pyplot as plt

    df = pd.read_csv('ctap_analytics_pnl_decomp_sample_day.csv')
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    cumulative = df.groupby(['cusip', 'timestamp'])['spread_pnl'].sum().groupby(level=0).cumsum().reset_index()
    cumulative.columns = ['cusip', 'timestamp', 'cumulative_pnl']
    df = pd.merge(df, cumulative, on=['timestamp', 'cusip'])

    desired_cusips_input = st.text_input(
        label='Enter desired CUSIPS separated by a space',
        value='928563AJ4 285512AE9 254709AM0 760759AU4 92826CAP7'
    )
    desired_cusips = desired_cusips_input.split(' ')

    time_window = st.slider(
        "Select chart time window range:",
        value=(df['timestamp'].min().to_pydatetime(), df['timestamp'].max().to_pydatetime()),
        min_value=df['timestamp'].min().to_pydatetime(),
        max_value=df['timestamp'].max().to_pydatetime(),
        step=timedelta(0, 60 * 5),
        format='D-M-YYYY H:m'
    )

    start_time = time_window[0]
    end_time = time_window[1]
    data = df[(df['timestamp'] >= start_time) & (df['timestamp'] <= end_time)]
    data = data[data['cusip'].isin(desired_cusips)]

    other_cusip_info = data.drop_duplicates('cusip')[['cusip', 'type', 'benchmark_cusip', 'ticker',
                                                      'securitydes', 'industrySector', 'mat_bucket',
                                                      'liq_score', 'liq_bucket', 'weightedage']].set_index('cusip')

    cusip_data = data.groupby(['cusip', 'timestamp'])['cumulative_pnl'].sum()

    normalize = st.checkbox(label='Normalize data')

    plot_data = {}
    for c in desired_cusips:
        if normalize:
            plot_data[c] = cusip_data[c] - cusip_data[c][0]
        else:
            plot_data[c] = cusip_data[c]

    chart = st.line_chart(plot_data)

    st.header('Characteristics of desired cusips')
    st.table(other_cusip_info)

    st.header('P&L Change Distribution')
    selected_cusip = st.selectbox(label='Select one of your CUSIPs', options=desired_cusips)
    number_of_bins = st.slider(label='Select the number of bins', min_value=10, max_value=100, value=40)
    selected_cusip_data = data[data['cusip'] == selected_cusip]['spread_pnl']

    fig, ax = plt.subplots()
    ax.hist(selected_cusip_data, number_of_bins, density=False)
    ax.set_xlabel('P&L')
    ax.set_ylabel('Frequency')
    ax.set_title(f'Frequency of P&L for {selected_cusip}, {number_of_bins} Bins')
    st.pyplot(fig)


def bucket_industry():
    import streamlit as st
    import pandas as pd
    from datetime import timedelta
    from numpy import cumsum
    from main import cumulative_pnl, cumulative_change_window_gains_losses, cumulative_winners_losers_new

    # def select_all(column, key, all, label):
    #     if all:
    #         return st.multiselect(
    #             label=label,
    #             options=df[column].unique(),
    #             default=df[column].unique(),
    #             key=key
    #         )
    #     else:
    #         return st.multiselect(
    #             label=label,
    #             options=df[column].unique()
    #         )

    def filter(column, selected, data):
        return data[data[column].isin(selected)]

    df = pd.read_csv('ctap_analytics_pnl_decomp_sample_day.csv')
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    cumulative = df.groupby(['cusip', 'timestamp'])['spread_pnl'].sum().groupby(level=0).cumsum().reset_index()
    cumulative.columns = ['cusip', 'timestamp', 'cumulative_pnl']
    df = pd.merge(df, cumulative, on=['timestamp', 'cusip'])
    data = df.copy()

    st.header('Filter universe of securities below')
    industry_filter = st.multiselect(label='Select industries',
                                     options=df['industrySector'].unique(),
                                     default=df['industrySector'].unique())
    # industry_all = st.checkbox("Select all", key='industry')
    # industry_filter = select_all('industrySector', 'industryFilter', industry_all, 'Select industries')
    if not industry_filter:
        st.error('Select at least one industry!')
    else:
        data = filter('industrySector', industry_filter, data)

    benchmark_filter = st.multiselect(label='Select benchmark CUSIPs',
                                      options=df['benchmark_cusip'].unique(),
                                      default=df['benchmark_cusip'].unique())
    # benchmark_all = st.checkbox("Select all", key='benchmark')
    # benchmark_filter = select_all('benchmark_cusip', 'benchmarkFilter', benchmark_all, 'Select benchmark CUSIPs')
    if not benchmark_filter:
        st.error('Select at least one benchmark CUSIPs!')
    else:
        data = filter('benchmark_cusip', benchmark_filter, data)

    maturity_filter = st.multiselect(label='Select maturity buckets',
                                     options=df['mat_bucket'].unique(),
                                     default=df['mat_bucket'].unique())
    # maturity_all = st.checkbox("Select all", key='maturity')
    # maturity_filter = select_all('mat_bucket', 'maturityFilter', maturity_all, 'Select maturity buckets')
    if not maturity_filter:
        st.error('Select at least one maturity bucket!')
    else:
        data = filter('mat_bucket', maturity_filter, data)

    liquidity_filter = st.multiselect(label='Select liquidity buckets',
                                      options=df['liq_bucket'].unique(),
                                      default=df['liq_bucket'].unique())
    # liquidity_all = st.checkbox("Select all", key='liquidity')
    # liquidity_filter = select_all('liq_bucket', 'liquidityFilter', liquidity_all, 'Select liquidity buckets')
    if not liquidity_filter:
        st.error('Select at least one liquidity bucket!')
    else:
        data = filter('liq_bucket', liquidity_filter, data)

    # st.table(data)

    st.header('Plot of P&L performance')
    time_window = st.slider(
        "Select chart time window range:",
        value=(df['timestamp'].min().to_pydatetime(), df['timestamp'].max().to_pydatetime()),
        min_value=df['timestamp'].min().to_pydatetime(),
        max_value=df['timestamp'].max().to_pydatetime(),
        step=timedelta(0, 60 * 5),
        format='D-M-YYYY H:m'
    )
    start_time = time_window[0]
    end_time = time_window[1]
    # data = df[(df['timestamp'] >= start_time) & (df['timestamp'] <= end_time)]

    sorting_options = ['industrySector', 'benchmark_cusip', 'mat_bucket', 'liq_bucket']
    selected_sort = st.selectbox(label='Select the characteristic you want to plot on', options=sorting_options)
    normalize = st.checkbox(label='Normalize data')

    plot_data = cumsum(data.groupby([selected_sort, 'timestamp'])['spread_pnl'].sum().unstack().T.fillna(0))
    plot_data = plot_data[(plot_data.index >= start_time) & (plot_data.index <= end_time)]

    if len(plot_data) != 0:
        plot = {}
        for s in data[selected_sort].unique():
            if normalize:
                plot[s] = plot_data[s] - plot_data[s][0]
            else:
                plot[s] = plot_data[s]
        chart = st.line_chart(plot)
    else:
        st.error('No data available with the current combination of filters!')

    # change_pnl = data.groupby([selected_sort, 'timestamp'])['cumulative_pnl'].sum().reset_index()
    # change_pnl = change_pnl.groupby([selected_sort])['cumulative_pnl'].last() - change_pnl.groupby([selected_sort])['cumulative_pnl'].first()
    # change_pnl = change_pnl.reset_index()
    # change_pnl.columns = [selected_sort, 'Cumulative P&L Difference']
    # change_pnl = change_pnl.sort_values(by='Cumulative P&L Difference').reset_index()[[selected_sort, 'Cumulative P&L Difference']]
    # # st.table([change_pnl[change_pnl])
    # st.table(change_pnl)

    change_pnl = plot_data.iloc[-1] - plot_data.iloc[0]
    change_pnl = change_pnl.sort_values().reset_index()
    change_pnl.columns = [selected_sort, 'Cumulative P&L Difference']
    st.table(change_pnl)


def merchant_extraction():
    import streamlit as st
    from main import load_and_use_model
    from PIL import Image
    import numpy as np

    sample_inputs = ['DISCOUNT DRUG MART 83 MEDINA OH',
                     'AKRONYM BREWING LLC AKRON OH',
                     'STEAK-N-SHAKE#0578 Q99 BRUNSWICK OH',
                     'BUFFALO WILD WINGS MED STRONGSVILLE OH',
                     'CHICK-FIL-A #01920 FAIRLAWN OH',
                     'APPLE STORE #45 FRESNO CA']
    # sample_choice = random.choice(sample_inputs)
    sample_outputs = ['DISCOUNT DRUG MART',
                      'AKRONYM BREWING',
                      'STEAK-N-SHAKE',
                      'BUFFALO WILD WINGS',
                      'CHICK-FIL-A',
                      'APPLE STORE #']

    raw_input = st.text_input(label='Enter a raw transaction statement',
                              value='CHICK-FIL-A #01920 FAIRLAWN OH')
    st.text(f'Output: {load_and_use_model("models/20210809-120113", [raw_input])}')

    st.header('Sample transaction inputs and outputs')
    st.table({'Inputs': sample_inputs, 'Outputs': sample_outputs})

    st.header('What is a convolutional neural network?')
    st.image(np.array(Image.open('cnn_image.jpeg')))

    st.header('Full disclosures...')
    st.markdown(
        """
        This *current* model will not be fantastic (on data it has not seen yet)
        for one big reason: I need more data! I am not a big time shopper, and
        these neural network models need data to function. The model will only
        be able to perform well if it has learned patterns from previous
        training data. This model was trained on my banking history and my
        parents' (they do more shopping than I do!). All in, I had about 300
        records that I manually classified (dictating where the merchant name
        is). The model could near human levels of performance with a larger
        training set and more trainable parameters.
        """
    )
[ "jasonfpetri@outlook.com" ]
jasonfpetri@outlook.com
89de947f8d9c81ba8d53f65322d7259200b4fe9a
54648f2e17e0146f7adc51aeddacd943bf2f2c20
/saleor/saleor/graphql/translations/tests/test_translations.py
7dbed94fc48c5c61a427561cc5ed1b9bbec9c47d
[ "BSD-3-Clause", "CC-BY-4.0" ]
permissive
ridwanray/shop
999792222ab12ab72af8901fdc9ee3b769b8e283
0f90e94e63129f0f1797f27f26f9c3ec08712fbb
refs/heads/master
2022-11-19T15:33:27.467896
2020-06-08T12:06:52
2020-06-08T12:06:52
null
0
0
null
null
null
null
UTF-8
Python
false
false
59,496
py
import graphene import pytest from django.contrib.auth.models import Permission from saleor.graphql.tests.utils import assert_no_permission, get_graphql_content from saleor.graphql.translations.schema import TranslatableKinds from saleor.graphql.translations.types import LanguageCodeEnum def test_product_translation(user_api_client, product): product.translations.create(language_code="pl", name="Produkt") query = """ query productById($productId: ID!) { product(id: $productId) { translation(languageCode: PL) { name language { code } } } } """ product_id = graphene.Node.to_global_id("Product", product.id) response = user_api_client.post_graphql(query, {"productId": product_id}) data = get_graphql_content(response)["data"] assert data["product"]["translation"]["name"] == "Produkt" assert data["product"]["translation"]["language"]["code"] == "PL" def test_product_variant_translation(user_api_client, variant): variant.translations.create(language_code="pl", name="Wariant") query = """ query productVariantById($productVariantId: ID!) { productVariant(id: $productVariantId) { translation(languageCode: PL) { name language { code } } } } """ product_variant_id = graphene.Node.to_global_id("ProductVariant", variant.id) response = user_api_client.post_graphql( query, {"productVariantId": product_variant_id} ) data = get_graphql_content(response)["data"] assert data["productVariant"]["translation"]["name"] == "Wariant" assert data["productVariant"]["translation"]["language"]["code"] == "PL" def test_collection_translation(user_api_client, collection): collection.translations.create(language_code="pl", name="Kolekcja") query = """ query collectionById($collectionId: ID!) { collection(id: $collectionId) { translation(languageCode: PL) { name language { code } } } } """ collection_id = graphene.Node.to_global_id("Collection", collection.id) response = user_api_client.post_graphql(query, {"collectionId": collection_id}) data = get_graphql_content(response)["data"] assert data["collection"]["translation"]["name"] == "Kolekcja" assert data["collection"]["translation"]["language"]["code"] == "PL" def test_category_translation(user_api_client, category): category.translations.create(language_code="pl", name="Kategoria") query = """ query categoryById($categoryId: ID!) { category(id: $categoryId) { translation(languageCode: PL) { name language { code } } } } """ category_id = graphene.Node.to_global_id("Category", category.id) response = user_api_client.post_graphql(query, {"categoryId": category_id}) data = get_graphql_content(response)["data"] assert data["category"]["translation"]["name"] == "Kategoria" assert data["category"]["translation"]["language"]["code"] == "PL" def test_voucher_translation(staff_api_client, voucher, permission_manage_discounts): voucher.translations.create(language_code="pl", name="Bon") query = """ query voucherById($voucherId: ID!) { voucher(id: $voucherId) { translation(languageCode: PL) { name language { code } } } } """ voucher_id = graphene.Node.to_global_id("Voucher", voucher.id) response = staff_api_client.post_graphql( query, {"voucherId": voucher_id}, permissions=[permission_manage_discounts] ) data = get_graphql_content(response)["data"] assert data["voucher"]["translation"]["name"] == "Bon" assert data["voucher"]["translation"]["language"]["code"] == "PL" def test_sale_translation(staff_api_client, sale, permission_manage_discounts): sale.translations.create(language_code="pl", name="Wyprz") query = """ query saleById($saleId: ID!) 
{ sale(id: $saleId) { translation(languageCode: PL) { name language { code } } } } """ sale_id = graphene.Node.to_global_id("Sale", sale.id) response = staff_api_client.post_graphql( query, {"saleId": sale_id}, permissions=[permission_manage_discounts] ) data = get_graphql_content(response)["data"] assert data["sale"]["translation"]["name"] == "Wyprz" assert data["sale"]["translation"]["language"]["code"] == "PL" def test_page_translation(user_api_client, page): page.translations.create(language_code="pl", title="Strona") query = """ query pageById($pageId: ID!) { page(id: $pageId) { translation(languageCode: PL) { title language { code } } } } """ page_id = graphene.Node.to_global_id("Page", page.id) response = user_api_client.post_graphql(query, {"pageId": page_id}) data = get_graphql_content(response)["data"] assert data["page"]["translation"]["title"] == "Strona" assert data["page"]["translation"]["language"]["code"] == "PL" def test_attribute_translation(user_api_client, color_attribute): color_attribute.translations.create(language_code="pl", name="Kolor") query = """ query { attributes(first: 1) { edges { node { translation(languageCode: PL) { name language { code } } } } } } """ response = user_api_client.post_graphql(query) data = get_graphql_content(response)["data"] attribute = data["attributes"]["edges"][0]["node"] assert attribute["translation"]["name"] == "Kolor" assert attribute["translation"]["language"]["code"] == "PL" def test_attribute_value_translation(user_api_client, pink_attribute_value): pink_attribute_value.translations.create(language_code="pl", name="Różowy") query = """ query { attributes(first: 1) { edges { node { values { translation(languageCode: PL) { name language { code } } } } } } } """ attribute_value_id = graphene.Node.to_global_id( "AttributeValue", pink_attribute_value.id ) response = user_api_client.post_graphql( query, {"attributeValueId": attribute_value_id} ) data = get_graphql_content(response)["data"] attribute_value = data["attributes"]["edges"][0]["node"]["values"][-1] assert attribute_value["translation"]["name"] == "Różowy" assert attribute_value["translation"]["language"]["code"] == "PL" def test_shipping_method_translation( staff_api_client, shipping_method, permission_manage_shipping ): shipping_method.translations.create(language_code="pl", name="DHL Polska") query = """ query shippingZoneById($shippingZoneId: ID!) { shippingZone(id: $shippingZoneId) { shippingMethods { translation(languageCode: PL) { name language { code } } } } } """ shipping_zone_id = graphene.Node.to_global_id( "ShippingZone", shipping_method.shipping_zone.id ) response = staff_api_client.post_graphql( query, {"shippingZoneId": shipping_zone_id}, permissions=[permission_manage_shipping], ) data = get_graphql_content(response)["data"] shipping_method = data["shippingZone"]["shippingMethods"][-1] assert shipping_method["translation"]["name"] == "DHL Polska" assert shipping_method["translation"]["language"]["code"] == "PL" def test_menu_item_translation(user_api_client, menu_item): menu_item.translations.create(language_code="pl", name="Odnośnik 1") query = """ query menuItemById($menuItemId: ID!) 
{ menuItem(id: $menuItemId) { translation(languageCode: PL) { name language { code } } } } """ menu_item_id = graphene.Node.to_global_id("MenuItem", menu_item.id) response = user_api_client.post_graphql(query, {"menuItemId": menu_item_id}) data = get_graphql_content(response)["data"] assert data["menuItem"]["translation"]["name"] == "Odnośnik 1" assert data["menuItem"]["translation"]["language"]["code"] == "PL" def test_shop_translation(user_api_client, site_settings): site_settings.translations.create(language_code="pl", header_text="Nagłówek") query = """ query { shop { translation(languageCode: PL) { headerText language { code } } } } """ response = user_api_client.post_graphql(query) data = get_graphql_content(response)["data"] assert data["shop"]["translation"]["headerText"] == "Nagłówek" assert data["shop"]["translation"]["language"]["code"] == "PL" def test_product_no_translation(user_api_client, product): query = """ query productById($productId: ID!) { product(id: $productId) { translation(languageCode: PL) { name language { code } } } } """ product_id = graphene.Node.to_global_id("Product", product.id) response = user_api_client.post_graphql(query, {"productId": product_id}) data = get_graphql_content(response)["data"] assert data["product"]["translation"] is None def test_product_variant_no_translation(user_api_client, variant): query = """ query productVariantById($productVariantId: ID!) { productVariant(id: $productVariantId) { translation(languageCode: PL) { name language { code } } } } """ product_variant_id = graphene.Node.to_global_id("ProductVariant", variant.id) response = user_api_client.post_graphql( query, {"productVariantId": product_variant_id} ) data = get_graphql_content(response)["data"] assert data["productVariant"]["translation"] is None def test_collection_no_translation(user_api_client, collection): query = """ query collectionById($collectionId: ID!) { collection(id: $collectionId) { translation(languageCode: PL) { name language { code } } } } """ collection_id = graphene.Node.to_global_id("Collection", collection.id) response = user_api_client.post_graphql(query, {"collectionId": collection_id}) data = get_graphql_content(response)["data"] assert data["collection"]["translation"] is None def test_category_no_translation(user_api_client, category): query = """ query categoryById($categoryId: ID!) { category(id: $categoryId) { translation(languageCode: PL) { name language { code } } } } """ category_id = graphene.Node.to_global_id("Category", category.id) response = user_api_client.post_graphql(query, {"categoryId": category_id}) data = get_graphql_content(response)["data"] assert data["category"]["translation"] is None def test_voucher_no_translation(staff_api_client, voucher, permission_manage_discounts): query = """ query voucherById($voucherId: ID!) { voucher(id: $voucherId) { translation(languageCode: PL) { name language { code } } } } """ voucher_id = graphene.Node.to_global_id("Voucher", voucher.id) response = staff_api_client.post_graphql( query, {"voucherId": voucher_id}, permissions=[permission_manage_discounts] ) data = get_graphql_content(response)["data"] assert data["voucher"]["translation"] is None def test_page_no_translation(user_api_client, page): query = """ query pageById($pageId: ID!) 
{ page(id: $pageId) { translation(languageCode: PL) { title language { code } } } } """ page_id = graphene.Node.to_global_id("Page", page.id) response = user_api_client.post_graphql(query, {"pageId": page_id}) data = get_graphql_content(response)["data"] assert data["page"]["translation"] is None def test_attribute_no_translation(user_api_client, color_attribute): query = """ query { attributes(first: 1) { edges { node { translation(languageCode: PL) { name language { code } } } } } } """ response = user_api_client.post_graphql(query) data = get_graphql_content(response)["data"] attribute = data["attributes"]["edges"][0]["node"] assert attribute["translation"] is None def test_attribute_value_no_translation(user_api_client, pink_attribute_value): query = """ query { attributes(first: 1) { edges { node { values { translation(languageCode: PL) { name language { code } } } } } } } """ attribute_value_id = graphene.Node.to_global_id( "AttributeValue", pink_attribute_value.id ) response = user_api_client.post_graphql( query, {"attributeValueId": attribute_value_id} ) data = get_graphql_content(response)["data"] attribute_value = data["attributes"]["edges"][0]["node"]["values"][-1] assert attribute_value["translation"] is None def test_shipping_method_no_translation( staff_api_client, shipping_method, permission_manage_shipping ): query = """ query shippingZoneById($shippingZoneId: ID!) { shippingZone(id: $shippingZoneId) { shippingMethods { translation(languageCode: PL) { name language { code } } } } } """ shipping_zone_id = graphene.Node.to_global_id( "ShippingZone", shipping_method.shipping_zone.id ) response = staff_api_client.post_graphql( query, {"shippingZoneId": shipping_zone_id}, permissions=[permission_manage_shipping], ) data = get_graphql_content(response)["data"] shipping_method = data["shippingZone"]["shippingMethods"][0] assert shipping_method["translation"] is None def test_menu_item_no_translation(user_api_client, menu_item): query = """ query menuItemById($menuItemId: ID!) { menuItem(id: $menuItemId) { translation(languageCode: PL) { name language { code } } } } """ menu_item_id = graphene.Node.to_global_id("MenuItem", menu_item.id) response = user_api_client.post_graphql(query, {"menuItemId": menu_item_id}) data = get_graphql_content(response)["data"] assert data["menuItem"]["translation"] is None def test_shop_no_translation(user_api_client, site_settings): query = """ query { shop { translation(languageCode: PL) { headerText language { code } } } } """ response = user_api_client.post_graphql(query) data = get_graphql_content(response)["data"] assert data["shop"]["translation"] is None def test_product_create_translation( staff_api_client, product, permission_manage_translations ): query = """ mutation productTranslate($productId: ID!) 
{ productTranslate( id: $productId, languageCode: PL, input: {name: "Produkt PL"}) { product { translation(languageCode: PL) { name language { code } } } } } """ product_id = graphene.Node.to_global_id("Product", product.id) response = staff_api_client.post_graphql( query, {"productId": product_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["productTranslate"] assert data["product"]["translation"]["name"] == "Produkt PL" assert data["product"]["translation"]["language"]["code"] == "PL" def test_product_update_translation( staff_api_client, product, permission_manage_translations ): product.translations.create(language_code="pl", name="Produkt") query = """ mutation productTranslate($productId: ID!) { productTranslate( id: $productId, languageCode: PL, input: {name: "Produkt PL"}) { product { translation(languageCode: PL) { name language { code } } } } } """ product_id = graphene.Node.to_global_id("Product", product.id) response = staff_api_client.post_graphql( query, {"productId": product_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["productTranslate"] assert data["product"]["translation"]["name"] == "Produkt PL" assert data["product"]["translation"]["language"]["code"] == "PL" def test_product_variant_create_translation( staff_api_client, variant, permission_manage_translations ): query = """ mutation productVariantTranslate($productVariantId: ID!) { productVariantTranslate( id: $productVariantId, languageCode: PL, input: {name: "Wariant PL"}) { productVariant { translation(languageCode: PL) { name language { code } } } } } """ product_variant_id = graphene.Node.to_global_id("ProductVariant", variant.id) response = staff_api_client.post_graphql( query, {"productVariantId": product_variant_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["productVariantTranslate"] assert data["productVariant"]["translation"]["name"] == "Wariant PL" assert data["productVariant"]["translation"]["language"]["code"] == "PL" def test_product_variant_update_translation( staff_api_client, variant, permission_manage_translations ): variant.translations.create(language_code="pl", name="Wariant") query = """ mutation productVariantTranslate($productVariantId: ID!) { productVariantTranslate( id: $productVariantId, languageCode: PL, input: {name: "Wariant PL"}) { productVariant { translation(languageCode: PL) { name language { code } } } } } """ product_variant_id = graphene.Node.to_global_id("ProductVariant", variant.id) response = staff_api_client.post_graphql( query, {"productVariantId": product_variant_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["productVariantTranslate"] assert data["productVariant"]["translation"]["name"] == "Wariant PL" assert data["productVariant"]["translation"]["language"]["code"] == "PL" def test_collection_create_translation( staff_api_client, collection, permission_manage_translations ): query = """ mutation collectionTranslate($collectionId: ID!) 
{ collectionTranslate( id: $collectionId, languageCode: PL, input: {name: "Kolekcja PL"}) { collection { translation(languageCode: PL) { name language { code } } } } } """ collection_id = graphene.Node.to_global_id("Collection", collection.id) response = staff_api_client.post_graphql( query, {"collectionId": collection_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["collectionTranslate"] assert data["collection"]["translation"]["name"] == "Kolekcja PL" assert data["collection"]["translation"]["language"]["code"] == "PL" def test_collection_update_translation( staff_api_client, collection, permission_manage_translations ): collection.translations.create(language_code="pl", name="Kolekcja") query = """ mutation collectionTranslate($collectionId: ID!) { collectionTranslate( id: $collectionId, languageCode: PL, input: {name: "Kolekcja PL"}) { collection { translation(languageCode: PL) { name language { code } } } } } """ collection_id = graphene.Node.to_global_id("Collection", collection.id) response = staff_api_client.post_graphql( query, {"collectionId": collection_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["collectionTranslate"] assert data["collection"]["translation"]["name"] == "Kolekcja PL" assert data["collection"]["translation"]["language"]["code"] == "PL" def test_category_create_translation( staff_api_client, category, permission_manage_translations ): query = """ mutation categoryTranslate($categoryId: ID!) { categoryTranslate( id: $categoryId, languageCode: PL, input: {name: "Kategoria PL"}) { category { translation(languageCode: PL) { name language { code } } } } } """ category_id = graphene.Node.to_global_id("Category", category.id) response = staff_api_client.post_graphql( query, {"categoryId": category_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["categoryTranslate"] assert data["category"]["translation"]["name"] == "Kategoria PL" assert data["category"]["translation"]["language"]["code"] == "PL" def test_category_update_translation( staff_api_client, category, permission_manage_translations ): category.translations.create(language_code="pl", name="Kategoria") query = """ mutation categoryTranslate($categoryId: ID!) { categoryTranslate( id: $categoryId, languageCode: PL, input: {name: "Kategoria PL"}) { category { translation(languageCode: PL) { name language { code } } } } } """ category_id = graphene.Node.to_global_id("Category", category.id) response = staff_api_client.post_graphql( query, {"categoryId": category_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["categoryTranslate"] assert data["category"]["translation"]["name"] == "Kategoria PL" assert data["category"]["translation"]["language"]["code"] == "PL" def test_voucher_create_translation( staff_api_client, voucher, permission_manage_translations ): query = """ mutation voucherTranslate($voucherId: ID!) 
{ voucherTranslate( id: $voucherId, languageCode: PL, input: {name: "Bon PL"}) { voucher { translation(languageCode: PL) { name language { code } } } } } """ voucher_id = graphene.Node.to_global_id("Voucher", voucher.id) response = staff_api_client.post_graphql( query, {"voucherId": voucher_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["voucherTranslate"] assert data["voucher"]["translation"]["name"] == "Bon PL" assert data["voucher"]["translation"]["language"]["code"] == "PL" def test_voucher_update_translation( staff_api_client, voucher, permission_manage_translations ): voucher.translations.create(language_code="pl", name="Kategoria") query = """ mutation voucherTranslate($voucherId: ID!) { voucherTranslate( id: $voucherId, languageCode: PL, input: {name: "Bon PL"}) { voucher { translation(languageCode: PL) { name language { code } } } } } """ voucher_id = graphene.Node.to_global_id("Voucher", voucher.id) response = staff_api_client.post_graphql( query, {"voucherId": voucher_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["voucherTranslate"] assert data["voucher"]["translation"]["name"] == "Bon PL" assert data["voucher"]["translation"]["language"]["code"] == "PL" def test_sale_create_translation( staff_api_client, sale, permission_manage_translations ): query = """ mutation saleTranslate($saleId: ID!) { saleTranslate( id: $saleId, languageCode: PL, input: {name: "Wyprz PL"}) { sale { translation(languageCode: PL) { name language { code } } } } } """ sale_id = graphene.Node.to_global_id("Sale", sale.id) response = staff_api_client.post_graphql( query, {"saleId": sale_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["saleTranslate"] assert data["sale"]["translation"]["name"] == "Wyprz PL" assert data["sale"]["translation"]["language"]["code"] == "PL" def test_page_create_translation( staff_api_client, page, permission_manage_translations ): query = """ mutation pageTranslate($pageId: ID!) { pageTranslate( id: $pageId, languageCode: PL, input: {title: "Strona PL"}) { page { translation(languageCode: PL) { title language { code } } } } } """ page_id = graphene.Node.to_global_id("Page", page.id) response = staff_api_client.post_graphql( query, {"pageId": page_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["pageTranslate"] assert data["page"]["translation"]["title"] == "Strona PL" assert data["page"]["translation"]["language"]["code"] == "PL" def test_page_update_translation( staff_api_client, page, permission_manage_translations ): page.translations.create(language_code="pl", title="Strona") query = """ mutation pageTranslate($pageId: ID!) { pageTranslate( id: $pageId, languageCode: PL, input: {title: "Strona PL"}) { page { translation(languageCode: PL) { title language { code } } } } } """ page_id = graphene.Node.to_global_id("Page", page.id) response = staff_api_client.post_graphql( query, {"pageId": page_id}, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["pageTranslate"] assert data["page"]["translation"]["title"] == "Strona PL" assert data["page"]["translation"]["language"]["code"] == "PL" def test_attribute_create_translation( staff_api_client, color_attribute, permission_manage_translations ): query = """ mutation attributeTranslate($attributeId: ID!) 
{ attributeTranslate( id: $attributeId, languageCode: PL, input: {name: "Kolor PL"}) { attribute { translation(languageCode: PL) { name language { code } } } } } """ attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id) response = staff_api_client.post_graphql( query, {"attributeId": attribute_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["attributeTranslate"] assert data["attribute"]["translation"]["name"] == "Kolor PL" assert data["attribute"]["translation"]["language"]["code"] == "PL" def test_attribute_update_translation( staff_api_client, color_attribute, permission_manage_translations ): color_attribute.translations.create(language_code="pl", name="Kolor") query = """ mutation attributeTranslate($attributeId: ID!) { attributeTranslate( id: $attributeId, languageCode: PL, input: {name: "Kolor PL"}) { attribute { translation(languageCode: PL) { name language { code } } } } } """ attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id) response = staff_api_client.post_graphql( query, {"attributeId": attribute_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["attributeTranslate"] assert data["attribute"]["translation"]["name"] == "Kolor PL" assert data["attribute"]["translation"]["language"]["code"] == "PL" def test_attribute_value_create_translation( staff_api_client, pink_attribute_value, permission_manage_translations ): query = """ mutation attributeValueTranslate($attributeValueId: ID!) { attributeValueTranslate( id: $attributeValueId, languageCode: PL, input: {name: "Róż PL"}) { attributeValue { translation(languageCode: PL) { name language { code } } } } } """ attribute_value_id = graphene.Node.to_global_id( "AttributeValue", pink_attribute_value.id ) response = staff_api_client.post_graphql( query, {"attributeValueId": attribute_value_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["attributeValueTranslate"] assert data["attributeValue"]["translation"]["name"] == "Róż PL" assert data["attributeValue"]["translation"]["language"]["code"] == "PL" def test_attribute_value_update_translation( staff_api_client, pink_attribute_value, permission_manage_translations ): pink_attribute_value.translations.create(language_code="pl", name="Różowy") query = """ mutation attributeValueTranslate($attributeValueId: ID!) { attributeValueTranslate( id: $attributeValueId, languageCode: PL, input: {name: "Róż PL"}) { attributeValue { translation(languageCode: PL) { name language { code } } } } } """ attribute_value_id = graphene.Node.to_global_id( "AttributeValue", pink_attribute_value.id ) response = staff_api_client.post_graphql( query, {"attributeValueId": attribute_value_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["attributeValueTranslate"] assert data["attributeValue"]["translation"]["name"] == "Róż PL" assert data["attributeValue"]["translation"]["language"]["code"] == "PL" def test_shipping_method_create_translation( staff_api_client, shipping_method, permission_manage_translations ): query = """ mutation shippingPriceTranslate($shippingMethodId: ID!) 
{ shippingPriceTranslate( id: $shippingMethodId, languageCode: PL, input: {name: "DHL PL"}) { shippingMethod { translation(languageCode: PL) { name language { code } } } } } """ shipping_method_id = graphene.Node.to_global_id( "ShippingMethod", shipping_method.id ) response = staff_api_client.post_graphql( query, {"shippingMethodId": shipping_method_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["shippingPriceTranslate"] assert data["shippingMethod"]["translation"]["name"] == "DHL PL" assert data["shippingMethod"]["translation"]["language"]["code"] == "PL" def test_shipping_method_update_translation( staff_api_client, shipping_method, permission_manage_translations ): shipping_method.translations.create(language_code="pl", name="DHL") query = """ mutation shippingPriceTranslate($shippingMethodId: ID!) { shippingPriceTranslate( id: $shippingMethodId, languageCode: PL, input: {name: "DHL PL"}) { shippingMethod { translation(languageCode: PL) { name language { code } } } } } """ shipping_method_id = graphene.Node.to_global_id( "ShippingMethod", shipping_method.id ) response = staff_api_client.post_graphql( query, {"shippingMethodId": shipping_method_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["shippingPriceTranslate"] assert data["shippingMethod"]["translation"]["name"] == "DHL PL" assert data["shippingMethod"]["translation"]["language"]["code"] == "PL" def test_menu_item_update_translation( staff_api_client, menu_item, permission_manage_translations ): menu_item.translations.create(language_code="pl", name="Odnośnik") query = """ mutation menuItemTranslate($menuItemId: ID!) { menuItemTranslate( id: $menuItemId, languageCode: PL, input: {name: "Odnośnik PL"}) { menuItem { translation(languageCode: PL) { name language { code } } } } } """ menu_item_id = graphene.Node.to_global_id("MenuItem", menu_item.id) response = staff_api_client.post_graphql( query, {"menuItemId": menu_item_id}, permissions=[permission_manage_translations], ) data = get_graphql_content(response)["data"]["menuItemTranslate"] assert data["menuItem"]["translation"]["name"] == "Odnośnik PL" assert data["menuItem"]["translation"]["language"]["code"] == "PL" def test_shop_create_translation(staff_api_client, permission_manage_translations): query = """ mutation shopSettingsTranslate { shopSettingsTranslate( languageCode: PL, input: {headerText: "Nagłówek PL"}) { shop { translation(languageCode: PL) { headerText language { code } } } } } """ response = staff_api_client.post_graphql( query, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["shopSettingsTranslate"] assert data["shop"]["translation"]["headerText"] == "Nagłówek PL" assert data["shop"]["translation"]["language"]["code"] == "PL" def test_shop_update_translation( staff_api_client, site_settings, permission_manage_translations ): site_settings.translations.create(language_code="pl", header_text="Nagłówek") query = """ mutation shopSettingsTranslate { shopSettingsTranslate( languageCode: PL, input: {headerText: "Nagłówek PL"}) { shop { translation(languageCode: PL) { headerText language { code } } } } } """ response = staff_api_client.post_graphql( query, permissions=[permission_manage_translations] ) data = get_graphql_content(response)["data"]["shopSettingsTranslate"] assert data["shop"]["translation"]["headerText"] == "Nagłówek PL" assert data["shop"]["translation"]["language"]["code"] == "PL" @pytest.mark.parametrize( 
"kind, expected_typename", [ (TranslatableKinds.PRODUCT, "ProductTranslatableContent"), (TranslatableKinds.COLLECTION, "CollectionTranslatableContent"), (TranslatableKinds.CATEGORY, "CategoryTranslatableContent"), (TranslatableKinds.PAGE, "PageTranslatableContent"), (TranslatableKinds.SHIPPING_METHOD, "ShippingMethodTranslatableContent"), (TranslatableKinds.VOUCHER, "VoucherTranslatableContent"), (TranslatableKinds.SALE, "SaleTranslatableContent"), (TranslatableKinds.ATTRIBUTE, "AttributeTranslatableContent"), (TranslatableKinds.ATTRIBUTE_VALUE, "AttributeValueTranslatableContent"), (TranslatableKinds.VARIANT, "ProductVariantTranslatableContent"), (TranslatableKinds.MENU_ITEM, "MenuItemTranslatableContent"), ], ) def test_translations_query( user_api_client, product, collection, voucher, sale, shipping_method, page, menu_item, kind, expected_typename, ): query = """ query TranslationsQuery($kind: TranslatableKinds!) { translations(kind: $kind, first: 1) { edges { node { __typename } } } } """ response = user_api_client.post_graphql(query, {"kind": kind.name}) data = get_graphql_content(response)["data"]["translations"] assert data["edges"][0]["node"]["__typename"] == expected_typename def test_translations_query_inline_fragment(user_api_client, product): product.translations.create(language_code="pl", name="Produkt testowy") query = """ { translations(kind: PRODUCT, first: 1) { edges { node { ... on ProductTranslatableContent { name translation(languageCode: PL) { name } } } } } } """ response = user_api_client.post_graphql(query) data = get_graphql_content(response)["data"]["translations"]["edges"][0] assert data["node"]["name"] == "Test product" assert data["node"]["translation"]["name"] == "Produkt testowy" QUERY_TRANSLATION_PRODUCT = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! ){ translation(kind: $kind, id: $id){ __typename ...on ProductTranslatableContent{ id name translation(languageCode: $languageCode){ name } product{ id name } } } } """ @pytest.mark.parametrize( "is_published, perm_codenames, return_product", [ (True, ["manage_translations"], True), (False, ["manage_translations"], False), (False, ["manage_translations", "manage_products"], True), ], ) def test_translation_query_product( staff_api_client, product, product_translation_fr, is_published, perm_codenames, return_product, ): product.is_published = is_published product.save() product_id = graphene.Node.to_global_id("Product", product.id) perms = list(Permission.objects.filter(codename__in=perm_codenames)) variables = { "id": product_id, "kind": TranslatableKinds.PRODUCT.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_PRODUCT, variables, permissions=perms ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["name"] == product.name assert data["translation"]["name"] == product_translation_fr.name if return_product: assert data["product"]["name"] == product.name else: assert not data["product"] QUERY_TRANSLATION_COLLECTION = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! 
){ translation(kind: $kind, id: $id){ __typename ...on CollectionTranslatableContent{ id name translation(languageCode: $languageCode){ name } collection{ id name } } } } """ @pytest.mark.parametrize( "is_published, perm_codenames, return_collection", [ (True, ["manage_translations"], True), (False, ["manage_translations"], False), (False, ["manage_translations", "manage_products"], True), ], ) def test_translation_query_collection( staff_api_client, collection, collection_translation_fr, is_published, perm_codenames, return_collection, ): collection.is_published = is_published collection.save() collection_id = graphene.Node.to_global_id("Collection", collection.id) perms = list(Permission.objects.filter(codename__in=perm_codenames)) variables = { "id": collection_id, "kind": TranslatableKinds.COLLECTION.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_COLLECTION, variables, permissions=perms ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["name"] == collection.name assert data["translation"]["name"] == collection_translation_fr.name if return_collection: assert data["collection"]["name"] == collection.name else: assert not data["collection"] QUERY_TRANSLATION_CATEGORY = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! ){ translation(kind: $kind, id: $id){ __typename ...on CategoryTranslatableContent{ id name translation(languageCode: $languageCode){ name } category { id name } } } } """ def test_translation_query_category( staff_api_client, category, category_translation_fr, permission_manage_translations ): category_id = graphene.Node.to_global_id("Category", category.id) variables = { "id": category_id, "kind": TranslatableKinds.CATEGORY.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_CATEGORY, variables, permissions=[permission_manage_translations], ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["name"] == category.name assert data["translation"]["name"] == category_translation_fr.name assert data["category"]["name"] == category.name QUERY_TRANSLATION_ATTRIBUTE = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! ){ translation(kind: $kind, id: $id){ __typename ...on AttributeTranslatableContent{ id name translation(languageCode: $languageCode){ name } attribute { id name } } } } """ def test_translation_query_attribute( staff_api_client, translated_attribute, permission_manage_translations ): attribute = translated_attribute.attribute attribute_id = graphene.Node.to_global_id("Attribute", attribute.id) variables = { "id": attribute_id, "kind": TranslatableKinds.ATTRIBUTE.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_ATTRIBUTE, variables, permissions=[permission_manage_translations], ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["name"] == attribute.name assert data["translation"]["name"] == translated_attribute.name assert data["attribute"]["name"] == attribute.name QUERY_TRANSLATION_ATTRIBUTE_VALUE = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! 
){ translation(kind: $kind, id: $id){ __typename ...on AttributeValueTranslatableContent{ id name translation(languageCode: $languageCode){ name } attributeValue { id name } } } } """ def test_translation_query_attribute_value( staff_api_client, pink_attribute_value, translated_attribute_value, permission_manage_translations, ): attribute_value_id = graphene.Node.to_global_id( "AttributeValue", pink_attribute_value.id ) variables = { "id": attribute_value_id, "kind": TranslatableKinds.ATTRIBUTE_VALUE.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_ATTRIBUTE_VALUE, variables, permissions=[permission_manage_translations], ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["name"] == pink_attribute_value.name assert data["translation"]["name"] == translated_attribute_value.name assert data["attributeValue"]["name"] == pink_attribute_value.name QUERY_TRANSLATION_VARIANT = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! ){ translation(kind: $kind, id: $id){ __typename ...on ProductVariantTranslatableContent{ id name translation(languageCode: $languageCode){ name } productVariant { id name } } } } """ @pytest.mark.parametrize( "is_published, perm_codenames, return_variant", [ (True, ["manage_translations"], True), (False, ["manage_translations"], False), (False, ["manage_translations", "manage_products"], True), ], ) def test_translation_query_variant( staff_api_client, product, variant, variant_translation_fr, is_published, perm_codenames, return_variant, ): product.is_published = is_published product.save() variant_id = graphene.Node.to_global_id("ProductVariant", variant.id) perms = list(Permission.objects.filter(codename__in=perm_codenames)) variables = { "id": variant_id, "kind": TranslatableKinds.VARIANT.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_VARIANT, variables, permissions=perms ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["name"] == variant.name assert data["translation"]["name"] == variant_translation_fr.name if return_variant: assert data["productVariant"]["name"] == variant.name else: assert not data["productVariant"] QUERY_TRANSLATION_PAGE = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! 
){ translation(kind: $kind, id: $id){ __typename ...on PageTranslatableContent{ id title translation(languageCode: $languageCode){ title } page { id title } } } } """ @pytest.mark.parametrize( "is_published, perm_codenames, return_page", [ (True, ["manage_translations"], True), (False, ["manage_translations"], False), (False, ["manage_translations", "manage_pages"], True), ], ) def test_translation_query_page( staff_api_client, page, page_translation_fr, is_published, perm_codenames, return_page, ): page.is_published = is_published page.save() page_id = graphene.Node.to_global_id("Page", page.id) perms = list(Permission.objects.filter(codename__in=perm_codenames)) variables = { "id": page_id, "kind": TranslatableKinds.PAGE.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_PAGE, variables, permissions=perms ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["title"] == page.title assert data["translation"]["title"] == page_translation_fr.title if return_page: assert data["page"]["title"] == page.title else: assert not data["page"] QUERY_TRANSLATION_SHIPPING_METHOD = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! ){ translation(kind: $kind, id: $id){ __typename ...on ShippingMethodTranslatableContent{ id name translation(languageCode: $languageCode){ name } shippingMethod { id name } } } } """ @pytest.mark.parametrize( "perm_codenames, return_shipping_method", [ (["manage_translations"], False), (["manage_translations", "manage_shipping"], True), ], ) def test_translation_query_shipping_method( staff_api_client, shipping_method, shipping_method_translation_fr, perm_codenames, return_shipping_method, ): shipping_method_id = graphene.Node.to_global_id( "ShippingMethod", shipping_method.id ) perms = list(Permission.objects.filter(codename__in=perm_codenames)) variables = { "id": shipping_method_id, "kind": TranslatableKinds.SHIPPING_METHOD.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_SHIPPING_METHOD, variables, permissions=perms ) content = get_graphql_content(response, ignore_errors=True) data = content["data"]["translation"] assert data["name"] == shipping_method.name assert data["translation"]["name"] == shipping_method_translation_fr.name if return_shipping_method: assert data["shippingMethod"]["name"] == shipping_method.name else: assert not data["shippingMethod"] QUERY_TRANSLATION_SALE = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! 
){ translation(kind: $kind, id: $id){ __typename ...on SaleTranslatableContent{ id name translation(languageCode: $languageCode){ name } sale { id name } } } } """ @pytest.mark.parametrize( "perm_codenames, return_sale", [ (["manage_translations"], False), (["manage_translations", "manage_discounts"], True), ], ) def test_translation_query_sale( staff_api_client, sale, sale_translation_fr, perm_codenames, return_sale ): sale_id = graphene.Node.to_global_id("Sale", sale.id) perms = list(Permission.objects.filter(codename__in=perm_codenames)) variables = { "id": sale_id, "kind": TranslatableKinds.SALE.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_SALE, variables, permissions=perms ) content = get_graphql_content(response, ignore_errors=True) data = content["data"]["translation"] assert data["name"] == sale.name assert data["translation"]["name"] == sale_translation_fr.name if return_sale: assert data["sale"]["name"] == sale.name else: assert not data["sale"] QUERY_TRANSLATION_VOUCHER = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! ){ translation(kind: $kind, id: $id){ __typename ...on VoucherTranslatableContent{ id name translation(languageCode: $languageCode){ name } voucher { id name } } } } """ @pytest.mark.parametrize( "perm_codenames, return_voucher", [ (["manage_translations"], False), (["manage_translations", "manage_discounts"], True), ], ) def test_translation_query_voucher( staff_api_client, voucher, voucher_translation_fr, perm_codenames, return_voucher ): voucher_id = graphene.Node.to_global_id("Voucher", voucher.id) perms = list(Permission.objects.filter(codename__in=perm_codenames)) variables = { "id": voucher_id, "kind": TranslatableKinds.VOUCHER.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_VOUCHER, variables, permissions=perms ) content = get_graphql_content(response, ignore_errors=True) data = content["data"]["translation"] assert data["name"] == voucher.name assert data["translation"]["name"] == voucher_translation_fr.name if return_voucher: assert data["voucher"]["name"] == voucher.name else: assert not data["voucher"] QUERY_TRANSLATION_MENU_ITEM = """ query translation( $kind: TranslatableKinds!, $id: ID!, $languageCode: LanguageCodeEnum! 
){ translation(kind: $kind, id: $id){ __typename ...on MenuItemTranslatableContent{ id name translation(languageCode: $languageCode){ name } menuItem { id name } } } } """ def test_translation_query_menu_item( staff_api_client, menu_item, menu_item_translation_fr, permission_manage_translations, ): menu_item_id = graphene.Node.to_global_id("MenuItem", menu_item.id) variables = { "id": menu_item_id, "kind": TranslatableKinds.MENU_ITEM.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_MENU_ITEM, variables, permissions=[permission_manage_translations], ) content = get_graphql_content(response) data = content["data"]["translation"] assert data["name"] == menu_item.name assert data["translation"]["name"] == menu_item_translation_fr.name assert data["menuItem"]["name"] == menu_item.name def test_translation_query_incorrect_kind( staff_api_client, menu_item, permission_manage_translations ): menu_item_id = graphene.Node.to_global_id("MenuItem", menu_item.id) variables = { "id": menu_item_id, "kind": TranslatableKinds.PRODUCT.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql( QUERY_TRANSLATION_MENU_ITEM, variables, permissions=[permission_manage_translations], ) content = get_graphql_content(response) assert not content["data"]["translation"] def test_translation_query_no_permission(staff_api_client, menu_item): menu_item_id = graphene.Node.to_global_id("MenuItem", menu_item.id) variables = { "id": menu_item_id, "kind": TranslatableKinds.MENU_ITEM.name, "languageCode": LanguageCodeEnum.FR.name, } response = staff_api_client.post_graphql(QUERY_TRANSLATION_MENU_ITEM, variables) assert_no_permission(response)
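# A small hedged sketch of the Relay global-ID scheme these tests lean on:
# graphene.Node.to_global_id base64-encodes "TypeName:id", which is why the
# same helper appears before nearly every query above (requires graphene):
import base64
import graphene

gid = graphene.Node.to_global_id("Product", 5)
print(gid)                             # 'UHJvZHVjdDo1'
print(base64.b64decode(gid).decode())  # 'Product:5'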
[ "rafmme@gmail.com" ]
rafmme@gmail.com
f795d91ef6a578af3e5b22300688668c97622fb4
deb89dfe22f3668dbfafabe8b353560e605ce17e
/Optimizer.py
8f6a68f4eccf3551307ca31faf7059433549532d
[ "MIT" ]
permissive
AndrCarvalho/DroneOpenTool
012699d69f2ba443e5d11b43c3c340e6da6a2deb
db22d18bc0f2ae4f2d6338130b8b536f0d4af829
refs/heads/master
2023-08-21T21:53:50.184130
2021-10-02T18:39:31
2021-10-02T18:39:31
307,439,304
0
0
null
null
null
null
UTF-8
Python
false
false
2,151
py
import pyproj

geodesic = pyproj.Geod(ellps='WGS84')  # notice: [lon lat] notation!!!


# Removes co-linear points in sequence, do multiple times to remove every occurrence.
def remove_path_excess(path):
    new_path = [path[0]]
    for i in range(0, len(path) - 1, 2):
        if i + 2 >= len(path):
            continue
        # Project
        fw_az1, bck_az1, dist1 = geodesic.inv(path[i][1], path[i][0], path[i + 1][1], path[i + 1][0])
        fw_az2, bck_az2, dist2 = geodesic.inv(path[i + 1][1], path[i + 1][0], path[i + 2][1], path[i + 2][0])
        if abs(fw_az2 - fw_az1) >= 5:
            new_path.append(path[i + 1])
            new_path.append(path[i + 2])
    if path[len(path) - 1] not in new_path:
        new_path.append(path[len(path) - 1])
    # print("smooth:", new_path)
    return new_path


def smooth_path(path):
    new_path = [path[0]]
    for i in range(0, len(path) - 1, 2):
        if i + 2 >= len(path):
            continue
        # Project
        fw_az1, bck_az1, dist1 = geodesic.inv(path[i][1], path[i][0], path[i + 1][1], path[i + 1][0])
        fw_az2, bck_az2, dist2 = geodesic.inv(path[i + 1][1], path[i + 1][0], path[i + 2][1], path[i + 2][0])
        if abs(abs(fw_az2 - fw_az1) - 45) <= 5:
            pass  # remove
        else:
            new_path.append(path[i + 1])  # keep the node
            new_path.append(path[i + 2])
    if path[len(path) - 1] not in new_path:
        new_path.append(path[len(path) - 1])
    # print("smooth:", new_path)
    return new_path


def optimize_path(path):
    initial_path = path
    check = False
    while not check:
        old_path = path
        path = remove_path_excess(path)
        if path == old_path:
            check = True
    # print("Initial path:", len(initial_path), " | Remove Excess path:", len(path))
    # print("Difference path excess:", len(initial_path) - len(path))
    n_excess_path = path
    path = smooth_path(path)
    # print("Non-excess path:", len(n_excess_path), " | Smooth path:", len(path))
    # print("Difference path smoothing:", len(n_excess_path) - len(path))
    print("Points removed:", str(len(initial_path) - len(path)))
    return path
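# Quick usage sketch of the geodesic call the optimizer is built around
# (the coordinates are arbitrary sample points, not from any real path):
import pyproj

geod = pyproj.Geod(ellps='WGS84')
# inv() takes lon/lat pairs and returns forward azimuth, back azimuth, distance (m)
fw_az, back_az, dist = geod.inv(-0.1278, 51.5074, 2.3522, 48.8566)
print(round(fw_az, 1), "deg,", round(dist / 1000, 1), "km")  # roughly London -> Paris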
[ "AndrCarvalho@users.noreply.github.com" ]
AndrCarvalho@users.noreply.github.com
3ae3771d1e81939378ef90067c2137fbe4d29742
0c85ffd1fc2ecb2ada296a1af6afbb0b57ad4130
/sm.py
cbdf9524b55fdbfe6ae201337c7ceb5d29f3c9a4
[ "MIT" ]
permissive
sevenjames/fpie-scripts
c945d4f212dfe05cceaab537e5e03684be12b059
4aed642ca60ee1da4ffb07823a2e206957597498
refs/heads/master
2021-02-14T03:02:42.640879
2020-06-11T08:03:40
2020-06-11T08:03:40
244,760,779
0
0
null
null
null
null
UTF-8
Python
false
false
200
py
"""minimized stick mouse""" if starting: import time frame_start = time.time() mouse.deltaX = filters.delta(joystick[0].xRotation) * 0.25 time.sleep(max(1./60 - (time.time() - frame_start), 0))
[ "sevenjames@users.noreply.github.com" ]
sevenjames@users.noreply.github.com
467367f6c141bab5ccd0bf6bb614ab26e0f5199f
2c68c8e13ee4e6d37388c76d7cc1a53f0e2caf90
/LTM/For_excel/test_check_ip.py
a7f5efab584c692f186aa034fbf87e78b4e674f6
[]
no_license
oscarobwu/Memo_F5
f6b6226dfde0a7a17ece04d6439003bf1e3b48bb
53d651c8fd51e5ce792d669167a01850596998ec
refs/heads/main
2023-02-26T18:51:30.893181
2021-02-05T23:34:57
2021-02-05T23:34:57
336,413,167
0
0
null
null
null
null
UTF-8
Python
false
false
1,350
py
#!/usr/bin/python
#-*- coding: utf-8 -*-
#===============================================================================
#
#      Filename: test_check_ip.py
#
#         USAGE: test_check_ip.py
#
#   DESCRIPTION:
#
#       OPTIONS: ---
#  REQUIREMENTS: ---
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: Oscarob Wu(oscarobwu@gmail.com),
#  ORGANIZATION:
#       VERSION: 1.0
#  Created Time: 2021-01-28 18:18:47
# Last modified: 2021-01-28 18:59
#      REVISION: ---
#===============================================================================

import re
import os

os.system("")

# Group of ***different*** functions for different styles
class style():
    BLACK = '\033[30m'
    RED = '\033[91m'
    GREEN = '\033[42m'
    YELLOW = '\033[33m'
    BLUE = '\033[34m'
    MAGENTA = '\033[35m'
    CYAN = '\033[36m'
    WHITE = '\033[37m'
    UNDERLINE = '\033[4m'
    RESET = '\033[0m'

print(style.YELLOW + "Hello, World!" + '\033[0m')

ipv = input("Enter an ip address : ")
a = ipv.split('.')
s = str(bin(int(a[0])) + bin(int(a[1])) + bin(int(a[2])) + bin(int(a[3])))
s = s.replace("0b", ".")
m = re.search('\.[0,1]{1,8}\.[0,1]{1,8}\.[0,1]{1,8}\.[0,1]{1,8}$', s)
if m is not None:
    print(style.GREEN + "Valid sequence of input" + '\033[0m')
else:
    print(style.RED + "Invalid input sequence" + '\033[0m')
print("End Job Good Bye!!!")
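# For comparison, a hedged sketch of the same check via the stdlib `ipaddress`
# module, which avoids the hand-rolled binary/regex handling entirely:
import ipaddress

def is_valid_ipv4(text):
    try:
        ipaddress.IPv4Address(text)
        return True
    except ValueError:
        return False

print(is_valid_ipv4("192.168.1.1"))  # True
print(is_valid_ipv4("256.1.2.3"))    # False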
[ "root@Debian10-6-Jenkins-Ansible-02.localdomain" ]
root@Debian10-6-Jenkins-Ansible-02.localdomain
5091cde131222d65fd968409d5c999a7772611c5
975ecceab70eb4d75b1fe1b61a14c8375c291adb
/3.1_introduction_to_python_I/in_practice/12-Stu_NumberChain/number_chain.py
eb70bb0b43edbb154ae8e222d54796be5843789a
[]
no_license
carlabeltran/data_analytics_visualization
82306b44570ba89ef298d8cf07f9151e0d7cb031
7985138ff3fbbdcf077c08aaea6dcb64f12b9a22
refs/heads/master
2020-12-09T17:38:34.458647
2020-01-12T03:03:35
2020-01-12T03:03:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
377
py
user_plays = "y" starting_number = 0 while user_plays == "y": user_number = int(input("How many numbers?")) for number in range(starting_number, user_number + starting_number): print(number) starting_number = user_number + starting_number user_plays = input( "Do you want to play again? Enter 'y' to play again. Enter 'n' to exit the game.")
[ "philipstubbs13@gmail.com" ]
philipstubbs13@gmail.com
256be955ac5bbbb095774605fb0337a77bc3fa15
8152b055c7275751ac56476218012448de1bfd57
/mia/xmgr/views.py
29fc63a4ea48ac80acada10b5c56efe657370cd6
[]
no_license
gregfelice/mia
7f1376c2e1a54dd9038b66bd5e71941a7d30410b
4a3a0ff6670e385d70abf6c793bc91d927e8a859
refs/heads/master
2021-01-17T12:00:45.455553
2016-07-21T03:33:26
2016-07-21T03:33:26
63,834,246
0
0
null
null
null
null
UTF-8
Python
false
false
543
py
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic

from .models import Person


class IndexView(generic.ListView):
    template_name = 'index.html'
    context_object_name = 'people'

    def get_queryset(self):
        return Person.objects.all()


class DetailView(generic.DetailView):
    model = Person
    template_name = 'detail.html'
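# A hypothetical urls.py that would wire up these class-based views; the
# app_name and URL names are assumptions, not taken from the project (the
# old-style url() matches the pre-2.0 Django this module imports from):
from django.conf.urls import url

from . import views

app_name = 'xmgr'
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
]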
[ "gregf@bogo.constantlabs.com" ]
gregf@bogo.constantlabs.com
33189ef2893eeb9cacde0e3f2a86934c39a8868e
78f85c9d1587244c8a5aa087f88d4af862865858
/english_speak_backend/storage/csv_converter2.py
1b8d17fbf12c12105e45f530d1ba2cce6845e1b6
[]
no_license
fedor-malyshkin/english-speak-backend
ceaf7759bac4a1ff5ef7cf8b2bfe327701393cf8
28d75646a4acd45f18209b3c28ba8a2bef1edded
refs/heads/master
2023-03-08T13:33:19.940272
2021-02-13T17:40:15
2021-02-13T17:40:15
264,500,506
0
0
null
2021-02-13T17:40:16
2020-05-16T18:26:18
Python
UTF-8
Python
false
false
4,565
py
import csv
import re

from english_speak_backend.storage import builder

REGEXP_MEANING_SAMPLE = r'\((.*)\)'
REGEXP_NOTE_VARIANT = r'var\. [0-9]'


def convert_to_map(row):
    return {'name': row[0], 'meaning': row[1], 'sample': row[2], 'note': row[3]}


class CsvConverter2:
    def __init__(self):
        self.orig = []
        self.additional = []
        self.aggregated = []

    def read(self):
        phrasal_verbs_data = builder.flat_file_pairs("data/expressions.list")
        self.orig = phrasal_verbs_data.get_all()
        with open("data/csv/lw-output.csv") as fp:
            reader = csv.reader(fp, delimiter=",", quotechar='"')
            self.additional = [convert_to_map(row) for row in reader]

    @staticmethod
    def convert_ep(m):
        m['plain_name'] = CsvConverter2.remove_spare_spaces(m['name'])
        m['meaning'] = CsvConverter2.remove_spare_spaces(m['meaning'])
        m['sample'] = CsvConverter2.remove_spare_spaces(m['sample'])
        m['note'] = CsvConverter2.remove_spare_spaces(m['note'])
        return m

    @staticmethod
    def convert_ep_name(name):
        name = name.replace("*", "").strip()
        name = name.replace("+", "").strip()
        return name

    def convert(self):
        self.additional = [CsvConverter2.convert_ep(map) for map in self.additional]
        self.orig = [CsvConverter2.convert_original(map) for map in self.orig
                     if map['name'].startswith("LW:")]
        self.aggregated = self.orig
        self.aggregated.extend(self.additional)

    @staticmethod
    def convert_original(m):
        m['plain_name'], m['meaning'] = CsvConverter2.extract_name_and_meaning(m['name'])
        m['plain_name'] = CsvConverter2.removeLW(m['plain_name'])
        m['sample'] = CsvConverter2.remove_spare_spaces(m.get('meaning'))
        m['note'] = CsvConverter2.remove_spare_spaces(m.get('note'))
        return m

    @staticmethod
    def extract_name_and_meaning(input2):
        m = re.search(r'(.+) += (.+)', input2)
        if m is not None:
            return m.group(1).strip(), m.group(2).strip()
        else:
            return '', ''

    @staticmethod
    def convert_original_name(name):
        name = re.sub(r'(sb|sth)/(sth|sb)?', "", name).strip()
        name = re.sub(r'(smb|sth)/(sth|smb)?', "", name).strip()
        name = re.sub(r'(somewhere|sth)/(somewhere|sth)?', "", name).strip()
        name = re.sub(r' (sb|smb|somebody|someone)', "", name).strip()
        name = re.sub(r' (something|sth)', "", name).strip()
        return name

    @staticmethod
    def removeLW(input2):
        if not input2:
            return ""
        m = re.search(r'LW:(.+)', input2)
        if m is not None:
            return m.group(1).strip()
        else:
            return ""

    @staticmethod
    def extract_original_sample(input2):
        if not input2:
            return ""
        reg_expression = REGEXP_MEANING_SAMPLE
        m = re.search(reg_expression, input2)
        if m is not None:
            return m.group(1).strip()
        else:
            return ""

    @staticmethod
    def remove_original_sample(input2):
        if not input2:
            return ""
        reg_expression = REGEXP_MEANING_SAMPLE
        m = re.search(reg_expression, input2)
        if m is not None:
            return re.sub(REGEXP_MEANING_SAMPLE, "", input2).strip()
        else:
            return input2

    @staticmethod
    def remove_original_variants(input2):
        if not input2:
            return ""
        reg_expression = REGEXP_NOTE_VARIANT
        m = re.search(reg_expression, input2)
        if m is not None:
            return re.sub(REGEXP_NOTE_VARIANT, "", input2).strip()
        else:
            return input2

    @staticmethod
    def remove_spare_spaces(input2):
        if not input2:
            return ""
        return input2.replace("  ", " ")

    def output(self):
        """ Write data to a CSV file path """
        with open("data/csv/lw.csv", "w") as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            writer.writerow(['plain_name', 'meaning', 'sample', 'note'])
            for line in self.aggregated:
                writer.writerow([line['plain_name'], line['meaning'],
                                 line['sample'], line['note']])


if __name__ == "__main__":
    cnv = CsvConverter2()
    cnv.read()
    cnv.convert()
    cnv.output()
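# Standalone illustration of the name/meaning split performed by
# extract_name_and_meaning (the sample string is invented):
import re

m = re.search(r'(.+) += (.+)', 'LW: look up = find information in a reference book')
if m is not None:
    print(m.group(1).strip())  # 'LW: look up'
    print(m.group(2).strip())  # 'find information in a reference book'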
[ "fedor.malyshkin@yandex.ru" ]
fedor.malyshkin@yandex.ru
e5a79048e3102c2eba2db114ca4160c3e3953ac2
73f6ba42a793d18ad5b4c44cfdc278e51aa1b9b0
/perum/admin.py
826368cf4da71c07441a9b74c16ab876d01bd44b
[]
no_license
akbarlintang/perumahan
e14eb922a86c76581d8faae5700ff21e83ba13ee
66c908a382bc32e9b9abc69b3a6f22eab12d8d2c
refs/heads/main
2022-12-30T08:11:23.856824
2020-10-22T15:33:30
2020-10-22T15:33:30
306,377,878
0
0
null
null
null
null
UTF-8
Python
false
false
316
py
from django.contrib import admin

# Register your models here.
from .models import *

admin.site.register(Pelanggan)
admin.site.register(Unit)
admin.site.register(Administrasi)
admin.site.register(Harga)
admin.site.register(Angsuran)
admin.site.register(Booking)
admin.site.register(Lokasi)
admin.site.register(Tipe)
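# A hedged alternative for one of the registrations above: a ModelAdmin
# subclass would replace the plain register() call for that model (the
# model's fields are not shown here, so only 'id' is used):
from django.contrib import admin
from .models import Pelanggan

@admin.register(Pelanggan)
class PelangganAdmin(admin.ModelAdmin):
    list_display = ('id',)  # 'id' always exists; real field names are unknown here
    ordering = ('id',)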
[ "lintangajiakbar@gmail.com" ]
lintangajiakbar@gmail.com
4c3c32a1d4681cd40a82b7f1ba078e8eca062f0a
9aea1b19a8681b4c6b15d628a080982fb2d98b39
/mianJing111111/geeksforgeeks/tree/Convert a BST to a Binary Tree such that sum of all greater keys is added to every key.py
b7844963d0363ace092c3fb4c04ebe355d2e84b6
[]
no_license
yzl232/code_training
ee7612efc6f166742fcf48e1af715f57a624d3aa
fc165027c3d7b1fec58ebfad2f9ada275a6b8c03
refs/heads/master
2021-01-21T04:32:02.522931
2016-07-01T21:35:29
2016-07-01T21:35:29
26,989,266
2
0
null
null
null
null
UTF-8
Python
false
false
1,276
py
# encoding=utf-8
'''
Convert a BST to a Binary Tree such that sum of all greater keys is added to every key

Given a Binary Search Tree (BST), convert it to a Binary Tree such that every key
of the original BST is changed to key plus sum of all greater keys in BST.

Examples:
Input: Root of following BST
      5
     / \
    2   13

Output: The given BST is converted to following Binary Tree
     18
     / \
   20   13

Source: Convert a BST

Solution: Do reverse inorder traversal. Keep track of the sum of nodes visited so
far; let this sum be `sum`. For every node currently being visited, first add the
key of this node to the sum, i.e. sum = sum + node->key. Then change the key of
the current node to the sum, i.e. node->key = sum. When a BST is traversed in
reverse inorder, for every key currently being visited, the keys already visited
are exactly the greater keys.
'''


# Traversal order: right, root, left.
# Because the BST is already sorted, this makes the problem much easier.
class Solution:
    def addGreater(self, root):
        self.s = 0
        self.dfs(root)

    def dfs(self, root):
        if not root:
            return
        self.dfs(root.right)
        self.s += root.val
        root.val = self.s
        self.dfs(root.left)
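A minimal self-check sketch for the traversal above. It assumes nodes expose .val, .left and .right, as the code does; the original file defines no node class, so this one is hypothetical:

class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = Node(5, Node(2), Node(13))
Solution().addGreater(root)
print(root.val, root.left.val, root.right.val)  # 18 20 13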
[ "buptyuzhenglin@gmail.com" ]
buptyuzhenglin@gmail.com
4f9d5463d4ac234ca46cd78969c11d0fe88dc7e6
3bd31700ebf05dcbdf16ba622fcc1551522c3118
/denoise/denoise_shrink_mask.py
e52f41c315cb391f09f5aa6e16a435e4b5f3623a
[ "MIT" ]
permissive
yl3506/iMVPD_denoise
78a410ad8093e213078af92c354ef8aeacd28100
96dff9d4a72086d1b7348403d0c71c43b5456933
refs/heads/master
2020-03-31T06:14:57.865578
2019-01-18T05:01:46
2019-01-18T05:01:46
151,973,975
2
0
null
null
null
null
UTF-8
Python
false
false
1,914
py
# generate noise mask and shrink it by 1 voxel on the surface
import os, json
import nibabel as nib
import numpy as np
from scipy import ndimage

# initialize data
### work_dir = '/mindhive/saxelab3/anzellotti/forrest/derivatives/fmriprep/'
### all_subjects = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-09', 'sub-10', 'sub-14', 'sub-15', 'sub-16', 'sub-17', 'sub-18', 'sub-19', 'sub-20']
work_dir = '/Users/chloe/Documents/'
all_subjects = ['sub-01']
masks = ['_T1w_space-MNI152NLin2009cAsym_class-CSF_probtissue.nii.gz',
         '_T1w_space-MNI152NLin2009cAsym_class-WM_probtissue.nii.gz']
mask_thr = 0.5
shrink_size = 1  # shrink mask by this number of voxels on the boundary

# iterate through all subjects
for sub in all_subjects:
    # generate the union mask
    # initialize info
    sub_dir = work_dir + sub + '_complete/'
    mask_dir = sub_dir + 'anat/'
    sub_out_dir = work_dir + sub + '_complete/' + sub + '_ROIs/'
    if not os.path.exists(sub_out_dir):
        os.makedirs(sub_out_dir)
    # load masks
    mask_CSF = nib.load(mask_dir + sub + masks[0])
    mask_WM = nib.load(mask_dir + sub + masks[1])
    mask_CSF_affine = mask_CSF.affine
    mask_WM_affine = mask_WM.affine
    mask_CSF_header = mask_CSF.header
    mask_CSF = mask_CSF.get_data()
    mask_WM = mask_WM.get_data()
    # the CSF and WM masks should have the same shape;
    # make the union of the two masks, filtered with the threshold
    mask_union = np.zeros(mask_CSF.shape)
    for x in range(0, mask_CSF.shape[0]):
        for y in range(0, mask_CSF.shape[1]):
            for z in range(0, mask_CSF.shape[2]):
                if mask_CSF[x, y, z] >= mask_thr or mask_WM[x, y, z] >= mask_thr:
                    mask_union[x, y, z] = 1
    # shrink the mask
    mask_union = ndimage.binary_erosion(mask_union, iterations=shrink_size).astype(int)
    # save the shrunken mask
    mask_union_img = nib.Nifti1Image(mask_union, mask_CSF_affine, mask_CSF_header)
    nib.save(mask_union_img, sub_out_dir + sub + '_CSF_WM_mask_union_bin_shrinked.nii.gz')
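A toy 2-D illustration of the erosion step, independent of the fMRI data paths assumed above (synthetic array, editor addition):

import numpy as np
from scipy import ndimage

toy = np.zeros((5, 5), dtype=int)
toy[1:4, 1:4] = 1  # a 3x3 block of ones
eroded = ndimage.binary_erosion(toy, iterations=1).astype(int)
print(eroded)      # only the centre cell (2, 2) survives one erosion pass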
[ "yl3506@nyu.edu" ]
yl3506@nyu.edu
c63ab88a1b3faf2cb4abf1a0c1bb74d644de8b85
e806a4ca5276d5bbe6c79806986aadd105de6490
/pdp7_pt/measures/admin.py
3e36d026e6004c4743fa0ceb4f924274c3217b26
[]
no_license
alexpdp7/pdp7_pt
36d5ea058040cfee4a2536d894db4339a2b9cd14
c5568c4a85f1d716d452607af73ea1ebc468e12c
refs/heads/master
2020-04-17T20:16:20.983886
2020-03-27T15:23:04
2020-03-27T15:23:04
66,585,470
0
0
null
2016-08-28T11:36:10
2016-08-25T19:06:39
Python
UTF-8
Python
false
false
301
py
from django.contrib import admin

from pdp7_pt.measures import models


class MeasureAdmin(admin.ModelAdmin):
    date_hierarchy = 'when'


admin.site.register(models.Series)
admin.site.register(models.FloatMeasure, MeasureAdmin)
admin.site.register(models.DoublePositiveIntegerMeasure, MeasureAdmin)
[ "alex@pdp7.net" ]
alex@pdp7.net
3b01ef52fe6dd9707ec569c0266c5d04749fc773
9e131c705936db8b9d3a473a13c5f17aab5b6bda
/tensorflow_/tensorflowcv/models/preresnet.py
a8d923f83115ed214bcd917f4303382cc7962deb
[ "MIT" ]
permissive
kolingv/imgclsmob
0b792cdcc88abbfb63bebbe8a45e5772e755aed0
11e77cf36f0c99be044597d5be29313be394a692
refs/heads/master
2020-04-23T14:12:08.784090
2019-02-17T19:39:48
2019-02-17T19:39:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
27,590
py
""" PreResNet, implemented in TensorFlow. Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. """ __all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnet16', 'preresnet18_wd4', 'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet34', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152', 'preresnet152b', 'preresnet200', 'preresnet200b', 'preres_block', 'preres_bottleneck_block', 'preres_init_block', 'preres_activation'] import os import tensorflow as tf from .common import pre_conv1x1_block, pre_conv3x3_block, conv2d, conv1x1, batchnorm, maxpool2d, is_channels_first,\ flatten def preres_block(x, in_channels, out_channels, strides, training, data_format, name="preres_block"): """ Simple PreResNet block for residual path in PreResNet unit. Parameters: ---------- x : Tensor Input tensor. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. training : bool, or a TensorFlow boolean scalar tensor Whether to return the output in training mode or in inference mode. data_format : str The ordering of the dimensions in tensors. name : str, default 'preres_block' Block name. Returns ------- tuple of two Tensors Resulted tensor and preactivated input tensor. """ x, x_pre_activ = pre_conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, return_preact=True, training=training, data_format=data_format, name=name + "/conv1") x = pre_conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, training=training, data_format=data_format, name=name + "/conv2") return x, x_pre_activ def preres_bottleneck_block(x, in_channels, out_channels, strides, conv1_stride, training, data_format, name="preres_bottleneck_block"): """ PreResNet bottleneck block for residual path in PreResNet unit. Parameters: ---------- x : Tensor Input tensor. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. training : bool, or a TensorFlow boolean scalar tensor Whether to return the output in training mode or in inference mode. data_format : str The ordering of the dimensions in tensors. name : str, default 'preres_bottleneck_block' Block name. Returns ------- tuple of two Tensors Resulted tensor and preactivated input tensor. """ mid_channels = out_channels // 4 x, x_pre_activ = pre_conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=(strides if conv1_stride else 1), return_preact=True, training=training, data_format=data_format, name=name + "/conv1") x = pre_conv3x3_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=(1 if conv1_stride else strides), training=training, data_format=data_format, name=name + "/conv2") x = pre_conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, training=training, data_format=data_format, name=name + "/conv3") return x, x_pre_activ def preres_unit(x, in_channels, out_channels, strides, bottleneck, conv1_stride, training, data_format, name="preres_unit"): """ PreResNet unit with residual connection. Parameters: ---------- x : Tensor Input tensor. in_channels : int Number of input channels. out_channels : int Number of output channels. 
strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. training : bool, or a TensorFlow boolean scalar tensor Whether to return the output in training mode or in inference mode. data_format : str The ordering of the dimensions in tensors. name : str, default 'preres_unit' Unit name. Returns ------- Tensor Resulted tensor. """ identity = x if bottleneck: x, x_pre_activ = preres_bottleneck_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=conv1_stride, training=training, data_format=data_format, name=name + "/body") else: x, x_pre_activ = preres_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, training=training, data_format=data_format, name=name + "/body") resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: identity = conv1x1( x=x_pre_activ, in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name=name + "/identity_conv/conv") x = x + identity return x def preres_init_block(x, in_channels, out_channels, training, data_format, name="preres_init_block"): """ PreResNet specific initial block. Parameters: ---------- x : Tensor Input tensor. in_channels : int Number of input channels. out_channels : int Number of output channels. training : bool, or a TensorFlow boolean scalar tensor Whether to return the output in training mode or in inference mode. data_format : str The ordering of the dimensions in tensors. name : str, default 'preres_init_block' Block name. Returns ------- Tensor Resulted tensor. """ x = conv2d( x=x, in_channels=in_channels, out_channels=out_channels, kernel_size=7, strides=2, padding=3, use_bias=False, data_format=data_format, name=name + "/conv") x = batchnorm( x=x, training=training, data_format=data_format, name=name + "/bn") x = tf.nn.relu(x, name=name + "/activ") x = maxpool2d( x=x, pool_size=3, strides=2, padding=1, data_format=data_format, name=name + "/pool") return x def preres_activation(x, training, data_format, name="preres_activation"): """ PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block. Parameters: ---------- x : Tensor Input tensor. training : bool, or a TensorFlow boolean scalar tensor Whether to return the output in training mode or in inference mode. data_format : str The ordering of the dimensions in tensors. name : str, default 'preres_activation' Block name. Returns ------- Tensor Resulted tensor. """ x = batchnorm( x=x, training=training, data_format=data_format, name=name + "/bn") x = tf.nn.relu(x, name=name + "/activ") return x class PreResNet(object): """ PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(PreResNet, self).__init__(**kwargs) assert (data_format in ["channels_last", "channels_first"]) self.channels = channels self.init_block_channels = init_block_channels self.bottleneck = bottleneck self.conv1_stride = conv1_stride self.in_channels = in_channels self.in_size = in_size self.classes = classes self.data_format = data_format def __call__(self, x, training=False): """ Build a model graph. Parameters: ---------- x : Tensor Input tensor. training : bool, or a TensorFlow boolean scalar tensor, default False Whether to return the output in training mode or in inference mode. Returns ------- Tensor Resulted tensor. """ in_channels = self.in_channels x = preres_init_block( x=x, in_channels=in_channels, out_channels=self.init_block_channels, training=training, data_format=self.data_format, name="features/init_block") in_channels = self.init_block_channels for i, channels_per_stage in enumerate(self.channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = preres_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=self.bottleneck, conv1_stride=self.conv1_stride, training=training, data_format=self.data_format, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = preres_activation( x=x, training=training, data_format=self.data_format, name="features/post_activ") x = tf.layers.average_pooling2d( inputs=x, pool_size=7, strides=1, data_format=self.data_format, name="features/final_pool") # x = tf.layers.flatten(x) x = flatten( x=x, data_format=self.data_format) x = tf.layers.dense( inputs=x, units=self.classes, name="output") return x def get_preresnet(blocks, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join('~', '.tensorflow', 'models'), **kwargs): """ Create PreResNet or SE-PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. 
""" if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14: layers = [2, 2, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) net = PreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_state_dict net.state_dict, net.file_path = download_state_dict( model_name=model_name, local_model_store_dir_path=root) else: net.state_dict = None net.file_path = None return net def preresnet10(**kwargs): """ PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=10, model_name="preresnet10", **kwargs) def preresnet12(**kwargs): """ PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=12, model_name="preresnet12", **kwargs) def preresnet14(**kwargs): """ PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=14, model_name="preresnet14", **kwargs) def preresnet16(**kwargs): """ PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. 
""" return get_preresnet(blocks=16, model_name="preresnet16", **kwargs) def preresnet18_wd4(**kwargs): """ PreResNet-18 model with 0.25 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs) def preresnet18_wd2(**kwargs): """ PreResNet-18 model with 0.5 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs) def preresnet18_w3d4(**kwargs): """ PreResNet-18 model with 0.75 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs) def preresnet18(**kwargs): """ PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=18, model_name="preresnet18", **kwargs) def preresnet34(**kwargs): """ PreResNet-34 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=34, model_name="preresnet34", **kwargs) def preresnet50(**kwargs): """ PreResNet-50 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=50, model_name="preresnet50", **kwargs) def preresnet50b(**kwargs): """ PreResNet-50 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs) def preresnet101(**kwargs): """ PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=101, model_name="preresnet101", **kwargs) def preresnet101b(**kwargs): """ PreResNet-101 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs) def preresnet152(**kwargs): """ PreResNet-152 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=152, model_name="preresnet152", **kwargs) def preresnet152b(**kwargs): """ PreResNet-152 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs) def preresnet200(**kwargs): """ PreResNet-200 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=200, model_name="preresnet200", **kwargs) def preresnet200b(**kwargs): """ PreResNet-200 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns ------- functor Functor for model graph creation with extra fields. 
""" return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs) def _test(): import numpy as np data_format = "channels_last" pretrained = False models = [ preresnet10, preresnet12, preresnet14, preresnet16, preresnet18_wd4, preresnet18_wd2, preresnet18_w3d4, preresnet18, preresnet34, preresnet50, preresnet50b, preresnet101, preresnet101b, preresnet152, preresnet152b, preresnet200, preresnet200b, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) x = tf.placeholder( dtype=tf.float32, shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3), name="xx") y_net = net(x) weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != preresnet10 or weight_count == 5417128) assert (model != preresnet12 or weight_count == 5491112) assert (model != preresnet14 or weight_count == 5786536) assert (model != preresnet16 or weight_count == 6967208) assert (model != preresnet18_wd4 or weight_count == 830680) assert (model != preresnet18_wd2 or weight_count == 3055048) assert (model != preresnet18_w3d4 or weight_count == 6674104) assert (model != preresnet18 or weight_count == 11687848) assert (model != preresnet34 or weight_count == 21796008) assert (model != preresnet50 or weight_count == 25549480) assert (model != preresnet50b or weight_count == 25549480) assert (model != preresnet101 or weight_count == 44541608) assert (model != preresnet101b or weight_count == 44541608) assert (model != preresnet152 or weight_count == 60185256) assert (model != preresnet152b or weight_count == 60185256) assert (model != preresnet200 or weight_count == 64666280) assert (model != preresnet200b or weight_count == 64666280) with tf.Session() as sess: if pretrained: from .model_store import init_variables_from_state_dict init_variables_from_state_dict(sess=sess, state_dict=net.state_dict) else: sess.run(tf.global_variables_initializer()) x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32) y = sess.run(y_net, feed_dict={x: x_value}) assert (y.shape == (1, 1000)) tf.reset_default_graph() if __name__ == "__main__": _test()
[ "osemery@gmail.com" ]
osemery@gmail.com
696666f703b9b3f54267ca9038eff6745d675137
e3da426fbe6be0f2c90a83bb8f2354e1bb3fc740
/app/modules/curso.py
796a0244780aa753d144098c8dc193e4db06d159
[]
no_license
raloxxx/proyectoprueba2
3789b80c533209fa8d66674787a7b6b0be17d47c
eae662a8643241c061143b6471a7ae69c3136317
refs/heads/master
2022-11-17T12:12:31.794620
2020-07-16T05:20:25
2020-07-16T05:20:25
275,482,504
0
1
null
null
null
null
UTF-8
Python
false
false
478
py
# A class is defined by attributes and methods
# attributes = variables
# methods = functions

# Curso (course)
# instances:
# Matematica  Comunicacion  Religion
class Curso:
    def __init__(self, id, nombre):
        self.id = id
        self.nombre = nombre

    def getId(self):
        return self.id

    def setId(self, id):
        self.id = id

    def getNombre(self):
        return self.nombre

    def setNombre(self, nombre):
        self.nombre = nombre
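Example instantiation (hypothetical values, editor addition):

matematica = Curso(1, "Matematica")
print(matematica.getId(), matematica.getNombre())  # 1 Matematica
matematica.setNombre("Comunicacion")
print(matematica.getNombre())                      # Comunicacion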
[ "juan.15121001@gmail.com" ]
juan.15121001@gmail.com
1f1c4775c1f9cb17c43d65e1d87a0b05f31bbbfd
4fc0d62e546c8d73971f8a460c291494c1b2133f
/main.py
98e64ba7b4ab5f2dd16973c05a14f74dd2d7b033
[]
no_license
NicKomarov73/git_test
55c87425e7f6e4348ba4f6cb6c2fbbf00f4324b5
12380c47aeac4511bb12b2ddda0f6755bdad9b0b
refs/heads/main
2023-01-09T15:57:13.661918
2020-11-14T14:02:40
2020-11-14T14:02:40
312,816,898
0
0
null
null
null
null
UTF-8
Python
false
false
162
py
from tkinter import *

# creating window
window = Tk()
window.title("Testing system")
window.geometry('400x500')
window.resizable(False, False)

window.mainloop()
[ "komarov@lic145.kiev.ua" ]
komarov@lic145.kiev.ua
b27818b5634093e58370be65ddee9767958e0082
0620d5e4489a9ab59fd07dc9db424ebb7e1d2b53
/Tensorflow/ActivMaxim1/script.py
45251cc0139f25221d8e566c9e60e40d77a120e4
[]
no_license
lephamcong/LAID
22e15b088293c304b561d8c60fc3d4333caacad0
356ab14dedfded27d7915c0c7c029b7182765552
refs/heads/master
2023-04-14T23:38:15.127191
2018-10-27T19:36:12
2018-10-27T19:36:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,725
py
import tensorflow as tf
import numpy as np
import jmlipman.ml
import time, os
from Model import Model as MNIST_Model
import scipy.misc
import itertools


class ActivationMaximization(MNIST_Model):
    def __init__(self, config, weights=None):
        self.conf = config
        self.placeholder = {}
        self.sess = tf.Session()
        self.load_weights(weights)
        self.create_train_step()

    def train(self, target):
        """This function trains the network."""
        print("Training")
        ep = self.conf["epochs"]
        # Preprocess the data
        # np.random.seed(seed=42)
        # res = [np.random.random((28*28))]
        res = [np.zeros(28 * 28)]
        Y = jmlipman.ml.hotvector(np.array([target]), self.conf["classes"])
        np.save("res/other", res)

        for e in range(ep):
            [res, loss, pred] = self.sess.run(
                [self.train_step, self.loss, self.prediction],
                feed_dict={self.placeholder["input"]: np.expand_dims(res[0], 0),
                           self.placeholder["output"]: Y})
            print(e, loss, np.argmax(pred))
            res2 = np.reshape(res[0], (28, 28))
            np.save("res/other", res2)
            scipy.misc.imsave("res/other.png", res2)

    def predict(self, data):
        pred = self.sess.run(self.prediction,
                             feed_dict={self.placeholder["input"]: data})
        return pred


# Get the data and split it
# data = jmlipman.ml.getDataset("mnist")
# data = jmlipman.ml.splitDataset(data, [0.8, 0.2, 0])

# Configuration part
learning_rate = 0.1
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
initializer = tf.keras.initializers.he_uniform()
classes = 10
sizes = [28, 28]
epochs = 10

config = {"optimizer": optimizer,
          "learning_rate": learning_rate,
          "classes": classes,
          "sizes": sizes,
          "epochs": epochs,
          "initializer": initializer}

AM = ActivationMaximization(config, weights="weights/")
AM.train(2)
[ "jvmlipman@hotmail.com" ]
jvmlipman@hotmail.com
d586790bd7bff025ce7f4098a3885a42d649403c
36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1
/tour/migrations/0019_auto_20181209_0803.py
40dfec86d7904ad87c59944319dc512cd92ec9df
[]
no_license
phufoxy/fotourNew
801ab2518424118020dc6e5f31a7ba90a654e56a
6048c24f5256c8c5a0d18dc7b38c106a7c92a29c
refs/heads/master
2023-04-13T01:34:22.510717
2018-12-26T03:46:09
2018-12-26T03:46:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
404
py
# Generated by Django 2.1 on 2018-12-09 01:03

import ckeditor.fields
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('tour', '0018_auto_20181209_0730'),
    ]

    operations = [
        migrations.AlterField(
            model_name='placetour',
            name='description',
            field=ckeditor.fields.RichTextField(),
        ),
    ]
[ "vanphudhsp2015@gmail.com" ]
vanphudhsp2015@gmail.com
a186c3a680db9ce50eb284cddc769a829a1f71b5
7865087569122e271b34f3d46fb011c445f233dc
/Modern_Python3_Bootcamp/Modules/Custom_modules/Fruits.py
5e838541535e0eb3b17182459eae6d134d4dcaa7
[]
no_license
mikaelbeat/Modern_Python3_Bootcamp
1cc5a83fef65377b6cebbeba91538bb94703b0cd
b5f281a718c50097421210507efa351e9780688d
refs/heads/master
2020-05-06T12:29:40.430456
2019-06-07T07:22:14
2019-06-07T07:22:14
180,125,537
0
0
null
null
null
null
UTF-8
Python
false
false
86
py
import Bananas, Apples

print(Bananas.dip_in_chocolate())
print(Apples.offer())
[ "ryynanenphm@gmail.com" ]
ryynanenphm@gmail.com
2f3980ef2f1f859c6455897332790c674646fc9a
cc2a312f1ab75c6970b40d18a82454a6909648bd
/python-module-study/exercise.py
da2ca6f2b3681aeda943691d0167ad15d891fec3
[]
no_license
FantasybabyChange/python-study
f68317b6a6ef683f109b66d4637d63d9c97b8848
fb4ca2466ccd9ebacfcc54bf8759533cc941e834
refs/heads/master
2023-01-23T19:18:32.748497
2022-12-27T03:20:41
2022-12-27T03:20:41
240,224,390
2
1
null
null
null
null
UTF-8
Python
false
false
88
py
# 1. Import the os module and use help(os) to view the os module's documentation
import os

help(os)
[ "625353861@qq.com" ]
625353861@qq.com
b8480b8605b74221a144d204ed4fea2604c6faa9
5f2be2a5a01f0449a8e75947b60ea403f8b61e9a
/6.py
24fdb05719e2c4a89d8e3d144e160ec6791b788c
[]
no_license
Rafael-F-S/OPP
a5c6bf0981a2a75981dcb6fd81c1ee6cb3a43a00
195af023210503439986b8e1f76b8bf34009931e
refs/heads/master
2020-05-18T04:57:56.738877
2019-04-30T04:23:02
2019-04-30T04:23:02
184,189,745
0
0
null
null
null
null
UTF-8
Python
false
false
1,121
py
""" class Employee: raise_amt = 1.04 def __init__(self, first, last, ): self.first = first self.last = last self.email = first+"."+last+"@company.com" def fullname(self): return "{} {}".format(self.first, self.last) emp_1 = Employee("Corey", "Schafer") emp_1.first = "Jim" print(emp_1.first) print(emp_1.email) print(emp_1.fullname()) """ class Employee: raise_amt = 1.04 def __init__(self, first, last, ): self.first = first self.last = last @property def email(self): return '{}.{}@company.com'.format(self.first, self.last) @property def fullname(self): return "{} {}".format(self.first, self.last) @fullname.setter def fullname(self, name): first, last = name.split(' ') self.first = first self.last = last @fullname.deleter def fullname(self): print('Delete Name') self.first = None self.last = None emp_1 = Employee("Rafael", "Silva") emp_1.first = "Jim" print(emp_1.first) print(emp_1.email) print(emp_1.fullname) del emp_1.fullname
[ "rfsminas89@gmail.com" ]
rfsminas89@gmail.com
d70262d86638bee1df9318ad355ac96f77fa37f0
da0adac2efc05ee27e6545f5b5e311cd780300a9
/src/t1000/application/result/result_abstract.py
fa92d975183660b9e9728958d024317fe0615bc4
[ "MIT" ]
permissive
helcerion/T1000
9986ed645cf5ddb3dec1075206d856084ac97c18
25684e88dc8adb37fe07ff358f84f797f7b9c716
refs/heads/develop
2022-12-23T21:01:55.846337
2019-10-29T18:34:54
2019-10-29T18:34:54
218,250,199
1
0
MIT
2022-12-08T06:47:33
2019-10-29T09:29:33
Python
UTF-8
Python
false
false
614
py
from ..command import command_abstract


class ResultAbstract():
    _command: command_abstract.CommandAbstract

    def __init__(self, command: command_abstract.CommandAbstract, resource):
        self._command = command
        self._resource = resource

    def set_command(self, command: command_abstract.CommandAbstract):
        self._command = command

    def set_resource(self, resource):
        self._resource = resource

    def get(self, *args, **kwargs):
        raise Exception(
            'You need to implement get function from %s.'
            % (self.__class__.__name__)
        )
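A hypothetical concrete subclass sketching the intended extension point (the name is invented; the real result classes live elsewhere in the package):

class EchoResult(ResultAbstract):
    """Trivial implementation that hands back the stored resource."""

    def get(self, *args, **kwargs):
        return self._resource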
[ "alonso.albert@gmail.com" ]
alonso.albert@gmail.com
b899c2f940469ffda705dc73fa659605f21e977a
913d22cf09d5e6cde0e66e11215794b7e64b92b6
/alien_color_6.py
432cbef15957f145ab519be3cc0ad8504455184b
[]
no_license
chenp0088/git
94beb31b6c90fe477ad3764fe73f3f1807ed4999
0a9782ca00d3d217fc7989e0bb5107d3f49066c2
refs/heads/master
2020-03-25T21:04:41.439874
2019-06-15T00:43:10
2019-06-15T00:43:10
144,158,352
0
0
null
null
null
null
UTF-8
Python
false
false
138
py
alien_color = ['green', 'yellow', 'red']
alien = 'green'
if alien == 'green':
    print("You get 5 points")
else:
    print("You get 10 points")
[ "pc008@sohu.com" ]
pc008@sohu.com
489d849197b680e79a7a8cd6da2aadf65e574532
7dbe31a463e96de737a3004eba413f8d94923a66
/PythonTutorial/helloworld.py
0e96c4c1af5dda5d020d4f72ff63a58cddaa35c9
[]
no_license
thienlerva/PythonTutorial
66abbdedff9147f06cedf680a519ee9512c5030a
c93dae23168a8d38559d8346fc3047509addf7a3
refs/heads/master
2023-03-02T11:27:10.087916
2021-02-16T20:09:32
2021-02-16T20:09:32
336,053,626
0
0
null
null
null
null
UTF-8
Python
false
false
773
py
# run script: python helloworld.py
x = "Hello World"
print(x)

fruits = []
fruits.append("orange")
print(fruits)

colors = ["yellow", "green", "blue"]
colors.append("gray")
colors.sort()
print(colors)

numList = [50, 100, 65, 82, 23]
numList.sort(reverse=True)
print(numList)

def comparable(n):
    return abs(n - 50)

numList.sort(key=comparable)
print(numList)

sortFruits = ["banana", "Orange", "Kiwi", "cherry"]
sortFruits.sort(key=str.lower)
print(sortFruits)
sortFruits.reverse()
print(sortFruits)

f = open("demofile.txt", "a")
f.write("This is first line")
f.close()

f = open("demofile.txt", "r")
print(f.read())

import pandas as pd

mydataset = {
    'cars': ["BMW", "Volvo", "Ford"],
    'passings': [3, 7, 2]
}

myvar = pd.DataFrame(mydataset)
print(myvar)
[ "thienxuanle@yahoo.com" ]
thienxuanle@yahoo.com
42bd3c170d0de2ff43b6f1b9bbabbea30e21b314
2af9a95586c893d386172bac61994518196da1da
/server/run.py
016084476dae50f2edbb5e589389c6d9cfa1ca27
[]
no_license
freddyli7/WaterlooDiscovery
2e70eb8f5112f987ec57204bfdeddc8faf236057
e720baeae406b9d5c9d84572eda3945014fc3222
refs/heads/master
2021-06-12T17:32:35.453555
2017-03-09T18:38:27
2017-03-09T18:38:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
73
py
#!/usr/bin/python
SQLALCHEMY_ECHO = False
PORT = 8000
HOST = '0.0.0.0'
[ "zbhknight@gmail.com" ]
zbhknight@gmail.com
ecdf152974a86661c7d07aee826c3d4d1c27557b
c9fe05f893deff75232aabca4e877c144972249a
/arcpyenv/arcgispro-py3-clone/Scripts/jupyter-kernelspec-script.py
57f54379d60ae219e875731f31db4a9367c56a12
[ "Python-2.0" ]
permissive
SherbazHashmi/HackathonServer
4d1dc7f0122a701a0f3a17787d32efe83bc67601
a874fe7e5c95196e4de68db2da0e2a05eb70e5d8
refs/heads/master
2022-12-26T06:46:33.893749
2019-11-03T10:49:47
2019-11-03T10:49:47
218,912,149
3
3
null
2022-12-11T11:52:37
2019-11-01T04:16:38
Python
UTF-8
Python
false
false
256
py
# -*- coding: utf-8 -*-
import re
import sys

from jupyter_client.kernelspecapp import KernelSpecApp

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(KernelSpecApp.launch_instance())
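What the argv[0] rewrite above does, shown on a hypothetical Windows install path:

import re

path = r'C:\env\Scripts\jupyter-kernelspec-script.py'
print(re.sub(r'(-script\.pyw?|\.exe)?$', '', path))
# C:\env\Scripts\jupyter-kernelspec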
[ "sherbaz.hashmi@gmail.com" ]
sherbaz.hashmi@gmail.com
0d15c94e8d8e655948d98ae5da0947de75b7aefa
2aabe86ae30aa5fdbae1a161e4330f891c81aa6b
/test/hard_function_chain.py
1584d6e3b311d72f920282755ed1f9fc6b0792c0
[]
no_license
Cyberdog52/saep2
50fb80cf5173cbf2d6bdf17bd6cd91a5537b32af
a26017fcd8d9c742b81fb12d63ccef76cf1d054c
refs/heads/master
2020-05-29T23:10:53.531954
2015-05-24T20:44:14
2015-05-24T20:44:14
35,619,678
1
1
null
null
null
null
UTF-8
Python
false
false
301
py
def chain4(asdf):
    return asdf

def chain3(x, y, z):
    if x > 1:
        if y > 1:
            if z > 1:
                return chain4(42)
    return chain4(10000)

def chain2(y):
    return chain3(y, y, y)

def chain1(asdf, a):
    return chain2(asdf + a)

def main(x, y):
    return chain1(x, y)

def expected_result():
    # chain3 falls through to chain4(10000), so 10000 (not 1000) is expected
    return [42, 10000]
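Quick sanity check of the chain, choosing inputs that hit both branches (editor addition):

print(main(1, 1))  # chain3(2, 2, 2) takes the nested branch -> 42
print(main(0, 0))  # chain3(0, 0, 0) falls through -> 10000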
[ "Andres@Andress-MacBook-Air.local" ]
Andres@Andress-MacBook-Air.local
4eac9f6b223afbd5dc25d6c4dacd6d371c18fd5c
7b50e5b78248940d9abbdbc1f9736746cca39343
/tests/integration/rolling_update/test_rolling_update.py
e6800e86408d3c28b1599c81d74057b217e81183
[ "Apache-2.0" ]
permissive
Aqiry/jina
1fb62a6c07b211c3e8757b88f275f5ef72819700
5604b9a0bb6d6a5ded7ee731b8b1f35735c26fdd
refs/heads/master
2023-08-12T07:31:49.282176
2021-10-09T08:11:00
2021-10-09T08:11:00
415,276,147
1
0
null
null
null
null
UTF-8
Python
false
false
8,199
py
import collections import os import time import threading import numpy as np import pytest from jina import Document, Flow, Executor, requests cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture def config(tmpdir): os.environ['JINA_REPLICA_DIR'] = str(tmpdir) yield del os.environ['JINA_REPLICA_DIR'] @pytest.fixture def docs(): return [ Document(id=str(i), text=f'doc {i}', embedding=np.array([i] * 5)) for i in range(20) ] class DummyMarkExecutor(Executor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.metas.name = 'dummy' @requests def foo(self, docs, *args, **kwargs): for doc in docs: doc.tags['replica'] = self.runtime_args.replica_id doc.tags['shard'] = self.runtime_args.pea_id def close(self) -> None: import os os.makedirs(self.workspace, exist_ok=True) def test_normal(docs): NUM_REPLICAS = 3 NUM_SHARDS = 2 doc_id_path = collections.OrderedDict() def handle_search_result(resp): for doc in resp.data.docs: doc_id_path[int(doc.id)] = (doc.tags['replica'], doc.tags['shard']) flow = Flow().add( name='executor1', uses=DummyMarkExecutor, replicas=NUM_REPLICAS, parallel=NUM_SHARDS, ) with flow: flow.search(inputs=docs, request_size=1, on_done=handle_search_result) assert len(doc_id_path.keys()) == len(docs) num_used_replicas = len(set(map(lambda x: x[0], doc_id_path.values()))) assert num_used_replicas == NUM_REPLICAS shards = collections.defaultdict(list) for replica, shard in doc_id_path.values(): shards[replica].append(shard) assert len(shards.keys()) == NUM_REPLICAS for shard_list in shards.values(): assert len(set(shard_list)) == NUM_SHARDS @pytest.mark.timeout(60) def test_simple_run(docs): flow = Flow().add( name='executor1', replicas=2, parallel=3, ) with flow: # test rolling update does not hang flow.search(docs) flow.rolling_update('executor1', None) flow.search(docs) @pytest.fixture() def docker_image(): docker_file = os.path.join(cur_dir, 'Dockerfile') os.system(f"docker build -f {docker_file} -t test_rolling_update_docker {cur_dir}") time.sleep(3) yield os.system(f"docker rmi $(docker images | grep 'test_rolling_update_docker')") @pytest.mark.repeat(5) @pytest.mark.timeout(60) @pytest.mark.parametrize('uses', ['docker://test_rolling_update_docker']) def test_thread_run(docs, mocker, reraise, docker_image, uses): def update_rolling(flow, pod_name): with reraise: flow.rolling_update(pod_name) error_mock = mocker.Mock() total_responses = [] with Flow().add( uses=uses, name='executor1', replicas=2, parallel=2, timeout_ready=5000, ) as flow: x = threading.Thread( target=update_rolling, args=( flow, 'executor1', ), ) for i in range(50): responses = flow.search( docs, on_error=error_mock, request_size=10, return_results=True ) total_responses.extend(responses) if i == 5: x.start() x.join() error_mock.assert_not_called() assert len(total_responses) == (len(docs) * 50 / 10) @pytest.mark.repeat(5) @pytest.mark.timeout(60) def test_vector_indexer_thread(config, docs, mocker, reraise): def update_rolling(flow, pod_name): with reraise: flow.rolling_update(pod_name) error_mock = mocker.Mock() total_responses = [] with Flow().add( name='executor1', uses=DummyMarkExecutor, replicas=2, parallel=3, timeout_ready=5000, ) as flow: for i in range(5): flow.search(docs, on_error=error_mock) x = threading.Thread( target=update_rolling, args=( flow, 'executor1', ), ) for i in range(40): responses = flow.search( docs, on_error=error_mock, request_size=10, return_results=True ) total_responses.extend(responses) if i == 5: x.start() x.join() 
error_mock.assert_not_called() assert len(total_responses) == (len(docs) * 40 / 10) def test_workspace(config, tmpdir, docs): with Flow().add( name='executor1', uses=DummyMarkExecutor, workspace=str(tmpdir), replicas=2, parallel=3, ) as flow: # in practice, we don't send index requests to the compound pod this is just done to test the workspaces for i in range(10): flow.index(docs) # validate created workspaces assert set(os.listdir(str(tmpdir))) == {'dummy'} assert set(os.listdir(os.path.join(tmpdir, 'dummy'))) == {'0', '1'} for replica_id in {'0', '1'}: assert set(os.listdir(os.path.join(tmpdir, 'dummy', replica_id))) == { '0', '1', '2', } @pytest.mark.parametrize( 'replicas_and_parallel', ( ((3, 1),), ((2, 3),), ((2, 3), (3, 4), (2, 2), (2, 1)), ), ) def test_port_configuration(replicas_and_parallel): def validate_ports_replica(replica, replica_port_in, replica_port_out, parallel): assert replica_port_in == replica.args.port_in assert replica.args.port_out == replica_port_out peas_args = replica.peas_args peas = peas_args['peas'] assert len(peas) == parallel if parallel == 1: assert peas_args['head'] is None assert peas_args['tail'] is None assert peas[0].port_in == replica_port_in assert peas[0].port_out == replica_port_out else: shard_head = peas_args['head'] shard_tail = peas_args['tail'] assert replica.args.port_in == shard_head.port_in assert replica.args.port_out == shard_tail.port_out for pea in peas: assert shard_head.port_out == pea.port_in assert pea.port_out == shard_tail.port_in flow = Flow() for i, (replicas, parallel) in enumerate(replicas_and_parallel): flow.add( name=f'pod{i}', replicas=replicas, parallel=parallel, copy_flow=False, ) with flow: pods = flow._pod_nodes for pod_name, pod in pods.items(): if pod_name == 'gateway': continue if pod.args.replicas == 1: if int(pod.args.parallel) == 1: assert len(pod.peas_args['peas']) == 1 else: assert len(pod.peas_args) == 3 replica_port_in = pod.args.port_in replica_port_out = pod.args.port_out else: replica_port_in = pod.head_args.port_out replica_port_out = pod.tail_args.port_in assert pod.head_pea.args.port_in == pod.args.port_in assert pod.head_pea.args.port_out == replica_port_in assert pod.tail_pea.args.port_in == replica_port_out assert pod.tail_pea.args.port_out == pod.args.port_out if pod.args.replicas > 1: for replica in pod.replicas: validate_ports_replica( replica, replica_port_in, replica_port_out, getattr(pod.args, 'parallel', 1), ) assert pod def test_num_peas(config): with Flow().add( name='executor1', uses='!DummyMarkExecutor', replicas=3, parallel=4, ) as flow: assert flow.num_peas == ( 3 * (4 + 1 + 1) # replicas 3 # parallel 4 # pod head # pod tail + 1 # compound pod head + 1 # compound pod tail + 1 # gateway )
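A minimal rolling-update sketch distilled from test_simple_run above (same Flow topology; the document text is invented for illustration):

from jina import Document, Flow

f = Flow().add(name='executor1', replicas=2, parallel=3)
with f:
    f.search([Document(text='hello')])
    f.rolling_update('executor1', None)  # replicas restart one at a time
    f.search([Document(text='hello')])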
[ "noreply@github.com" ]
Aqiry.noreply@github.com
3241190153a590daac0b3947d9f74fcbd5eb76e2
8952c3edb27752fe08b577a89d100c432433a00a
/entities/items/__init__.py
50ec215edf587191c08159e4d15c38646c250761
[ "MIT" ]
permissive
newobj/lunch-break-rl
ce0f147e25b1fe919b3d0991d53bf5f66a7957c5
67604d9393e2382bed8d4ce05dcac99fa0ac4b93
refs/heads/master
2021-01-20T10:57:11.728812
2017-08-27T05:20:04
2017-08-27T05:20:04
null
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
def register():
    from entities.items.weapons import battleaxe
    from entities.items.weapons import dagger
    from entities.items.weapons import fist
    # from entities.items.weapons import glove
    from entities.items.weapons import pickaxe
    from entities.items.weapons import sword
    from entities.items.weapons import whip

    from entities.items.consumables import potion
[ "joshua.skelton@gmail.com" ]
joshua.skelton@gmail.com
dea9df6d61e8ff8aec50fd395427af2ef14cbe01
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
/call_life_beneath_different_year.py
8b63938cea369307ce22c5d3c863f32c9f6781a9
[]
no_license
JingkaiTang/github-play
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
51b550425a91a97480714fe9bc63cb5112f6f729
refs/heads/master
2021-01-20T20:18:21.249162
2016-08-19T07:20:12
2016-08-19T07:20:12
60,834,519
0
0
null
null
null
null
UTF-8
Python
false
false
190
py
#! /usr/bin/env python

def think_place(str_arg):
    hand(str_arg)
    print('leave_hand')

def hand(str_arg):
    print(str_arg)

if __name__ == '__main__':
    think_place('last_group')
[ "jingkaitang@gmail.com" ]
jingkaitang@gmail.com
ae9205eb82e6c82b7a04567ddef23cb9d7392aa9
15c7a09c290ca405885ba532e54ac6054632d0ab
/src/eval.py
aabbd27a15e3e57ff3075a975f09d91addc1466d
[ "Apache-2.0" ]
permissive
DianaTaukin/DSD-SATN
8304bb2794533dcb30fbf687b4d6c9dae04d461d
5a4ab5e3cfcb00e72ca27cf5ec10a8d8e29ef312
refs/heads/master
2020-08-12T19:50:55.644682
2019-10-14T21:27:24
2019-10-14T21:27:24
214,833,038
0
0
Apache-2.0
2019-10-13T14:17:43
2019-10-13T14:17:43
null
UTF-8
Python
false
false
5,298
py
from base import * def val_result(self,epoch,evaluation = False,data_loader=None): if data_loader is None: data_loader = self.loader_val if self.video and not self.eval_pw3d: self.generator.eval() print('evaluation ...') MPJPE = AverageMeter() PA_MPJPE = AverageMeter() integral_kp2d_error = AverageMeter() integral_kp2d_pckh = AverageMeter() PA_MPJPE_results, MPJPE_results, imgpaths = [], [], [] entire_length = len(data_loader) vnum = self.val_batch_size if self.visual_all else 6 with torch.no_grad(): for test_iter,data_3d in enumerate(data_loader): outputs,kps,details = self.net_forward(data_3d,self.generator,video=self.video) kps_gt = data_3d['kp_2d'].cuda().reshape(data_3d['kp_2d'].shape[0],14,2) vis = (kps_gt!=-2.).float() if not self.eval_pw3d: integral_kp2d_error.update((((kps_gt-kps.reshape(kps_gt.shape[0],14,-1))**2)*vis).sum(-1).mean(0).detach().cpu().numpy()) integral_kp2d_pckh.update(compute_pckh_lsp(kps_gt.cpu().numpy(),kps.reshape(kps_gt.shape[0],14,-1).detach().cpu().numpy(),vis.cpu())) if test_iter%self.val_batch_size==0 and not evaluation: name = self.result_img_dir+'/{}_{}_val_{}'.format(self.tab,epoch,test_iter) self.visualizer.visulize_result(outputs,kps,data_3d,name,vnum = vnum) print('PCKh: {:.3f}'.format(integral_kp2d_pckh.avg)) ((cam,pose,shape), predict_verts, predict_j2d, predict_j3d, predict_Rs,verts_camed,j3d_camed) = outputs if self.save_smpl_params or self.save_obj: img_paths = data_3d['imgpath'] verts = predict_verts.cpu().numpy().astype(np.float16) params = torch.cat([cam,pose,shape],dim=-1).detach().cpu().numpy() for idx, img_path in enumerate(img_paths): save_name = os.path.join(self.result_img_dir,os.path.basename(img_path)) if self.save_smpl_params: np.save(save_name+'.npy',params[idx]) if self.save_obj: self.smpl.save_obj(verts[idx],save_name+'.obj') if self.eval_pw3d: h36m_idx = np.arange(len(data_3d['imgpath'])) else: h36m_idx = np.where(np.array(data_3d['data_set'])=='h36m')[0] if len(h36m_idx)<2: continue predict_j3d = predict_j3d[h36m_idx] real_3d = data_3d['kp_3d'][h36m_idx] smpl_trans = data_3d['param'][h36m_idx, :3].cuda() global_rotation = data_3d['global_rotation'][h36m_idx].cuda() imgpaths.append(np.array(data_3d['imgpath'])[h36m_idx]) kp3d_mono = data_3d['kp3d_mono'][h36m_idx].reshape(-1,self.kp3d_num,3).cuda() predicts = j3d_camed[h36m_idx].reshape(-1,self.kp3d_num,3)#.cpu().numpy() if self.eval_pw3d: predicts_aligned = align_by_pelvis(predicts) kp3d_mono = align_by_pelvis(kp3d_mono) else: predicts_aligned = align_by_pelvis(predicts) mpjpe_each = torch.sqrt(((predicts_aligned - kp3d_mono)**2).sum(-1)).mean(-1)*1000 MPJPE_results.append(mpjpe_each.cpu()) mpjpe_error = mpjpe_each.mean() MPJPE.update(mpjpe_error) per_verts_error = p_mpjpe(predict_j3d.detach().cpu().numpy(),real_3d.numpy().reshape(-1,self.kp3d_num,3),each_separate=True).mean(-1)*1000 PA_MPJPE_results.append(per_verts_error) PA_MPJPE.update(np.mean(per_verts_error)) if test_iter>3*self.val_batch_size and not evaluation: break if test_iter%self.val_batch_size==0 and evaluation: print('evaluation {}/{}: {:.3f}, {:.3f}'.format(test_iter,len(data_loader),MPJPE.avg,PA_MPJPE.avg)) PA_MPJPE_result = PA_MPJPE.avg MPJPE_result = MPJPE.avg PA_MPJPE_acts = self.h36m_evaluation_act_wise(np.concatenate(PA_MPJPE_results,axis=0),np.concatenate(np.array(imgpaths),axis=0)) MPJPE_acts = self.h36m_evaluation_act_wise(np.concatenate(MPJPE_results,axis=0),np.concatenate(np.array(imgpaths),axis=0)) print('-'*20) print('MPJPE: {:.3f}'.format(MPJPE_result)) print('PA_MPJPE: 
{:.3f}'.format(PA_MPJPE_result)) print('-'*20) table = PrettyTable(['Protocol']+config.h36m_action_names) table.add_row(['1']+MPJPE_acts) table.add_row(['2']+PA_MPJPE_acts) print(table) print('-'*20) print('integral_kp2d_PCKh:',integral_kp2d_pckh.avg) print('-'*20) if not self.eval_pw3d: scale_factor = 256 print('integral_kp2d_error:') table = PrettyTable(['Joint']+config.j14_names+['Mean']) table.add_row(['pixel error']+np.array(integral_kp2d_error.avg*scale_factor,dtype=np.float16).astype(np.str).tolist()\ +['{:.2f}'.format(integral_kp2d_error.avg.mean()*scale_factor)]) print(table) print('-'*20) if evaluation: print(self.gmodel_path) print(self.best_save_path) return MPJPE_result, PA_MPJPE_result
[ "936605403@qq.com" ]
936605403@qq.com
094ba89b95b8fedc9a4b7f76c03b16b1a734a651
2d1832f2334760188125d9593ae7fdc50ee66ea1
/Multidimensional List/08_miner_v1.py
5b307aaff6f22dcac6020a365a2942ee80397b4c
[]
no_license
IvayloValkov/Python-Advanced
49bc73abbeb5be4173a323dc415aa2d1809061a1
15ce73276ba8cb9285018d23b7e0a69b7689f5e5
refs/heads/main
2023-02-22T14:29:24.945194
2021-01-28T18:24:47
2021-01-28T18:24:47
330,350,520
0
0
null
null
null
null
UTF-8
Python
false
false
2,467
py
def field_as_input_matrix(n) -> [[str]]:
    return [input().split() for _ in range(n)]


def starting_position(matrix, start_symbol) -> tuple:
    side = len(matrix)
    for row in range(side):
        for col in range(side):
            if matrix[row][col] == start_symbol:
                return row, col


def all_coals_on_field(matrix, coal_symbol) -> int:
    total = 0
    for row in range(len(matrix)):
        total += matrix[row].count(coal_symbol)
    return total


def next_move(current_pos, current_command) -> tuple:
    moves = {"up": (-1, 0), "down": (1, 0), "left": (0, -1), "right": (0, 1)}
    return tuple(sum(pair) for pair in zip(current_pos, moves[current_command]))


def valid_indexes(matrix, position) -> bool:
    r, c = position
    return 0 <= r < len(matrix) and 0 <= c < len(matrix)


def move_symbol(matrix, current_pos, next_pos, miner_symbol) -> [[str]]:
    regular_position_symbol = "*"
    matrix[current_pos[0]][current_pos[1]] = regular_position_symbol
    # fixed: the original indexed the second axis with the global `next_position`
    matrix[next_pos[0]][next_pos[1]] = miner_symbol
    return matrix


REGULAR_POSITION = "*"
ROUTE_END = "e"
COAL = "c"
MINER_SYMBOL = "s"

field_size = int(input())
list_of_commands = input().split()
field = field_as_input_matrix(field_size)

miner_position = starting_position(field, MINER_SYMBOL)
total_coals_on_field = all_coals_on_field(field, COAL)
coal_counter = 0
game_over = False

for command in list_of_commands:
    current_position = miner_position
    next_position = next_move(current_position, command)
    if not valid_indexes(field, next_position):
        continue
    if field[next_position[0]][next_position[1]] == COAL:
        coal_counter += 1
        if coal_counter == total_coals_on_field:
            print(f"You collected all coals! {next_position[0], next_position[1]}")
            game_over = True
            break
        move_symbol(field, current_position, next_position, MINER_SYMBOL)
    elif field[next_position[0]][next_position[1]] == ROUTE_END:
        print(f"Game over! {next_position[0], next_position[1]}")
        game_over = True
        break
    else:
        move_symbol(field, current_position, next_position, MINER_SYMBOL)
    miner_position = next_position
    # print("\n".join(" ".join(row) for row in field))

if not game_over:
    print(f"{total_coals_on_field - coal_counter} coals left. {miner_position[0], miner_position[1]}")
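Spot-checks for the pure helpers above (the full script reads the field from stdin, so these assume the functions were imported on their own):

print(next_move((2, 2), "up"))                 # (1, 2)
print(next_move((2, 2), "right"))              # (2, 3)
print(valid_indexes([["*"] * 3] * 3, (3, 0)))  # False: row index out of range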
[ "noreply@github.com" ]
IvayloValkov.noreply@github.com
fb6cfaa7fe768449761e0dd5008a4ff9e54cf458
ce94d5a598d9a5558d99bc86757952ac6d3dfb8a
/Django_Polling_Page_Github/poll_site_GitHub/polls/views.py
7b0c51f489b2a91979ba6643c9fee899538da1ad
[]
no_license
DeltaForce14/polling-with-django
bbcefe99851e69133c642c4108e023a5b07e3885
0e7f0061e55e6a941edc4e0d943dcc781c9be10e
refs/heads/master
2022-12-14T10:39:36.291688
2019-07-23T16:15:02
2019-07-23T16:15:02
198,462,191
0
0
null
2022-04-22T21:59:24
2019-07-23T15:50:25
Python
UTF-8
Python
false
false
2,037
py
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone

from .models import Question, Choice


# Create your views here.
class IndexView(generic.ListView):
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        # Return the last five published questions (excluding questions
        # scheduled to be published in the future); '-pub_date' sorts
        # newest first, which the original ascending sort did not.
        return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]


# Question "detail" page - displays a question text, with no results but with a form to vote.
class DetailView(generic.DetailView):
    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        # Excludes any questions that aren't published yet.
        return Question.objects.filter(pub_date__lte=timezone.now())


# Question "results" page - displays results for a particular question.
class ResultsView(generic.DetailView):
    model = Question
    template_name = 'polls/results.html'


# Vote action - handles voting for a particular choice in a particular question.
def vote(request, question_id):
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
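A hypothetical polls/urls.py matching the names this module reverses ('polls:results') and the generic views above; this is the standard Django-tutorial wiring and is not part of this file:

from django.urls import path

from . import views

app_name = 'polls'
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
    path('<int:question_id>/vote/', views.vote, name='vote'),
]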
[ "ditapeskova@Ditas-MacBook-Air.local" ]
ditapeskova@Ditas-MacBook-Air.local
f69736435500c47405b5828275a10e269b94bf24
09f8a3825c5109a6cec94ae34ea17d9ace66f381
/cohesity_management_sdk/models/cluster_config_proto_vault_cloud_tier_info.py
b29b932f0b1fd01320bf6519b3e2c7657bda8028
[ "Apache-2.0" ]
permissive
cohesity/management-sdk-python
103ee07b2f047da69d7b1edfae39d218295d1747
e4973dfeb836266904d0369ea845513c7acf261e
refs/heads/master
2023-08-04T06:30:37.551358
2023-07-19T12:02:12
2023-07-19T12:02:12
134,367,879
24
20
Apache-2.0
2023-08-31T04:37:28
2018-05-22T06:04:19
Python
UTF-8
Python
false
false
3,380
py
# -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.

import cohesity_management_sdk.models.cluster_config_proto_vault_cloud_tier_type


class ClusterConfigProto_Vault_CloudTierInfo(object):

    """Implementation of the 'ClusterConfigProto_Vault_CloudTierInfo' model.

    TODO: type description here.

    Attributes:
        honor_tier_info (bool): Flag that determines whether this tiering
            info is to be honored or not. By default this tiering setting
            will be ignored. This must be explicitly set to true in order to
            honor this setting.
        num_secs_to_move_after (long|int): Represents the number of seconds
            since the snapshot first got archived (to default tier) after
            which it needs to be moved to the target tier. For example, if
            user selects target as an AWS vault (default: S3 tier) with 3
            months retention, move to glacier after 1 month, and move to deep
            glacier after 2 months, then the below field should be set to
            appropriate number of seconds corresponding to 1 or 2 months by
            iris. The snapshot will reside in S3 (default tier) for 1 month,
            then 1 month in glacier tier, and then another 1 month in deep
            glacier before being deleted.
        target_tier_type (ClusterConfigProto_Vault_CloudTierType): Represents
            the target tier to which a archive snapshot needs to be
            moved/migrated. Currently we only allow down-tiering (i.e. moving
            snapshots to a colder tier compared to current tier).

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "honor_tier_info": 'honorTierInfo',
        "num_secs_to_move_after": 'numSecsToMoveAfter',
        "target_tier_type": 'targetTierType',
    }

    def __init__(self,
                 honor_tier_info=None,
                 num_secs_to_move_after=None,
                 target_tier_type=None,
                 ):
        """Constructor for the ClusterConfigProto_Vault_CloudTierInfo class"""

        # Initialize members of the class
        self.honor_tier_info = honor_tier_info
        self.num_secs_to_move_after = num_secs_to_move_after
        self.target_tier_type = target_tier_type

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        honor_tier_info = dictionary.get('honorTierInfo')
        num_secs_to_move_after = dictionary.get('numSecsToMoveAfter')
        target_tier_type = cohesity_management_sdk.models.cluster_config_proto_vault_cloud_tier_type.ClusterConfigProto_Vault_CloudTierType.from_dictionary(dictionary.get('targetTierType')) if dictionary.get('targetTierType') else None

        # Return an object of this model
        return cls(
            honor_tier_info,
            num_secs_to_move_after,
            target_tier_type,
        )
[ "naveena.maplelabs@cohesity.com" ]
naveena.maplelabs@cohesity.com
34e017a7c8f5ffa5bc2b23842213868fa9995371
d0aecacc74ec19abe7ac05d92a34d3762565e7f6
/src/model/post.py
3cf036998c9f83bd24dac8f28c49992ba9d8339a
[]
no_license
donaldng/bumpbot
a7cea4c911803c1f2af88178b93546461fd88887
3f37a231332203d2ae8800ece0c751f4679b9ca2
refs/heads/master
2022-12-10T00:10:35.750507
2018-04-18T23:07:09
2018-04-18T23:07:09
125,709,192
0
0
null
2022-12-08T00:56:29
2018-03-18T09:21:29
Python
UTF-8
Python
false
false
4,114
py
from requests_html import HTMLSession
from environment.config import *
from util.misc import log
from splinter import Browser
import sqlite3, records
import time


class Post:
    def __init__(self, user=None):
        self.db = records.Database('sqlite:///{}'.format(dbname))
        self.topic_url = "https://forum.lowyat.net/topic/"
        self.requestor = user
        self.limit = 1

    def add(self, url):
        post_id = self.process_id(url)
        log("post_id is {}".format(post_id))
        post_title, post_owner = self.getPostInfo(post_id)
        is_owner = post_owner.lower() == self.requestor.lower()
        if post_id and is_owner:
            status = "Verified success, will bump {}{} for {}!".format(self.topic_url, post_id, self.requestor)
            try:
                self.db.query("INSERT OR IGNORE INTO post (post_id, title, url, count, status, user_id, created_at, updated_at) VALUES ({pid}, '{title}', '{base}{pid}', 0, 1, (SELECT user_id FROM user WHERE LOWER(username)='{uname}'), {t}, {t})".format(pid=post_id, base=self.topic_url, uname=self.requestor.lower(), t=int(time.time()), title=post_title))
                self.db.query("UPDATE post SET deleted=0, updated_at={t} WHERE post_id={pid} AND user_id=(SELECT user_id FROM user WHERE LOWER(username)='{uname}')".format(pid=post_id, uname=self.requestor.lower(), t=int(time.time())))
            except:
                status = "Error inserting post"
                log(status)
                pass
        elif not is_owner:
            status = "post owner is {}, not you, {}".format(post_owner, self.requestor)
            log(status)
        else:
            status = "unable to process url {}".format(url)
            log(status)
        return status

    def delete(self, post_id):
        log("delete post_id {}".format(post_id))
        post_owner = self.db.query("SELECT u.username FROM post p, user u WHERE u.user_id=p.user_id AND post_id=:post_id", False, post_id=post_id)[0].username
        log("post_owner is {}".format(post_owner))
        if post_owner.lower() == self.requestor.lower():
            self.db.query("UPDATE post SET deleted=1, status=0 WHERE post_id=:post_id", False, post_id=post_id)
            log("deleted")

    def updateStatus(self, post_id, status):
        log("stop post_id {}".format(post_id))
        post_owner = self.db.query("SELECT u.username FROM post p, user u WHERE u.user_id=p.user_id AND post_id=:post_id", False, post_id=post_id)[0].username
        if post_owner.lower() == self.requestor.lower():
            self.db.query("UPDATE post SET status=:status WHERE post_id=:post_id", False, status=status, post_id=post_id)

    def process_id(self, url):
        post_id = 0
        if "lowyat.net/topic" in url:
            post_id = url.split(".lowyat.net/topic/")[1].split("/")[0]
        # https://forum.lowyat.net/index.php?showtopic=873687&hl=
        if not post_id:
            post_id = url.split("showtopic=")[1].split("&")[0]
        return post_id

    def get(self, user_id=0, username=None):
        condition = []
        where_statement = ""
        if user_id:
            condition.append("user_id='{}'".format(user_id))
        if username:
            condition.append("username='{}'".format(username))
        if len(condition) > 0:
            where_statement = " AND {}".format(" AND ".join(condition))
        return self.db.query('SELECT * FROM post WHERE deleted=0 {};'.format(where_statement))

    def getPostInfo(self, post_id):
        log("get post info of post_id {}".format(post_id))
        session = HTMLSession()
        url = "{}{}".format(self.topic_url, post_id)
        r = session.get(url)
        body = r.html
        topic_owner = body.find("span.normalname", first=True).text
        title = body.find("title", first=True).text
        log("topic owner={};title={}".format(topic_owner, title))
        return title, topic_owner


if __name__ == "__main__":
    post = Post("ACHARR")
    status = post.add("https://forum.lowyat.net/topic/4503006/+20")
[ "donaldyann@gmail.com" ]
donaldyann@gmail.com
6c84d74e6e8dd91c7f2d2a598a8a90dde07190c7
6278f9dce690bc0304cea0b872c232923302bab7
/scripts/simulator_node.py
8da44d47ada411704bfa6b31ef1eb7be96fcac29
[]
no_license
jaredsfrank/atb_ros
c144d4a200beff423deca0280563ce58eb0a5c39
bd404f9591c824a3f9d1f8d4688c592895d10ea0
refs/heads/master
2020-05-21T10:39:40.323097
2017-03-11T03:00:21
2017-03-11T03:00:21
84,618,782
0
0
null
null
null
null
UTF-8
Python
false
false
1,272
py
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Pose2D
import time
import random
import matplotlib
import matplotlib.pyplot as plt
import simulator
import nav
import numpy as np
from matplotlib import collections as mc
from std_msgs.msg import Int32MultiArray


def update_graph(data):
    bike_sim.update_bike()
    dir_to_turn = int(float(data.data))
    if dir_to_turn == 0:
        bike_sim.move_straight()
    else:
        bike_sim.rotate(dir_to_turn, bike_sim.bike.turning_r)


def path_parse(data):
    d = np.array(data.data).reshape(len(data.data)/4, 2, 2)
    map_model.paths = d


def listener():
    pub = rospy.Publisher('bike_pos', Pose2D, queue_size=10)
    rospy.init_node('simulator', anonymous=True)
    rospy.Subscriber("dir_to_turn", String, update_graph)
    rospy.Subscriber("paths", Int32MultiArray, path_parse)
    rate = rospy.Rate(100)
    rospy.loginfo(rospy.is_shutdown())
    while not rospy.is_shutdown():
        pub.publish(bike_sim.x, bike_sim.y, bike_sim.theta)
        rate.sleep()


if __name__ == '__main__':
    new_bike = nav.Bike((1, 8), np.radians(0), .02)
    map_model = nav.Map_Model(new_bike, [[], []], [])
    bike_sim = simulator.bike_sim(map_model.bike)
    listener()
[ "jaredsfrank@gmail.com" ]
jaredsfrank@gmail.com
c539bf7bdc584af315154cc21957adbad9ea738a
1a9ccaa3e9edeadcae31ff81ee20ee22327b5c10
/sayun/dfs_bfs/check.py
43fea031c5e2ebe8121f7105ac7adf4cbc6de3c2
[]
no_license
42somoim/42somoim3
e939a11f39ac5bc00f73df0bda4f5aa43bb99360
7b4b123e50363861475a9888ec330e4acc275fea
refs/heads/main
2023-02-22T05:29:53.472744
2021-01-26T10:59:02
2021-01-26T10:59:02
300,508,013
0
1
null
2020-11-17T11:20:54
2020-10-02T05:15:00
C++
UTF-8
Python
false
false
395
py
N, M, V = map(int, input().split())

matrix = [[0] for i in range(N + 1)]
for i in range(M):
    a, b = map(int, input().split())
    matrix[a].append(b)
    matrix[b].append(a)

visit_list = [0] * (N + 1)
s_result = []


def dfs(V, s_result):
    visit_list[V] = 1
    s_result.append(V)
    for i in range(1, N + 1):
        if (i in matrix[V] and visit_list[i] == 0):
            dfs(i, s_result)


dfs(V, s_result)
print(s_result)
[ "noreply@github.com" ]
42somoim.noreply@github.com
3338afd711d10bf178ef77bbfd41ec873d687373
24e7c44605140e3033bc0be18853bb9f59d8027a
/wsgi.py
9f46dc64667bc6c5180f358db44a750815da5155
[ "MIT" ]
permissive
ytaro-office/modest
82d4ef5cd2bd65d07928c047868f60be64933386
8efaa1b1afe8a00afa56142ea436b5278a098d0e
refs/heads/master
2021-08-30T04:03:28.311987
2017-12-16T00:04:39
2017-12-16T00:04:39
114,419,567
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
""" WSGI config for modest project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "modest.settings") application = get_wsgi_application()
[ "ryusuke.maeda1107@gmail.com" ]
ryusuke.maeda1107@gmail.com
042c57f7e5757202d59f7988f3c44d5ae2bd9a79
630fe47bb5aa5e49b45ab101d87c2dd2c53d180f
/venv/Lib/site-packages/com/vmware/nsx/node/services/syslog_client.py
f822d5ad5df9a3d50c24353423651dbc003b7846
[]
no_license
shrivastava-himanshu/Leetcode_practice
467497a58d82ff3ae2569d5e610dc6f27a1f31d6
4c59799947c2b17bfd22ca2a08707ef85e84a913
refs/heads/main
2023-06-12T13:14:45.381839
2021-07-05T04:09:05
2021-07-05T04:09:05
367,546,005
0
0
null
null
null
null
UTF-8
Python
false
false
23,001
py
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc.  All rights reserved.

# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.node.services.syslog.
#---------------------------------------------------------------------------

"""
"""

__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'

import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
    ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata


class Exporters(VapiInterface):
    """
    """

    _VAPI_SERVICE_ID = 'com.vmware.nsx.node.services.syslog.exporters'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ExportersStub)
        self._VAPI_OPERATION_IDS = {}

    def create(self, node_syslog_exporter_properties):
        """
        Adds a rule for exporting syslog information to a specified server.
        The required parameters are the rule name (exporter_name); severity
        level (emerg, alert, crit, and so on); transmission protocol (TCP or
        UDP); and server IP address or hostname. The optional parameters are
        the syslog port number, which can be 1 through 65,535 (514, by
        default); facility level to use when logging messages to syslog
        (kern, user, mail, and so on); and message IDs (msgids), which
        identify the types of messages to export.

        :type  node_syslog_exporter_properties: :class:`com.vmware.nsx.model_client.NodeSyslogExporterProperties`
        :param node_syslog_exporter_properties: (required)
        :rtype: :class:`com.vmware.nsx.model_client.NodeSyslogExporterProperties`
        :return: com.vmware.nsx.model.NodeSyslogExporterProperties
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange`
            Conflict
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('create', {
            'node_syslog_exporter_properties': node_syslog_exporter_properties,
        })

    def delete(self):
        """
        Removes all syslog exporter rules.

        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('delete', None)

    def delete_0(self, exporter_name):
        """
        Removes a specified rule from the collection of syslog exporter
        rules.

        :type  exporter_name: :class:`str`
        :param exporter_name: Name of syslog exporter to delete (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('delete_0', {
            'exporter_name': exporter_name,
        })

    def get(self, exporter_name):
        """
        Returns information about a specific syslog collection point.

        :type  exporter_name: :class:`str`
        :param exporter_name: Name of syslog exporter (required)
        :rtype: :class:`com.vmware.nsx.model_client.NodeSyslogExporterProperties`
        :return: com.vmware.nsx.model.NodeSyslogExporterProperties
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('get', {
            'exporter_name': exporter_name,
        })

    def list(self):
        """
        Returns the collection of registered syslog exporter rules, if any.
        The rules specify the collector IP address and port, and the protocol
        to use.

        :rtype: :class:`com.vmware.nsx.model_client.NodeSyslogExporterPropertiesListResult`
        :return: com.vmware.nsx.model.NodeSyslogExporterPropertiesListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('list', None)

    def verify(self):
        """
        Collect iptables rules needed for all existing syslog exporters and
        verify if the existing iptables rules are the same. If not, remove
        the stale rules and add the new rules to make sure all exporters
        work properly.

        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('verify', None)


class Status(VapiInterface):
    """
    """

    _VAPI_SERVICE_ID = 'com.vmware.nsx.node.services.syslog.status'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _StatusStub)
        self._VAPI_OPERATION_IDS = {}

    def get(self):
        """
        Read syslog service status

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('get', None)


class _ExportersStub(ApiInterfaceStub):
    def __init__(self, config):
        # properties for create operation
        create_input_type = type.StructType('operation-input', {
            'node_syslog_exporter_properties': type.ReferenceType('com.vmware.nsx.model_client', 'NodeSyslogExporterProperties'),
        })
        create_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.concurrent_change':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        create_input_value_validator_list = [
        ]
        create_output_validator_list = [
        ]
        create_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/node/services/syslog/exporters',
            request_body_parameter='node_syslog_exporter_properties',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {})
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/api/v1/node/services/syslog/exporters',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for delete_0 operation
        delete_0_input_type = type.StructType('operation-input', {
            'exporter_name': type.StringType(),
        })
        delete_0_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_0_input_value_validator_list = [
        ]
        delete_0_output_validator_list = [
        ]
        delete_0_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/api/v1/node/services/syslog/exporters/{exporter-name}',
            path_variables={
                'exporter_name': 'exporter-name',
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'exporter_name': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/node/services/syslog/exporters/{exporter-name}',
            path_variables={
                'exporter_name': 'exporter-name',
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for list operation
        list_input_type = type.StructType('operation-input', {})
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/node/services/syslog/exporters',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for verify operation
        verify_input_type = type.StructType('operation-input', {})
        verify_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        verify_input_value_validator_list = [
        ]
        verify_output_validator_list = [
        ]
        verify_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/node/services/syslog/exporters?action=verify',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )

        operations = {
            'create': {
                'input_type': create_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeSyslogExporterProperties'),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete_0': {
                'input_type': delete_0_input_type,
                'output_type': type.VoidType(),
                'errors': delete_0_error_dict,
                'input_value_validator_list': delete_0_input_value_validator_list,
                'output_validator_list': delete_0_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeSyslogExporterProperties'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeSyslogExporterPropertiesListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'verify': {
                'input_type': verify_input_type,
                'output_type': type.VoidType(),
                'errors': verify_error_dict,
                'input_value_validator_list': verify_input_value_validator_list,
                'output_validator_list': verify_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'create': create_rest_metadata,
            'delete': delete_rest_metadata,
            'delete_0': delete_0_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'verify': verify_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.node.services.syslog.exporters',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)


class _StatusStub(ApiInterfaceStub):
    def __init__(self, config):
        # properties for get operation
        get_input_type = type.StructType('operation-input', {})
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/node/services/syslog/status',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )

        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeServiceStatusProperties'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'get': get_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.node.services.syslog.status',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)


class StubFactory(StubFactoryBase):
    _attrs = {
        'Exporters': Exporters,
        'Status': Status,
    }
[ "Himanshu.Shrivastava@vce.com" ]
Himanshu.Shrivastava@vce.com
b1944869e6423f695c47e849bfb352d5771a5139
c13aba57d849d98e25c509c9db021e14b4a63cc7
/Easy_NN_sample.py
db22217e27c7ab2b25d09a50abe10e75a9f77df6
[]
no_license
cool425589/tensorflow_example
a7a0bd79609dc00852a04a66ab492737b51e0c40
0e8d31674b3203f95dc69068767d591c8f53e94b
refs/heads/master
2021-09-06T11:54:14.905140
2018-02-06T08:13:04
2018-02-06T08:13:04
105,788,483
0
0
null
null
null
null
UTF-8
Python
false
false
2,148
py
import tensorflow as tf
import numpy as np

# training data
train_1 = np.array([[1., 2., 3.],
                    [3., 4., 5.],
                    [8., 5., 7.],
                    [7., 1., 8.]])

train_2 = np.array([[1.],
                    [0.],
                    [0.],
                    [1.]])

"""
4 x [1,3] input, 4 x [1] real answer
hidden layer_1 have 3 input and 2 output
hidden layer_2 have 2 input and 1 output
Activation function : sigmoid
loss function : Mean squared error
Optimizer : Gradient Descent
"""

input_1 = tf.placeholder(tf.float32, shape=[None, 3])
input_2 = tf.placeholder(tf.float32, shape=[None, 1])

weight_1 = tf.get_variable(name='weight_1', shape=[3, 2], dtype=tf.float32,
                           initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
bias_1 = tf.get_variable(name='bias_1', shape=[2], dtype=tf.float32,
                         initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
layer_1_output = tf.add(tf.matmul(input_1, weight_1), bias_1)

weight_2 = tf.get_variable(name='weight_2', shape=[2, 1], dtype=tf.float32,
                           initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
bias_2 = tf.get_variable(name='bias_2', shape=[1], dtype=tf.float32,
                         initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
layer_2_output = tf.sigmoid(tf.add(tf.matmul(layer_1_output, weight_2), bias_2))

# Mean squared error:
# train_2 is our desired output and layer_2_output is the output that our network calculated.
# Use GradientDescent to minimize the loss so we approach our desired output.
loss = tf.losses.mean_squared_error(train_2, layer_2_output)

# our goal: minimize the squared error
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)

with tf.Session() as sess:
    # initialize
    init = tf.global_variables_initializer()
    sess.run(init)
    # train
    for step in range(201):
        if step % 20 == 0:
            print('loss : ', sess.run(loss, feed_dict={input_1: train_1, input_2: train_2}))
            print('predict : ', sess.run(layer_2_output, feed_dict={input_1: train_1}))
        sess.run(train, feed_dict={input_1: train_1, input_2: train_2})
[ "noreply@github.com" ]
cool425589.noreply@github.com
d053a67afacf36a8c8d5424e30e4c754c3cf8e08
b7077b6a139e8369bb6e0c99d8222dcdf78c3513
/week9/1-Money-In-The-Bank/sql_manager.py
cbb78d6090b94e5c39cbcd7ade67b80c622efbfd
[]
no_license
siyana-plachkova/programming101
4276af6c271516cec4c357e7bed303276cb45db5
867c6f1f2131460113b1c1f88e53034283ad5abb
refs/heads/master
2020-04-15T02:47:14.865659
2015-05-25T20:04:24
2015-05-25T20:04:24
32,450,967
0
0
null
null
null
null
UTF-8
Python
false
false
3,021
py
import sqlite3

from client import Client


class Database:
    def __init__(self):
        self._conn = sqlite3.connect("bank.db")
        self._cursor = self._conn.cursor()

    @property
    def conn(self):
        return self._conn

    @property
    def cursor(self):
        return self._cursor

    def create_clients_table(self):
        create_query = '''CREATE TABLE IF NOT EXISTS clients(
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT,
            email TEXT,
            password TEXT,
            balance REAL DEFAULT 0,
            message TEXT,
            reset_password_hash TEXT)'''
        self._cursor.execute(create_query)

    def change_message(self, new_message, logged_user):
        update_sql = "UPDATE clients SET message = ? WHERE id = ?"
        self._cursor.execute(update_sql, (new_message, logged_user.get_id()))
        self._conn.commit()
        logged_user.set_message(new_message)

    def change_pass(self, new_pass, logged_user):
        update_sql = "UPDATE clients SET password = ? WHERE id = ?"
        self._cursor.execute(update_sql, (new_pass, logged_user.get_id()))
        self._conn.commit()

    def register(self, username, email, password):
        insert_sql = "INSERT INTO clients (username, email, password) VALUES (?, ?, ?)"
        self._cursor.execute(insert_sql, (username, email, password))
        self._conn.commit()

    def login(self, username, password):
        select_query = "SELECT id, username, balance, message, email FROM clients WHERE username = ? AND password = ? LIMIT 1"
        self._cursor.execute(select_query, (username, password))
        user = self._cursor.fetchone()
        if user:
            return Client(user[0], user[1], user[2], user[3], user[4])
        else:
            return False

    def get_user_email(self, username):
        get_email = "SELECT email FROM clients WHERE username = ? LIMIT 1"
        self._cursor.execute(get_email, (username, ))
        user = self._cursor.fetchone()
        if user:
            return user[0]
        return False

    def update_reset_pass(self, reset_pass_hash, username):
        update_reset_pass_hash = "UPDATE clients SET reset_password_hash = ? WHERE username = ?"
        self._cursor.execute(update_reset_pass_hash, (reset_pass_hash, username))
        self._conn.commit()

    def get_reset_pass_hash(self, username):
        get_reset_pass = "SELECT reset_password_hash FROM clients WHERE username = ? LIMIT 1"
        self._cursor.execute(get_reset_pass, (username, ))
        user = self._cursor.fetchone()
        if user:
            return user[0]
        return False

    def clear_reset_hash(self, username):
        clear_reset_hash = "UPDATE clients SET reset_password_hash = ? WHERE username = ?"
        self._cursor.execute(clear_reset_hash, (None, username))
        self._conn.commit()

    def close(self):
        self._cursor.close()
        self._conn.close()
[ "siyanaplachkova@gmail.com" ]
siyanaplachkova@gmail.com
4d2f4158c0cba41bc73889d655da2690c7826d74
3d19e1a316de4d6d96471c64332fff7acfaf1308
/Users/F/flukebox/bscgraduate.py
88a566fdb06b75cfda7c693b694d6464e67172ca
[]
no_license
BerilBBJ/scraperwiki-scraper-vault
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
65ea6a943cc348a9caf3782b900b36446f7e137d
refs/heads/master
2021-12-02T23:55:58.481210
2013-09-30T17:02:59
2013-09-30T17:02:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,102
py
###############################################################################
# START HERE: Tutorial for scraping ASP.NET pages (HTML pages that end .aspx), using the
# very powerful Mechanize library. In general, when you follow a 'next' link on
# .aspx pages, you're actually submitting a form.
# This tutorial demonstrates scraping a particularly tricky example.
###############################################################################

import scraperwiki
import mechanize
from BeautifulSoup import BeautifulSoup


def bootstrap(soup):
    br.open(base_url + 'default.aspx')
    br.open(base_url + 'defaulthome.aspx')
    current = base_url + 'allotedseat/stream.aspx'
    br.open(base_url + 'allotedseat/stream.aspx')
    soup = BeautifulSoup(br.response().read())
    print soup
    alink = "http://seeuptu.nic.in/allotedseat/strconn_OPCR.aspx?streamcd=9&__EVENTARGUMENT=&__EVENTTARGET=STRMNM&__EVENTVALIDATION=%2FwEWDAKd0MScAgLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BAwLNiZW%2BA1Rj7Q3On1yIicBeWq3K%2Bdv9%2Fx6n&__PREVIOUSPAGE=_A0fAcgbfcl7jZI6MTsl0RhBaMQr9o92tyUw3tPTTk8hKPOJUV8RrKtqY9-_OYa1gsmdZxPLORNdVEiCkEA_Kztd6GVW4KRZuVaH8aoE_w8dfH690&__VIEWSTATE=%2FwEPDwUJNjE0NTgzMzgxZGR3h8JfMHDP635QJ3PCDYpgC6GJeQ%3D%3D"
    br.open(alink)
    soup = BeautifulSoup(br.response().read())
    print soup
    options = soup.find("select", {"id": "DDL_Institute"}).findAll("option")
    for option in options:
        print option["value"]
        br.select_form(name='form1')
        br.form.set_all_readonly(False)
        br['DDL_Institute'] = [option["value"]]
        institute = option.text
        institute_code = option["value"]
        br.submit()
        soup = BeautifulSoup(br.response().read())
        rows = soup.find("table", {"id": "Table1"}).findAll("tr")
        cols = ["", "", "", "", "", ""]
        for row in rows[1:]:
            index = 0
            for col in row.findAll("td"):
                if col.text:
                    cols[index] = col.text
                    index = index + 1
            print "%s,%s,%s,%s,%s,%s,%s" % (institute_code, institute, cols[0], cols[1], cols[2], cols[3], cols[4])
            record = {
                "InstituteCode": institute_code,
                "InstituteName": institute,
                "SerialNumber": cols[0],
                "BranchCourse": cols[1],
                "AlottedUCAT": cols[2],
                "OPRank": cols[3],
                "CLRank": cols[4]
            }
            scraperwiki.datastore.save(["InstituteCode", "BranchCourse", "AlottedUCAT"], record)


# ---------------------------------------------------------------------------
# START HERE: setting up Mechanize
# We need to set the user-agent header so the page thinks we're a browser,
# as otherwise it won't show all the fields we need
# ---------------------------------------------------------------------------
base_url = 'http://seeuptu.nic.in/'
starting_url = base_url + 'index.aspx'

br = mechanize.Browser()
# Set the user-agent as Mozilla - if the page knows we're Mechanize, it won't return all fields
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
br.open(starting_url)
soup = BeautifulSoup(br.response().read())
# Have a look at 'soup': note the 'onSubmit' JavaScript function that is called when
# you click on the 'next' link. We'll mimic this in the function above.
print soup

# start scraping
bootstrap(soup)
[ "pallih@kaninka.net" ]
pallih@kaninka.net
850867f87e7ed3b89e151c8fab97f417fe20b4c3
9edaf93c833ba90ae9a903aa3c44c407a7e55198
/travelport/models/air_reprice_search_port_type_service_input.py
cf8d7d4b04fd3fcd488875ffd4bb7190b6b63c17
[]
no_license
tefra/xsdata-samples
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
ef027fe02e6a075d8ed676c86a80e9647d944571
refs/heads/main
2023-08-14T10:31:12.152696
2023-07-25T18:01:22
2023-07-25T18:01:22
222,543,692
6
1
null
2023-06-25T07:21:04
2019-11-18T21:00:37
Python
UTF-8
Python
false
false
858
py
from __future__ import annotations
from dataclasses import dataclass, field

from travelport.models.air_reprice_req import AirRepriceReq

__NAMESPACE__ = "http://www.travelport.com/service/air_v52_0"


@dataclass
class AirRepriceSearchPortTypeServiceInput:
    class Meta:
        name = "Envelope"
        namespace = "http://schemas.xmlsoap.org/soap/envelope/"

    body: None | AirRepriceSearchPortTypeServiceInput.Body = field(
        default=None,
        metadata={
            "name": "Body",
            "type": "Element",
        }
    )

    @dataclass
    class Body:
        air_reprice_req: None | AirRepriceReq = field(
            default=None,
            metadata={
                "name": "AirRepriceReq",
                "type": "Element",
                "namespace": "http://www.travelport.com/schema/air_v52_0",
            }
        )
[ "chris@komposta.net" ]
chris@komposta.net
dd07bf9ee6534d7aa6e2e55ddca99569bf0667a9
d620ee5a3fdb00ade6ccc996db3b0fcbf2e4d8c8
/simfinder.py
b4a9d16e3d62d6ec3c64e5151cec4e094f5bd9e4
[]
no_license
feipeixuan/common_script
573327db1d3816d92092026c0cf5598ae0a82d7e
6bdff21837da9578b2f12596fdc9a9755dd8ff46
refs/heads/master
2020-04-26T02:14:03.456346
2019-05-15T02:09:30
2019-05-15T02:09:30
173,228,721
0
0
null
null
null
null
UTF-8
Python
false
false
3,054
py
#!/usr/bin/python
# coding=utf-8

import sys
import os
from PIL import Image
import imagehash
import distance
from shutil import copyfile


# Similar-image finder
class SimFinder:
    def __init__(self):
        self.groups = []
        # Eight sub-fingerprint indexes, one per 8-character slice of the hash
        self.indexs = []
        for i in range(0, 8):
            self.indexs.append({})

    # Generate an image fingerprint (perceptual hash)
    def getFinger(self, image):
        hash = imagehash.average_hash(Image.open(image), hash_size=16)
        return str(hash)

    # Cluster the images by fingerprint
    def clusterPhotos(self, baseDir, imageList):
        for image in imageList:
            # Compute the fingerprint
            finger = self.getFinger(baseDir + image)
            group = self.getGroup(finger, image)
            if group == None:
                self.addGroup(finger, image)

    # Get groups of highly similar images
    def getSimPhotos(self, baseDir, imageList):
        simPhotos = []
        self.clusterPhotos(baseDir, imageList)
        for group in self.groups:
            for finger in group.keys():
                if (len(group[finger]) >= 5):
                    simPhotos.extend(group[finger])
        return simPhotos

    # Get images that stand alone (small clusters)
    def getAlonePhotos(self, baseDir, imageList):
        alonePhotos = []
        self.clusterPhotos(baseDir, imageList)
        for group in self.groups:
            for finger in group.keys():
                if (len(group[finger]) < 5):
                    alonePhotos.extend(group[finger])
        return alonePhotos

    # Find the group a fingerprint belongs to
    def getGroup(self, finger, image):
        for i in range(0, 8):
            subFinger = finger[i * 8:(i + 1) * 8]
            if subFinger not in self.indexs[i]:
                continue
            for groupIndex in self.indexs[i][subFinger]:
                fingerTmp = self.groups[groupIndex].keys()[0]
                if self.computeDiff(fingerTmp, finger) <= 10:
                    self.groups[groupIndex][fingerTmp].append(image)
                    return self.groups[groupIndex]
        return None

    # Add a new group
    def addGroup(self, finger, image):
        # Add the group itself
        groupIndex = len(self.groups)
        self.groups.append({finger: [image]})
        # Add the index entries
        for i in range(0, 8):
            subFinger = finger[i * 8:(i + 1) * 8]
            if subFinger not in self.indexs[i]:
                self.indexs[i][subFinger] = []
            self.indexs[i][subFinger].append(groupIndex)

    # Compute the distance between two fingerprints
    def computeDiff(self, hash1, hash2):
        return distance.hamming(str(hash1), str(hash2))


def main():
    inputDir = sys.argv[1]
    outputDir = sys.argv[2]
    strategy = sys.argv[3]
    simfinder = SimFinder()
    photoList = os.listdir(inputDir)
    if strategy == "sim":
        photoList = simfinder.getSimPhotos(inputDir, photoList)
    elif strategy == "alone":
        photoList = simfinder.getAlonePhotos(inputDir, photoList)
    for photoUrl in photoList:
        copyfile(inputDir + "/" + photoUrl, outputDir + "/" + photoUrl)


if __name__ == '__main__':
    main()
[ "feipeixuan@163.com" ]
feipeixuan@163.com
8128fdb7f3771e6e201275c2f4b661d15ba64eb6
58e6d305d70f18c89606ac4f2d112e37821030b9
/mainApp/migrations/0001_initial.py
3fe5335c0551c723de1bf7a8575bc84ba23bc982
[]
no_license
JorgeVidalCano/petShelter
b34735d41d206fc64ad006cb2dfd9abb8ea338de
ea22f3d2f5a74ad6035c4ce656cd53290da3906d
refs/heads/master
2023-02-03T09:50:19.216348
2020-12-28T12:00:21
2020-12-28T12:00:21
305,349,641
0
0
null
null
null
null
UTF-8
Python
false
false
3,388
py
# Generated by Django 3.1.2 on 2020-10-23 17:36

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Shelter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60, unique=True)),
                ('location', models.CharField(max_length=255)),
                ('slug', models.SlugField(blank=True, unique=True)),
                ('manager', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usuario', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Pet',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=60)),
                ('about', models.CharField(max_length=300)),
                ('age', models.PositiveIntegerField(default=0)),
                ('sex', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female'), ('Unknown', 'Unknown')], default='Unknown', max_length=10)),
                ('kind', models.CharField(choices=[('Cat', 'Cat'), ('Dog', 'Dog'), ('Other', 'Other')], default='Cat', max_length=5)),
                ('weight', models.PositiveIntegerField(default=0)),
                ('visits', models.PositiveIntegerField(default=0)),
                ('status', models.CharField(choices=[('Adoption', 'Adoption'), ('Adopted', 'Adopted'), ('Urgent', 'Urgent')], default='Adoption', max_length=10)),
                ('color', models.CharField(choices=[('#fbfbfa', 'White'), ('000', 'Black'), ('#800000', 'Brown'), ('#808080', 'Gray')], default='FFF', max_length=7)),
                ('date_created', models.DateField(default=django.utils.timezone.now)),
                ('slug', models.SlugField(blank=True, unique=True)),
                ('shelter', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='albergue', to='mainApp.shelter')),
            ],
        ),
        migrations.CreateModel(
            name='Images',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, default='', max_length=60)),
                ('image', models.ImageField(upload_to='pet_imagen')),
                ('pet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pet_imagenes', to='mainApp.pet')),
            ],
        ),
        migrations.CreateModel(
            name='Feature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('description', models.CharField(max_length=60)),
                ('pet', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='pet_atributos', to='mainApp.pet')),
            ],
        ),
    ]
[ "jorge.vidal.cano@gmail.com" ]
jorge.vidal.cano@gmail.com
bfeff981fd1df505268725a68dbeecffff38eead
b22588340d7925b614a735bbbde1b351ad657ffc
/athena/Generators/MadGraphModels/python/models/Zp2HDM_UFO/particles.py
879051c3c964ef8edb6aa11708e447d0ec186aaa
[]
no_license
rushioda/PIXELVALID_athena
90befe12042c1249cbb3655dde1428bb9b9a42ce
22df23187ef85e9c3120122c8375ea0e7d8ea440
refs/heads/master
2020-12-14T22:01:15.365949
2020-01-19T03:59:35
2020-01-19T03:59:35
234,836,993
1
0
null
null
null
null
UTF-8
Python
false
false
14,534
py
# This file was automatically created by FeynRules 2.0.25
# Mathematica version: 9.0 for Mac OS X x86 (64-bit) (January 24, 2013)
# Date: Wed 11 Nov 2015 22:20:11


from __future__ import division
from object_library import all_particles, Particle
import parameters as Param

import propagators as Prop

a = Particle(pdg_code = 22, name = 'a', antiname = 'a', spin = 3, color = 1,
             mass = Param.ZERO, width = Param.ZERO, texname = 'a', antitexname = 'a',
             charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

Z = Particle(pdg_code = 23, name = 'Z', antiname = 'Z', spin = 3, color = 1,
             mass = Param.MZ, width = Param.WZ, texname = 'Z', antitexname = 'Z',
             charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

W__plus__ = Particle(pdg_code = 24, name = 'W+', antiname = 'W-', spin = 3, color = 1,
                     mass = Param.MW, width = Param.WW, texname = 'W+', antitexname = 'W-',
                     charge = 1, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

W__minus__ = W__plus__.anti()

g = Particle(pdg_code = 21, name = 'g', antiname = 'g', spin = 3, color = 8,
             mass = Param.ZERO, width = Param.ZERO, texname = 'g', antitexname = 'g',
             charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

Zp = Particle(pdg_code = 32, name = 'Zp', antiname = 'Zp', spin = 3, color = 1,
              mass = Param.MZp, width = Param.WZp, texname = 'Zp', antitexname = 'Zp',
              charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

ghA = Particle(pdg_code = 9000001, name = 'ghA', antiname = 'ghA~', spin = -1, color = 1,
               mass = Param.ZERO, width = Param.ZERO, texname = 'ghA', antitexname = 'ghA~',
               charge = 0, GhostNumber = 1, LeptonNumber = 0, QZ = 0, Y = 0)

ghA__tilde__ = ghA.anti()

ghZ = Particle(pdg_code = 9000002, name = 'ghZ', antiname = 'ghZ~', spin = -1, color = 1,
               mass = Param.MZ, width = Param.ZERO, texname = 'ghZ', antitexname = 'ghZ~',
               charge = 0, GhostNumber = 1, LeptonNumber = 0, QZ = 0, Y = 0)

ghZ__tilde__ = ghZ.anti()

ghZp = Particle(pdg_code = 9000003, name = 'ghZp', antiname = 'ghZp~', spin = -1, color = 1,
                mass = Param.MZp, width = Param.ZERO, texname = 'ghZp', antitexname = 'ghZp~',
                charge = 0, GhostNumber = 1, LeptonNumber = 0, QZ = 0, Y = 0)

ghZp__tilde__ = ghZp.anti()

ghWp = Particle(pdg_code = 9000004, name = 'ghWp', antiname = 'ghWp~', spin = -1, color = 1,
                mass = Param.MW, width = Param.ZERO, texname = 'ghWp', antitexname = 'ghWp~',
                charge = 1, GhostNumber = 1, LeptonNumber = 0, QZ = 0, Y = 0)

ghWp__tilde__ = ghWp.anti()

ghWm = Particle(pdg_code = 9000005, name = 'ghWm', antiname = 'ghWm~', spin = -1, color = 1,
                mass = Param.MW, width = Param.ZERO, texname = 'ghWm', antitexname = 'ghWm~',
                charge = -1, GhostNumber = 1, LeptonNumber = 0, QZ = 0, Y = 0)

ghWm__tilde__ = ghWm.anti()

ghG = Particle(pdg_code = 9000006, name = 'ghG', antiname = 'ghG~', spin = -1, color = 8,
               mass = Param.ZERO, width = Param.ZERO, texname = 'ghG', antitexname = 'ghG~',
               charge = 0, GhostNumber = 1, LeptonNumber = 0, QZ = 0, Y = 0)

ghG__tilde__ = ghG.anti()

ve = Particle(pdg_code = 12, name = 've', antiname = 've~', spin = 2, color = 1,
              mass = Param.ZERO, width = Param.ZERO, texname = 've', antitexname = 've~',
              charge = 0, GhostNumber = 0, LeptonNumber = 1, QZ = 0, Y = 0)

ve__tilde__ = ve.anti()

vm = Particle(pdg_code = 14, name = 'vm', antiname = 'vm~', spin = 2, color = 1,
              mass = Param.ZERO, width = Param.ZERO, texname = 'vm', antitexname = 'vm~',
              charge = 0, GhostNumber = 0, LeptonNumber = 1, QZ = 0, Y = 0)

vm__tilde__ = vm.anti()

vt = Particle(pdg_code = 16, name = 'vt', antiname = 'vt~', spin = 2, color = 1,
              mass = Param.ZERO, width = Param.ZERO, texname = 'vt', antitexname = 'vt~',
              charge = 0, GhostNumber = 0, LeptonNumber = 1, QZ = 0, Y = 0)

vt__tilde__ = vt.anti()

e__minus__ = Particle(pdg_code = 11, name = 'e-', antiname = 'e+', spin = 2, color = 1,
                      mass = Param.ZERO, width = Param.ZERO, texname = 'e-', antitexname = 'e+',
                      charge = -1, GhostNumber = 0, LeptonNumber = 1, QZ = 0, Y = 0)

e__plus__ = e__minus__.anti()

mu__minus__ = Particle(pdg_code = 13, name = 'mu-', antiname = 'mu+', spin = 2, color = 1,
                       mass = Param.MM, width = Param.ZERO, texname = 'mu-', antitexname = 'mu+',
                       charge = -1, GhostNumber = 0, LeptonNumber = 1, QZ = 0, Y = 0)

mu__plus__ = mu__minus__.anti()

ta__minus__ = Particle(pdg_code = 15, name = 'ta-', antiname = 'ta+', spin = 2, color = 1,
                       mass = Param.MTA, width = Param.ZERO, texname = 'ta-', antitexname = 'ta+',
                       charge = -1, GhostNumber = 0, LeptonNumber = 1, QZ = 0, Y = 0)

ta__plus__ = ta__minus__.anti()

u = Particle(pdg_code = 2, name = 'u', antiname = 'u~', spin = 2, color = 3,
             mass = Param.ZERO, width = Param.ZERO, texname = 'u', antitexname = 'u~',
             charge = 2/3, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

u__tilde__ = u.anti()

c = Particle(pdg_code = 4, name = 'c', antiname = 'c~', spin = 2, color = 3,
             mass = Param.ZERO, width = Param.ZERO, texname = 'c', antitexname = 'c~',
             charge = 2/3, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

c__tilde__ = c.anti()

t = Particle(pdg_code = 6, name = 't', antiname = 't~', spin = 2, color = 3,
             mass = Param.MT, width = Param.WT, texname = 't', antitexname = 't~',
             charge = 2/3, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

t__tilde__ = t.anti()

d = Particle(pdg_code = 1, name = 'd', antiname = 'd~', spin = 2, color = 3,
             mass = Param.ZERO, width = Param.ZERO, texname = 'd', antitexname = 'd~',
             charge = -1/3, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

d__tilde__ = d.anti()

s = Particle(pdg_code = 3, name = 's', antiname = 's~', spin = 2, color = 3,
             mass = Param.ZERO, width = Param.ZERO, texname = 's', antitexname = 's~',
             charge = -1/3, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

s__tilde__ = s.anti()

b = Particle(pdg_code = 5, name = 'b', antiname = 'b~', spin = 2, color = 3,
             mass = Param.MB, width = Param.ZERO, texname = 'b', antitexname = 'b~',
             charge = -1/3, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

b__tilde__ = b.anti()

h = Particle(pdg_code = 25, name = 'h', antiname = 'h', spin = 1, color = 1,
             mass = Param.Mh, width = Param.Wh, texname = 'h', antitexname = 'h',
             charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

H = Particle(pdg_code = 26, name = 'H', antiname = 'H', spin = 1, color = 1,
             mass = Param.MH, width = Param.WH, texname = 'H', antitexname = 'H',
             charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

H__plus__ = Particle(pdg_code = 27, name = 'H+', antiname = 'H-', spin = 1, color = 1,
                     mass = Param.MHP, width = Param.WHP, texname = 'H+', antitexname = 'H-',
                     charge = 1, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

H__minus__ = H__plus__.anti()

A0 = Particle(pdg_code = 28, name = 'A0', antiname = 'A0', spin = 1, color = 1,
              mass = Param.MA0, width = Param.WA0, texname = 'A0', antitexname = 'A0',
              charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

phip = Particle(pdg_code = 29, name = 'phip', antiname = 'phip', spin = 1, color = 1,
                mass = Param.Mphip, width = Param.Wphip, texname = 'phip', antitexname = 'phip',
                charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

G0 = Particle(pdg_code = 250, name = 'G0', antiname = 'G0', spin = 1, color = 1,
              mass = Param.MZ, width = Param.ZERO, texname = 'G0', antitexname = 'G0',
              goldstone = True, charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

G__plus__ = Particle(pdg_code = 251, name = 'G+', antiname = 'G-', spin = 1, color = 1,
                     mass = Param.MW, width = Param.ZERO, texname = 'G+', antitexname = 'G-',
                     goldstone = True, charge = 1, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

G__minus__ = G__plus__.anti()

G0p = Particle(pdg_code = 252, name = 'G0p', antiname = 'G0p', spin = 1, color = 1,
               mass = Param.MZp, width = Param.ZERO, texname = 'G0p', antitexname = 'G0p',
               goldstone = True, charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

n1 = Particle(pdg_code = 1000022, name = 'n1', antiname = 'n1~', spin = 2, color = 1,
              mass = Param.MX, width = Param.ZERO, texname = 'n1', antitexname = 'n1~',
              charge = 0, GhostNumber = 0, LeptonNumber = 0, QZ = 0, Y = 0)

n1__tilde__ = n1.anti()
[ "rushioda@lxplus754.cern.ch" ]
rushioda@lxplus754.cern.ch
bcacf2a72fbc8b0401ba68d9f8d12ce63d997aff
8cde023db3f53aa3f5b9aed20fcc769bdc85dd3f
/Regression/exercise_ml_internship_altair.py
9056dc3c08e227d9890d22a383915e26c90796b4
[]
no_license
bhumikdv/Assignments
5c688c12f57033c7d2f8625942465d99d71c8879
e55e53131d3b14e666a35db18e7364e191cc3900
refs/heads/main
2023-03-28T16:29:37.178449
2021-03-28T23:34:11
2021-03-28T23:34:11
351,596,597
0
0
null
null
null
null
UTF-8
Python
false
false
13,112
py
#!/usr/bin/env python
# coding: utf-8

# - """
# Here are 2 small exercises to evaluate your motivation and skills.
# The first one is simply a question, and the second one is related to
# applying a regression model to a dataset. The approach is at least or even
# more important than the result, please detail all the steps of your research.
# """

# # 1 - Data preprocessing

# In[1]:

# ----------------------
# In a dataset, there is a feature named "Server/Machine Type". How would you
# transform/prepare this feature so that it can be used in a regression model
# (one only accepting float/bool as value)? You don't have to code a solution,
# just write an answer on what you would do.
# Some example values of this feature:
# Intel(R) Xeon(R) Gold 6142 CPU @ 2.60GHz (x86_64), 2950 MHz, 385570 MB RAM, 12079 MB swap
# Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz (x86_64), 2500 MHz, 95717 MB RAM, 149012 MB swap
# Intel(R) Xeon(R) CPU E5-2697A v4 @ 2.60GHz (x86_64), 1300 MHz, 257868 MB RAM, 12027 MB swap
# Intel(R) Xeon(R) Gold 6142 CPU @ 2.60GHz (x86_64), 3138 MHz, 772642 MB RAM, 9042 MB swap
# Intel(R) Xeon(R) Gold 6142 CPU @ 2.60GHz (x86_64), 2214 MHz, 385570 MB RAM, 12090 MB swap
# Core(TM) i7-6700HQ CPU @ 2.60GHz (x86_64), 2600 MHz, 40078 MB RAM, 75183 MB swap
# Intel(R) Xeon(R) CPU E5-2697A v4 @ 2.60GHz (x86_64), 1199 MHz, 257868 MB RAM, 12247 MB swap
# Intel(R) Xeon(R) Gold 6142 CPU @ 2.60GHz (x86_64), 3246 MHz, 514658 MB RAM, 10770 MB swap
# Intel(R) Xeon(R) Gold 6142 CPU @ 2.60GHz (x86_64), 2483 MHz, 772642 MB RAM, 8266 MB swap

# Your full text (not python) answer:
#
# - There is no single simple way to transform this textual (non-numeric) data into float/bool values.
# - One way to solve this problem:
#   - We have one feature named "Server/Machine Type"; we can split this single feature into multiple features.
#   - Since the value of the feature is a configuration string for the server/machine, we can break it into
#     features such as Server/Machine Name, Processor Speed (GHz), architecture (32 or 64 bit), Processor
#     Number (2950, 2500, 2600, ...), RAM (385570, 95717, ...) and Swap Memory (12079, 149012, ...).
#   - Most of the new columns are numeric values and need no further transformation.
#   - Remaining textual data such as the Server/Machine Name can be handled in other ways, e.g. One-Hot Encoding.
#   - Although splitting one feature into multiple features increases the dimensionality, it lets the machine
#     learning algorithm comprehend the values and extract the useful information.

# # 2 - Regression
# 1. You are given a dataset (provided as an additional file to this exercise) with 34 features and
#    1 target to predict; your task is to train a regression model on this dataset.
# 2. Code it in python3, and provide as well a requirements.txt containing the versions you use.
#    I should be able to run it directly in my linux terminal:
#    - pip install -r requirements.txt && python exercise_ml_internship_altair.py
# 3. You are free to use every library you think is appropriate.

# # 2.1 loading dataset
import pandas as pd

df = pd.read_csv("dataset_ml_exo.csv")

# # 2.2 Data preparation
# This dataset has already been mostly prepared (it contains only float/bool features), but you may
# still have to do pre-processing (e.g. features reduction, other...).

df.head()
df['Unnamed: 0'].head()

# drop unnamed column (index column)
df = df.drop(['Unnamed: 0'], axis=1)

# display dataframe
print(df)
# - Index column dropped

# ## 2.2.2 Checking for duplicate records

# checking whether we have any duplicated rows
df.duplicated().sum()
# - No duplicate records.

# Details of the dataset, like the number of columns, non-null count and data type of each column
df.info()
# - feature_1 has mixed values (int, float, 'unknown').
# - Although the column looks like a date column, we will fill the unknown values with the most
#   frequently occurring value.

# ## 2.2.3 Handling unknown values in feature_1 column

# Filling the "unknown" values with the most frequently occurring value.
df['feature_1'] = pd.to_numeric(df['feature_1'], errors='coerce')
df['feature_1'] = df['feature_1'].fillna(int(df.feature_1.mode()))

# display dataframe
print(df.head())
df.info()

# ## 2.2.4 Filling the missing values with mean

# Checking for null values in all the columns (features & target)
df.isnull().sum()
# - feature_20, feature_26, feature_27, feature_28, feature_29 and feature_30 have missing values
#   which need to be filled.

# Filling the missing values with the mean
df = df.apply(pd.to_numeric, errors='coerce')
df = df.fillna(df.mean())

# Checking for null values again after filling the missing values with the mean
df.isnull().sum()
# - No more missing values in the dataset

# ## 2.2.5 Changing boolean to 0 and 1 values

# Making sure all the columns hold numeric values
df.info()
# - We have 2 columns (feature_12, feature_16) which are boolean and need to be converted to
#   numeric: 1 for True and 0 for False.

df['feature_12'] = df['feature_12'].astype(int)
df['feature_16'] = df['feature_16'].astype(int)

# Checking the dataframe again to confirm that all the columns are numeric
df.info()
# - Now all the values in the dataframe are numeric

# ## 2.2.6 Dropping feature_1, as the column values are not consistent and we are not performing
#          any time series analysis
df = df.drop(['feature_1'], axis=1)
df.head()

# ## 2.2.7 Feature Selection
from sklearn import preprocessing

# Feature scaling: transform features by scaling each feature to a (0, 1) range
x = df.values  # returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_scaled = pd.DataFrame(x_scaled)
print(df_scaled)

X = df_scaled.iloc[:, :-1]
y = df_scaled.iloc[:, -1]
print(X.shape)
print(y.shape)

# Splitting test and train data
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# fitting multiple regression models to the training set
print("Shape of X_train: ", X_train.shape)
print("Shape of X_test: ", X_test.shape)
print("Shape of y_train: ", y_train.shape)
print("Shape of y_test", y_test.shape)

# ## Checking the correlation among the features
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(25, 20))
cor = X_train.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.CMRmap_r)
plt.show()

# function to find highly correlated features
def correlation(dataset, threshold):
    """
    dataset: the dataset to compute the correlation on
    threshold: features with a correlation above this value will be added to the set col_corr
    return: col_corr, the set of all features with a correlation above the threshold value.
            Here the absolute coefficient value is taken into consideration.
    """
    col_corr = set()  # set of the names of all correlated columns
    corr_matrix = dataset.corr()
    for i in range(len(corr_matrix.columns)):
        for j in range(i):
            if abs(corr_matrix.iloc[i, j]) > threshold:  # we are interested in the absolute coeff value
                colname = corr_matrix.columns[i]  # getting the name of the column
                col_corr.add(colname)
    return col_corr

corr_features = correlation(X_train, 0.89)  # gets correlated features with more than .89 value
len(set(corr_features))
print(corr_features)

# drop correlated features
X_train_dropped = X_train.drop(corr_features, axis=1)
X_test_dropped = X_test.drop(corr_features, axis=1)

# # 2.3 model training

# metrics
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error

# ## 2.3.1 Linear Regression

# Fitting the Linear Regression model to the dataset
from sklearn.linear_model import LinearRegression

regressor_lr = LinearRegression()
regressor_lr.fit(X_train_dropped, y_train)

# predicting the test set results
y_pred_lr = regressor_lr.predict(X_test_dropped)

# calculating metrics
r2_score_lr = r2_score(y_test, y_pred_lr)
mae_lr = mean_absolute_error(y_test, y_pred_lr)
mse_lr = mean_squared_error(y_test, y_pred_lr)
rmse_lr = np.sqrt(mse_lr)
print('R2_score: ', r2_score_lr)
print('Mean Absolute Error: ', mae_lr)
print('Mean Squared Error: ', mse_lr)
print("RMSE: ", rmse_lr)

# ## 2.3.2 Support Vector Regression

# Fitting the Support Vector Regression model to the dataset
from sklearn.svm import SVR

regressor_svr = SVR(kernel='poly', gamma='scale')
regressor_svr.fit(X_train_dropped, y_train)

# predicting the test set results
y_pred_svr = regressor_svr.predict(X_test_dropped)

# calculating metrics
r2_score_svr = r2_score(y_test, y_pred_svr)
mae_svr = mean_absolute_error(y_test, y_pred_svr)
mse_svr = mean_squared_error(y_test, y_pred_svr)
rmse_svr = np.sqrt(mse_svr)
print('R2_score: ', r2_score_svr)
print('Mean Absolute Error: ', mae_svr)
print('Mean Squared Error: ', mse_svr)
print("RMSE: ", rmse_svr)

# ## 2.3.3 Decision Tree

# Fitting the Decision Tree Regression model to the dataset
from sklearn.tree import DecisionTreeRegressor

regressor_dt = DecisionTreeRegressor(random_state=0)
regressor_dt.fit(X_train_dropped, y_train)

# predicting the test set results
y_pred_dt = regressor_dt.predict(X_test_dropped)

# calculating metrics
r2_score_dt = r2_score(y_test, y_pred_dt)
mae_dt = mean_absolute_error(y_test, y_pred_dt)
mse_dt = mean_squared_error(y_test, y_pred_dt)
rmse_dt = np.sqrt(mse_dt)
print('R2_score: ', r2_score_dt)
print('Mean Absolute Error: ', mae_dt)
print('Mean Squared Error: ', mse_dt)
print("RMSE: ", rmse_dt)

# ## 2.3.4 Random Forest

# Fitting the Random Forest Regression model to the dataset
from sklearn.ensemble import RandomForestRegressor

regressor_rf = RandomForestRegressor(n_estimators=100, random_state=0)
regressor_rf.fit(X_train_dropped, y_train)

# predicting the test set results
y_pred_rf = regressor_rf.predict(X_test_dropped)

# calculating metrics
r2_score_rf = r2_score(y_test, y_pred_rf)
mae_rf = mean_absolute_error(y_test, y_pred_rf)
mse_rf = mean_squared_error(y_test, y_pred_rf)
rmse_rf = np.sqrt(mse_rf)
print('R2_score: ', r2_score_rf)
print('Mean Absolute Error: ', mae_rf)
print('Mean Squared Error: ', mse_rf)
print("RMSE: ", rmse_rf)

# # 2.4 model evaluation (evaluate model perf and display metrics)

# All model names appended to the list
benchmark_metrics = ['Linear Regression', 'Support Vector Regression', 'Decision Tree', 'Random Forest']
# All model RMSE values appended to the list
RMSE_values = [rmse_lr, rmse_svr, rmse_dt, rmse_rf]
# All model MAE values appended to the list
MAE_values = [mae_lr, mae_svr, mae_dt, mae_rf]
# All model MSE values appended to the list
MSE_values = [mse_lr, mse_svr, mse_dt, mse_rf]
# All model R2_score values appended to the list
R2_score = [r2_score_lr, r2_score_svr, r2_score_dt, r2_score_rf]

# ### Bar graph for models Vs RMSE values
fig_dims = (10, 5)
fig, ax = plt.subplots(figsize=fig_dims)
ax.bar(benchmark_metrics, RMSE_values, color=['#2a9d8f', '#e9c46a', '#f4a261', '#e76f51'])
ax.set_xlabel("Model", fontweight='bold')
ax.set_ylabel("RMSE values", fontweight='bold')
ax.set_title('Accuracy by model and RMSE value', fontweight='bold')
plt.show()

# ### Bar graph for models Vs MSE values
fig_dims = (10, 5)
fig, ax = plt.subplots(figsize=fig_dims)
ax.bar(benchmark_metrics, MSE_values, color=['#2a9d8f', '#e9c46a', '#f4a261', '#e76f51'])
ax.set_xlabel("Model", fontweight='bold')
ax.set_ylabel("MSE values", fontweight='bold')
ax.set_title('Accuracy by model and MSE value', fontweight='bold')
plt.show()

# ### Bar graph for models Vs MAE values
fig_dims = (10, 5)
fig, ax = plt.subplots(figsize=fig_dims)
ax.bar(benchmark_metrics, MAE_values, color=['#2a9d8f', '#e9c46a', '#f4a261', '#e76f51'])
ax.set_xlabel("Model", fontweight='bold')
ax.set_ylabel("MAE values", fontweight='bold')
ax.set_title('Accuracy by model and MAE value', fontweight='bold')
# plt.xticks(rotation=90)
plt.show()

# ### Grouped bar graph for models Vs RMSE, MAE and MSE values
X = np.arange(4)
barWidth = 0.2
fig = plt.figure(figsize=(10, 5))
ax = fig.add_axes([0, 0, 1, 1])
ax.bar(X + 0.00, RMSE_values, color='b', width=0.2, label='RMSE')
ax.bar(X + 0.2, MAE_values, color='g', width=0.2, label='MAE')
ax.bar(X + 0.40, MSE_values, color='r', width=0.2, label='MSE')
ax.set_title('Accuracy of model Vs RMSE, MAE, MSE value', fontweight='bold')
plt.xlabel('Model', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(benchmark_metrics))], benchmark_metrics)
plt.legend()
plt.show()

# ### Bar graph for models Vs R2_score values
fig_dims = (10, 5)
fig, ax = plt.subplots(figsize=fig_dims)
ax.bar(benchmark_metrics, R2_score, color=['#2a9d8f', '#e9c46a', '#f4a261', '#e76f51'])
ax.set_xlabel("Model", fontweight='bold')
ax.set_ylabel("R2_score values", fontweight='bold')
ax.set_title('Accuracy by model and R2_score value', fontweight='bold')
# plt.xticks(rotation=90)
plt.show()

# Thanks !
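# A quick sanity check of the correlation() helper defined above on a toy
# frame (pandas is assumed available, as in the rest of this script; the
# column names and values here are made up for illustration):
toy = pd.DataFrame({'a': [1, 2, 3, 4],
                    'b': [2, 4, 6, 8],   # perfectly correlated with 'a'
                    'c': [4, 1, 3, 2]})
to_drop = correlation(toy, 0.89)
print(to_drop)                    # {'b'} -- only the duplicated signal is flagged
print(toy.drop(to_drop, axis=1))  # 'a' and 'c' remain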
[ "bhumikdv@gmail.com" ]
bhumikdv@gmail.com
a8528dc9fa8df8318566fef48cd5e410399b8155
6db6d7445b51eb6c2aae428f8481dabbd49ba5ec
/ATAL/contest2Q2.py
62d8563d8d0b9736f44e5860df969d2d6459312f
[]
no_license
ManoMax/python-studies
ab16535cab30965710331d1fa6d8e0ecb219b657
cd73b830f7e2894af4eda700c71e2992b0160f7a
refs/heads/master
2023-04-18T03:06:12.958281
2021-04-26T00:20:35
2021-04-26T00:20:35
130,414,154
1
0
null
null
null
null
UTF-8
Python
false
false
413
py
num = int(input())
cont = 0
array_x = []
array_y = []
used = [0] * num

for i in range(num):
    ax, ay = [int(j) for j in input().split()]
    array_x.append(ax)
    array_y.append(ay)

def aux(elem):
    used[elem] = 1
    for i in range(num):
        if not used[i] and (array_x[i] == array_x[elem] or array_y[i] == array_y[elem]):
            aux(i)

for i in range(num):
    if not used[i]:
        aux(i)
        cont = cont + 1

print(cont - 1)
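# Note: for large inputs the recursive aux() above can exceed Python's default
# recursion limit; an equivalent iterative flood fill (a sketch reusing the
# same arrays) avoids that:
def aux_iterative(elem):
    stack = [elem]
    while stack:
        cur = stack.pop()
        if used[cur]:
            continue
        used[cur] = 1
        for i in range(num):
            if not used[i] and (array_x[i] == array_x[cur] or array_y[i] == array_y[cur]):
                stack.append(i)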
[ "gabrielmaxcontato@gmail.com" ]
gabrielmaxcontato@gmail.com
899eacf0f831e91334c97c6010b2d9b3db685973
79f99302533d155f1f471629fafe24e352f0fb93
/bancabcco/settings.py
bf26a76956ffe52085a72e09f77164bf761b1250
[]
no_license
hristo-grudev/bancabcco
c37030e7fd5d275d05a3996e43bf3d98b5addb6f
022bd28a6622ce683c6eea120d0d6611e65f64b8
refs/heads/main
2023-04-03T16:24:13.752705
2021-04-12T10:56:27
2021-04-12T10:56:27
357,158,495
0
0
null
null
null
null
UTF-8
Python
false
false
359
py
BOT_NAME = 'bancabcco'

SPIDER_MODULES = ['bancabcco.spiders']
NEWSPIDER_MODULE = 'bancabcco.spiders'

FEED_EXPORT_ENCODING = 'utf-8'
LOG_LEVEL = 'ERROR'
DOWNLOAD_DELAY = 0
ROBOTSTXT_OBEY = True

ITEM_PIPELINES = {
    'bancabcco.pipelines.BancabccoPipeline': 100,
}

USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'
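# A small sketch of how these values are picked up at runtime. Run it from
# inside the Scrapy project (next to scrapy.cfg); get_project_settings() is
# Scrapy's standard project-settings loader:
from scrapy.utils.project import get_project_settings

settings = get_project_settings()
print(settings.get('BOT_NAME'))            # 'bancabcco'
print(settings.getbool('ROBOTSTXT_OBEY'))  # True
print(settings.getdict('ITEM_PIPELINES'))  # {'bancabcco.pipelines.BancabccoPipeline': 100}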
[ "hr.grudev@gmail.com" ]
hr.grudev@gmail.com
a4a3eb1a39616bff525f509f8aa457b29a995d8e
91015480741ec59dda36712d71e7e6f0704bc516
/model_zoo/official/cv/resnet152/src/dataset.py
020ecb5d2367f09a1b58d18af9a74b80edc0a50e
[ "Apache-2.0", "Libpng", "LGPL-3.0-only", "AGPL-3.0-only", "MPL-1.1", "BSD-3-Clause-Open-MPI", "LicenseRef-scancode-mit-nagy", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-python-cwi", "LGPL-2.1-only", "OpenSSL", "LicenseRef-scancode-other-copyleft", "IJG", "Zlib", "MPL-1.0", "LicenseRef-scancode-other-permissive", "libtiff", "NTP", "MIT", "LicenseRef-scancode-unknown-license-reference", "Python-2.0", "GPL-1.0-or-later", "LicenseRef-scancode-gary-s-brown", "MPL-2.0", "BSD-3-Clause", "Unlicense", "0BSD", "MPL-2.0-no-copyleft-exception", "LicenseRef-scancode-free-unknown", "GPL-2.0-only", "BSL-1.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause" ]
permissive
Ming-blue/mindspore
b5dfa6af7876b00163ccfa2e18512678026c232b
9ec8bc233c76c9903a2f7be5dfc134992e4bf757
refs/heads/master
2023-06-23T23:35:38.143983
2021-07-14T07:36:40
2021-07-14T07:36:40
286,421,966
1
0
Apache-2.0
2020-08-10T08:41:45
2020-08-10T08:41:45
null
UTF-8
Python
false
false
9,887
py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
create train or eval dataset.
"""
import os

import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2
from mindspore.communication.management import init, get_rank, get_group_size


def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    create a train or evaluate cifar10 dataset for resnet50

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    if not do_train:
        dataset_path = os.path.join(dataset_path, 'eval')
    else:
        dataset_path = os.path.join(dataset_path, 'train')

    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        if distribute:
            init()
            rank_id = get_rank()
            device_num = get_group_size()
        else:
            device_num = 1

    if device_num == 1:
        ds = de.Cifar10Dataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        ds = de.Cifar10Dataset(dataset_path, num_parallel_workers=8, shuffle=True,
                               num_shards=device_num, shard_id=rank_id)

    # define map operations
    trans = []
    if do_train:
        trans += [
            C.RandomCrop((32, 32), (4, 4, 4, 4)),
            C.RandomHorizontalFlip(prob=0.5)
        ]

    trans += [
        C.Resize((224, 224)),
        C.Rescale(1.0 / 255.0, 0.0),
        C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        C.HWC2CHW()
    ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds


def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    create a train or eval imagenet2012 dataset for resnet50

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    if not do_train:
        dataset_path = os.path.join(dataset_path, 'val')
    else:
        dataset_path = os.path.join(dataset_path, 'train')

    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        if distribute:
            init()
            rank_id = get_rank()
            device_num = get_group_size()
        else:
            device_num = 1

    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds


def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    create a train or eval imagenet2012 dataset for resnet101

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    if not do_train:
        dataset_path = os.path.join(dataset_path, 'val')
    else:
        dataset_path = os.path.join(dataset_path, 'train')

    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        if distribute:
            init()
            rank_id = get_rank()
            device_num = get_group_size()
        else:
            device_num = 1
            rank_id = 1

    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [0.475 * 255, 0.451 * 255, 0.392 * 255]
    std = [0.275 * 255, 0.267 * 255, 0.278 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(rank_id / (rank_id + 1)),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds


def create_dataset4(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    create a train or eval imagenet2012 dataset for se-resnet50

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        if distribute:
            init()
            rank_id = get_rank()
            device_num = get_group_size()
        else:
            device_num = 1

    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [123.68, 116.78, 103.94]
    std = [1.0, 1.0, 1.0]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(292),
            C.CenterCrop(256),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=12)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds


def _get_rank_info():
    """
    get rank size and rank id
    """
    rank_size = int(os.environ.get("RANK_SIZE", 1))

    if rank_size > 1:
        rank_size = get_group_size()
        rank_id = get_rank()
    else:
        rank_size = 1
        rank_id = 0

    return rank_size, rank_id
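# Hypothetical single-device usage of the pipelines above (the dataset path is
# a placeholder; iterator details may vary slightly across MindSpore versions):
if __name__ == "__main__":
    ds_train = create_dataset2("/path/to/imagenet2012", do_train=True,
                               repeat_num=1, batch_size=32,
                               target="Ascend", distribute=False)
    print(ds_train.get_dataset_size())  # number of batches per epoch
    for batch in ds_train.create_dict_iterator(num_epochs=1):
        print(batch["image"].shape, batch["label"].shape)
        break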
[ "ruippan@126.com" ]
ruippan@126.com
e7c7e1ac5d231ee5f40f075cb49c007b28690385
c9036301aff9c02162f1a5a0641d16c94367a8f4
/highway_tramultask/train.py
7cd1267c3bb8587d25011109e2f9e0b1777e101a
[ "MIT" ]
permissive
daweidavidwang/temp
cb54bebf45214d80fe8727977b526bab4432df94
c9014595bd9a391a542adbb6ed434ccb7efc254d
refs/heads/master
2023-02-25T03:18:38.802374
2021-01-29T11:32:36
2021-01-29T11:32:36
316,666,342
0
0
null
null
null
null
UTF-8
Python
false
false
2,676
py
import os

import torch
import gym
import numpy as np
from tensorboardX import SummaryWriter

import highway_env
from dqnagent import DQNAgent

ENV = gym.make('highway-v0')
EPISODES = 200000
STORE_PATH = "./model"
tensorboard_saved_dir = os.path.join(STORE_PATH, "tensorboard")
writer = SummaryWriter(tensorboard_saved_dir)


def write_tenboard(writer, episode, reward, step):
    writer.add_scalar('episode/reward', np.sum(reward), episode)
    writer.add_scalar('episode/step', step, episode)


def train():
    agent = DQNAgent(ENV)
    agent.set_writer(writer)
    for episde in range(0, EPISODES):
        reward = []
        step = 0
        current_state, current_future_pos, current_past_pos = \
            ENV.reset()
        while True:
            ENV.render()
            action = agent.act(current_state, current_past_pos)
            next_state, next_future_pos, next_past_pos, reward, done, info = \
                ENV.step(action)
            # state, action, reward, next_state, done, info
            agent.record(current_state, current_future_pos, current_past_pos,
                         action, reward,
                         next_state, next_future_pos, next_past_pos,
                         done, info)
            step += 1
            if done:
                if episde % 100 == 0:
                    filename = os.path.join(STORE_PATH, str(episde))
                    agent.save(filename)
                write_tenboard(writer, episde, reward, step)
                print("step = ", step, " sum reward = ", np.sum(reward))
                break
            current_state, current_future_pos, current_past_pos = \
                next_state, next_future_pos, next_past_pos


def test():
    agent = DQNAgent(ENV)
    agent.set_writer(writer)
    agent.eval()
    agent.load("/home/glp/highway_project/highway_envimage/model/6100")
    for episde in range(EPISODES):
        reward = []
        step = 0
        state = ENV.reset()
        done = False
        average_speed = 0
        slowernum = 0
        step = 1
        while True:
            ENV.render()
            action = agent.act(state)
            next_state, reward, done, info = ENV.step(action)
            # state, action, reward, next_state, done, info
            state = next_state
            average_speed = average_speed + (info['speed'] - average_speed) / step
            if info['action'] == 4:
                slowernum += 1
            step += 1
            if done:
                print("average speed = ", average_speed, ' slowernum = ', slowernum,
                      ' crash = ', info['crashed'])
                break


if __name__ == "__main__":
    train()
    # test()
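# agent.record() is implemented in dqnagent, which is not part of this file; a
# minimal replay-buffer sketch with the same call shape (hypothetical, for
# illustration only) would be:
import random
from collections import deque

class ReplayBufferSketch(object):
    def __init__(self, capacity=100000):
        self.buffer = deque(maxlen=capacity)

    def record(self, *transition):
        # store whatever transition tuple the training loop passes in
        self.buffer.append(transition)

    def sample(self, batch_size):
        return random.sample(self.buffer, min(batch_size, len(self.buffer)))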
[ "dawei.wang@inceptio.ai" ]
dawei.wang@inceptio.ai
7a7801229cd87021ea7f5fef9aa98f4639ba4fde
8a2d2d4419180576f9a4ceefbca5cc207d13ed0a
/dataset/pascal2tfrecord.py
87ff642dda430f4b054f69bbb0efaff161d9c141
[]
no_license
Samjith888/EfficientDet-tf-custom_dataset
570eb6ede1f704a4b71573c736d6eed422a79aed
a0129e810b169b485f94860a6a650c28e83241f8
refs/heads/master
2022-05-28T13:38:23.524304
2020-04-29T07:52:34
2020-04-29T07:52:34
257,501,864
2
1
null
null
null
null
UTF-8
Python
false
false
7,924
py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import glob
import logging
import numpy as np
import xml.etree.cElementTree as ET
import os

import tensorflow.compat.v1 as tf

NAME_LABEL_MAP = {'back_ground': 0,
                  'class_name': 1}


def int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def int64_list_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def bytes_list_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def float_list_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
    """Opens all TFRecord shards for writing and adds them to an exit stack.

    Args:
        exit_stack: A context2.ExitStack used to automatically closed the TFRecords
            opened in this function.
        base_path: The base path for all shards
        num_shards: The number of shards

    Returns:
        The list of opened TFRecords. Position k in the list corresponds to shard k.
    """
    tf_record_output_filenames = [
        '{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
        for idx in range(num_shards)
    ]
    tfrecords = [
        exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
        for file_name in tf_record_output_filenames
    ]
    return tfrecords


def read_xml_gtbox_and_label(xml_path):
    tree = ET.parse(xml_path)
    root = tree.getroot()
    img_width = None
    img_height = None
    box_list = []
    for child_of_root in root:
        if child_of_root.tag == 'size':
            for child_item in child_of_root:
                if child_item.tag == 'width':
                    img_width = int(child_item.text)
                if child_item.tag == 'height':
                    img_height = int(child_item.text)
        if child_of_root.tag == 'object':
            label = None
            for child_item in child_of_root:
                if child_item.tag == 'name':
                    label = NAME_LABEL_MAP[child_item.text]
                if child_item.tag == 'polygon':
                    tmp_box = []
                    for node in child_item:
                        tmp_box.append(float(node.text))
                    assert label is not None, 'label is none, error'
                    tmp_box.append(label)
                    box_list.append(tmp_box)
    gtbox_label = np.array(box_list, dtype=np.int32)
    xmin = (gtbox_label[:, 0:-1:2].min(axis=1) / img_width).tolist()
    xmax = (gtbox_label[:, 0:-1:2].max(axis=1) / img_width).tolist()
    ymin = (gtbox_label[:, 1:-1:2].min(axis=1) / img_height).tolist()
    ymax = (gtbox_label[:, 1:-1:2].max(axis=1) / img_height).tolist()
    category_ids = gtbox_label[:, -1].tolist()
    return img_height, img_width, xmin, ymin, xmax, ymax, category_ids


def load_txt_annotations(txt_annotation_path):
    with open(txt_annotation_path, 'r') as f:
        txt = f.readlines()
        annotations = [line.strip() for line in txt if len(line.strip().split()[1:]) != 0]
    return annotations


def read_txt_gtbox_and_label(annotation):
    line = annotation.split()
    image_name = line[0].split('/')[-1]
    bboxes = np.array([list(map(lambda x: int(float(x)), box.split(','))) for box in line[1:]])
    # shape [m, 9]
    bboxes = np.reshape(bboxes, [-1, 9])
    x_list = bboxes[:, 0:-2:2]
    y_list = bboxes[:, 1:-1:2]
    class_id = (bboxes[:, -1] + 1).tolist()
    print(class_id)
    y_max = (np.max(y_list, axis=1) / 2048).tolist()
    y_min = (np.min(y_list, axis=1) / 2048).tolist()
    x_max = (np.max(x_list, axis=1) / 2448).tolist()
    x_min = (np.min(x_list, axis=1) / 2448).tolist()
    print(y_max)
    return image_name, x_min, y_min, x_max, y_max, class_id


def create_tf_example(img_height, img_width, box_xmin, box_ymin, box_xmax, box_ymax,
                      category_ids, image_path):
    img_full_path = image_path
    with tf.gfile.GFile(img_full_path, 'rb') as fid:
        encoded_jpg = fid.read()
    if img_height and img_width:
        image_height = img_height
        image_width = img_width
    else:
        with tf.Session() as sess:
            image = tf.image.decode_png(encoded_jpg)
            shape_tuple = image.eval().shape
            image_height = shape_tuple[0]
            image_width = shape_tuple[1]

    feature_dict = {
        'image/height': int64_feature(image_height),
        'image/width': int64_feature(image_width),
        'image/encoded': bytes_feature(encoded_jpg),
        'image/format': bytes_feature('png'.encode('utf8')),
    }

    xmin = box_xmin
    xmax = box_xmax
    ymin = box_ymin
    ymax = box_ymax
    category_ids = category_ids
    feature_dict.update({
        'image/object/bbox/xmin': float_list_feature(xmin),
        'image/object/bbox/xmax': float_list_feature(xmax),
        'image/object/bbox/ymin': float_list_feature(ymin),
        'image/object/bbox/ymax': float_list_feature(ymax),
        'image/object/class/label': int64_list_feature(category_ids)
    })
    example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
    return example


def create_tf_record_from_xml(image_path, xml_path, tf_output_path, tf_record_num_shards, img_format):
    logging.info('writing to output path: %s', tf_output_path)
    writers = [tf.python_io.TFRecordWriter(tf_output_path + '-%05d-of-%05d.tfrecord'
                                           % (i, tf_record_num_shards))
               for i in range(tf_record_num_shards)]
    for count, xml in enumerate(glob.glob(xml_path + '/*.xml')):
        # to avoid path errors on different development platforms
        xml = xml.replace('\\', '/')
        img_name = xml.split('/')[-1].split('.')[0] + img_format
        img_path = image_path + '/' + img_name
        if not os.path.exists(img_path):
            print('{} is not exist!'.format(img_path))
            continue
        img_height, img_width, xmin, ymin, xmax, ymax, category_ids = read_xml_gtbox_and_label(xml)
        example = create_tf_example(None, None, xmin, ymin, xmax, ymax, category_ids, img_path)
        # example = create_tf_example(img_height, img_width, xmin, ymin, xmax, ymax, category_ids, img_path)
        writers[count % tf_record_num_shards].write(example.SerializeToString())


def create_tf_record_from_txt(image_dir_path, txt_path, tf_output_path, tf_record_num_shards):
    logging.info('writing to output path: %s', tf_output_path)
    writers = [tf.python_io.TFRecordWriter(tf_output_path + '-%05d-of-%05d.tfrecord'
                                           % (i, tf_record_num_shards))
               for i in range(tf_record_num_shards)]
    annotations = load_txt_annotations(txt_path)
    for count, annotation in enumerate(annotations):
        # to avoid path errors on different development platforms
        print("****************************")
        image_name, xmin, ymin, xmax, ymax, category_ids = read_txt_gtbox_and_label(annotation)
        img_path = image_dir_path + '/' + image_name
        if not os.path.exists(img_path):
            print('{} is not exist!'.format(img_path))
            continue
        example = create_tf_example(None, None, xmin, ymin, xmax, ymax, category_ids, img_path)
        writers[count % tf_record_num_shards].write(example.SerializeToString())


def main(_):
    # create_tf_record_from_xml(image_path="/home/lwp/efficientdet/dataset/img",
    #                           xml_path="/home/lwp/efficientdet/dataset/xml",
    #                           tf_output_path="/home/lwp/efficientdet/dataset/tfrecord/",
    #                           tf_record_num_shards=5, img_format=".png")
    create_tf_record_from_txt(image_dir_path="",
                              txt_path="trainnew.txt",
                              tf_output_path="/tfrecord",
                              tf_record_num_shards=5)


if __name__ == '__main__':
    tf.app.run(main)
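# A sketch of reading the shards back with the same feature keys written by
# create_tf_example() above (the file pattern is a placeholder; tf is the
# tensorflow.compat.v1 module imported at the top of this file):
def parse_example(serialized):
    features = {
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
        'image/object/class/label': tf.VarLenFeature(tf.int64),
    }
    return tf.parse_single_example(serialized, features)

def load_shards(pattern='/tfrecord-*.tfrecord'):
    dataset = tf.data.TFRecordDataset(tf.gfile.Glob(pattern))
    return dataset.map(parse_example)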
[ "samjith@detecttechnologies.com" ]
samjith@detecttechnologies.com
94370865d26031d9d5c812275cd02b83ef10dd0e
138c81b38013bc19eed814458c1440d641d18a9f
/dldt/dldt_object_detection.py
473487df9532c5082b63d33f53fcc9e748046125
[]
no_license
vdevaram/deep_learning_utilities_cpu
b12c4a49759274533ba09880616e67385b92115c
d2a0dd3362443bb25715c7f953ca41ca1504aa7e
refs/heads/master
2021-05-09T06:40:21.265510
2020-12-01T10:55:19
2020-12-01T10:55:19
119,337,125
4
6
null
2020-03-22T12:43:46
2018-01-29T05:40:54
Python
UTF-8
Python
false
false
8,813
py
###########################################################################
# This is a sample script file to run Caffe/TF inference benchmarks with
# mkldnn library on Intel CPUs using DLDT tool
# Please contact vinod.devarampati@intel.com for any clarifications
# Instructions to fill the variable are given in comments
############################################################################
# Mandatory : Install latest DLDT from
#  - https://software.intel.com/en-us/openvino-toolkit/choose-download
############################################################################
import argparse
import os

MODEL_COMMON_PATH = "/home/vinod/dldt/object_detection/"
SAMPLES_COMMON_PATH = "/home/vinod/dldt/samples/intel64/Release/"

models = {
    "MASK_RCNN_RES50": [
        MODEL_COMMON_PATH + "mask_rcnn_resnet50_atrous_coco_2018_01_28/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "mask_rcnn_demo"],
    "MASK_RCNN_RES101": [
        MODEL_COMMON_PATH + "mask_rcnn_resnet101_atrous_coco_2018_01_28/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "mask_rcnn_demo"],
    "FRCNN_RES50": [
        MODEL_COMMON_PATH + "faster_rcnn_resnet50_coco_2018_01_28/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "FRCNN_RES101": [
        MODEL_COMMON_PATH + "faster_rcnn_resnet101_coco_2018_01_28/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "RFCN_RES101": [
        MODEL_COMMON_PATH + "rfcn_resnet101_coco_2018_01_28/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "SSD_MOB_V2": [
        MODEL_COMMON_PATH + "ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "I8_FRCNN_RES50": [
        MODEL_COMMON_PATH + "faster_rcnn_resnet50_coco_2018_01_28/frozen_inference_graph_i8.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "I8_FRCNN_RES101": [
        MODEL_COMMON_PATH + "faster_rcnn_resnet101_coco_2018_01_28/frozen_inference_graph_i8.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "I8_RFCN_RES101": [
        MODEL_COMMON_PATH + "rfcn_resnet101_coco_2018_01_28/frozen_inference_graph_i8.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "I8_SSD_MOB_V2": [
        MODEL_COMMON_PATH + "ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph_i8.xml",
        SAMPLES_COMMON_PATH + "object_detection_sample_ssd"],
    "I8_MASK_RCNN_RES50": [
        MODEL_COMMON_PATH + "imask_rcnn_resnet50_atrous_coco_2018_01_28/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "mask_rcnn_demo"],
    "I8_MASK_RCNN_RES101": [
        MODEL_COMMON_PATH + "mask_rcnn_resnet101_atrous_coco_2018_01_28/frozen_inference_graph.xml",
        SAMPLES_COMMON_PATH + "mask_rcnn_demo"],
}


def print_results(args):
    """ """
    files = os.listdir(args.log_dir)
    topology = {}
    for file_name in files:
        if file_name.endswith(".log"):
            with open(os.path.join(args.log_dir, file_name)) as fp:
                data = fp.read()
                start = data.find("iteration:") + 11
                if start <= 11:
                    continue
                end = data.find(" ", start)
                val = float(data[start:end])
                parse = file_name.split(".")[0].split('_')
                bs = parse[-2]
                stream = parse[-1]
                top = parse[0]
                for name in parse[1:-3]:
                    top = top + '_' + name
                if topology.get(top) != None:
                    if topology[top].get(bs) != None:
                        if topology[top][bs].get(stream) != None:
                            topology[top][bs][stream].append(val)
                        else:
                            topology[top][bs][stream] = []
                            topology[top][bs][stream].append(val)
                    else:
                        topology[top][bs] = {}
                        topology[top][bs][stream] = []
                        topology[top][bs][stream].append(val)
                else:
                    topology[top] = {}
                    topology[top][bs] = {}
                    topology[top][bs][stream] = []
                    topology[top][bs][stream].append(val)
    for top in topology:
        for bs in topology[top]:
            for stream in topology[top][bs]:
                average_latency = sum(topology[top][bs][stream]) / float(len(topology[top][bs][stream]))
                num_streams = float(stream[3:])
                fps = int(bs[2:]) * 1000 * num_streams / average_latency
                print(top, "\t\t", bs[2:], "\t", num_streams, "\t", average_latency, "\t", fps)


def create_shell_script(args):
    """ """
    bs = args.batch_size
    if args.cpu == "8180" or args.cpu == "8280":
        NUM_CORES = 56
        SOCK_CORES = 28
        if bs == 1:
            NUM_STREAMS = [1, 2, 4, 8, 14, 28, 56]
        elif bs == 14:
            NUM_STREAMS = [1, 2, 4, 8]
        elif bs == 16:
            NUM_STREAMS = [1, 2, 4, 8]
        elif bs == 28:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 32:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 56:
            NUM_STREAMS = [1, 2, 4]
    elif args.cpu == "6140" or args.cpu == "6240":
        NUM_CORES = 36
        SOCK_CORES = 18
        if bs == 1:
            NUM_STREAMS = [1, 2, 4, 6]  # ,9,12,18,36]
        elif bs == 6:
            NUM_STREAMS = [1, 2, 4, 6]
        elif bs == 9:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 18:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 36:
            NUM_STREAMS = [1, 2]
    elif args.cpu == "6154" or args.cpu == "6254":
        NUM_CORES = 36
        SOCK_CORES = 18
        if bs == 1:
            NUM_STREAMS = [1, 2, 4, 6]  # ,9,12,18,36]
        elif bs == 6:
            NUM_STREAMS = [1, 2, 4, 6]
        elif bs == 9:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 18:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 36:
            NUM_STREAMS = [1, 2]
    elif args.cpu == "VM6254" or args.cpu == "VM6154":
        NUM_CORES = 16
        SOCK_CORES = 16
        if bs == 1:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 8:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 16:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 32:
            NUM_STREAMS = [1, 2]
    elif args.cpu == "4S6248":
        NUM_CORES = 80
        SOCK_CORES = 20
        if bs == 1:
            NUM_STREAMS = [1, 2, 4, 8]
        elif bs == 8:
            NUM_STREAMS = [1, 2, 4, 5]
        elif bs == 20:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 40:
            NUM_STREAMS = [1, 2, 4]
    elif args.cpu == "6148" or args.cpu == "6248":
        NUM_CORES = 40
        SOCK_CORES = 20
        if bs == 1:
            NUM_STREAMS = [1, 2, 4, 8]
        elif bs == 8:
            NUM_STREAMS = [1, 2, 4, 5]
        elif bs == 20:
            NUM_STREAMS = [1, 2, 4]
        elif bs == 40:
            NUM_STREAMS = [1, 2, 4]

    print("export WKDIR=$HOME/dldt/object_detection")
    if args.topology == 'all':
        top_list = [key for key in models]
    else:
        top_list = args.topology.split(',')
    LOGS_PATH = "$WKDIR/logs"
    print("export DATA_PATH=$WKDIR/coco_test_data")
    print("export SAMPLES_PATH=$WKDIR/samples")
    print("export VINO_PATH=$HOME/intel/openvino/deployment_tools")
    print("source $VINO_PATH/../bin/setupvars.sh")
    for topology in top_list:
        sleep_time = "111s"
        for ns in NUM_STREAMS:
            cores_per_stream = NUM_CORES // ns
            for i in range(ns):
                j = i * cores_per_stream
                k = (i + 1) * cores_per_stream - 1
                log_file = os.path.join(LOGS_PATH,
                                        topology + "_" + str(i) + "_bs" + str(bs) +
                                        "_str" + str(ns) + ".log &")
                if (j < SOCK_CORES) and (k < SOCK_CORES):
                    node = 0
                elif (j >= SOCK_CORES) and (k < SOCK_CORES * 2):
                    node = 1
                elif (j >= SOCK_CORES * 2) and (k < SOCK_CORES * 3):
                    node = 2
                elif (j >= SOCK_CORES * 3) and (k < SOCK_CORES * 4):
                    node = 3
                else:
                    node = -1
                if node >= 0:
                    opt = "numactl -N " + str(node) + " -m " + str(node) + " -C " + str(j) + "-" + str(k) + " "
                else:
                    opt = "numactl -l -C " + str(j) + "-" + str(k) + " "
                print(opt + models[topology][1] + ' -i $DATA_PATH/' + str(bs) + ' -m ' +
                      models[topology][0] + ' -nthreads ' + str(cores_per_stream) +
                      ' -d CPU -ni 100 &>' + log_file)
            print("echo 'Waiting for " + str(ns) + "-streams to finish'")
            print("sleep " + sleep_time)
            print("ps -elf | grep samples | for i in $(awk '{print $4}');do kill -9 $i; done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--cpu", default="6254", help="SKU name")
    parser.add_argument("--topology", default="SSD_MOB_V2", help=" topology name")
    # parser.add_argument("--fw", default="TF", help="caffe/tf")
    parser.add_argument("--batch_size", type=int, default=1, help="i Batch size")
    parser.add_argument("--mode", type=str, default="exe", help="exe/log")
    parser.add_argument("--data_type", type=str, default="f32", help="f32/f16/i8")
    parser.add_argument("--log_dir", type=str, default="./", help="logs directory")
    parser.add_argument("--display", type=bool, default=False, help="Display supported topologies")
    args = parser.parse_args()
    if args.display:
        for key in models:
            print(key, end=",")
    else:
        if args.mode == "exe":
            create_shell_script(args)
        else:
            print_results(args)
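# The log-file naming convention that print_results() above depends on, checked
# against a sample name (topology tokens, stream index, batch size, stream count):
sample = "SSD_MOB_V2_0_bs1_str4.log"
parse = sample.split(".")[0].split("_")
assert "_".join(parse[:-3]) == "SSD_MOB_V2"          # topology
assert (parse[-2][2:], parse[-1][3:]) == ("1", "4")  # batch size, stream count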
[ "noreply@github.com" ]
vdevaram.noreply@github.com
91ea86d8c1433c4f3b991f43622c0b3bbe3136b3
8700ea37c191dd6d7d21a9c8c245ccba59e73124
/EstimateTimeForATransition.py
c4cd39aeb1cc1af1e8ef7947e7c7ca51f8dc6650
[]
no_license
rolaechea/ml-traces-analysis
b62ca205bf13edf7a56e3e3d231618cb11c2a3b6
4d43e61298f6c0eb54b93eae2fedd6a72c5e6151
refs/heads/master
2020-04-05T13:12:03.764118
2019-06-11T18:58:36
2019-06-11T18:58:36
156,891,518
0
0
null
null
null
null
UTF-8
Python
false
false
4,916
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 14:40:55 2019

@author: rafaelolaechea
"""
import argparse

import MLConstants
from pickleFacade import loadObjectFromPickle
from AnalyzerRQ2 import getRegressorToTransitionIdMapping
from AutonomooseTraces import generateBitsetForOneConfigurationAutonomoose
from ConfigurationUtilities import generateBitsetForOneConfiguration, transformFeatureBitmapsToIncludeSquares


def parseArguments():
    """
    Returns an args object with parsed argument or throws an error and exit.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("subjectSystem")
    parser.add_argument("regressorInputFilename", help="filename of learnt regressor to use")
    parser.add_argument("configurationId", help="configuration whose transition execution time will be estimated")
    parser.add_argument("transitionId", help="Transition Id of transition's execution time to estimate")
    args = parser.parse_args()

    if args.subjectSystem not in MLConstants.lstSubjectSystems:
        print("Subject systems must be one of {0}".format(", ".join(MLConstants.lstSubjectSystems)))
        exit(0)

    return args


class TransitionEstimator(object):
    def __init__(self, SubjectSystem, regressorInputFilename=""):
        self.SubjectSystem = SubjectSystem
        self.loadEstimatorForTransitions(regressorInputFilename)
        # if self.SubjectSystem == MLConstants.x264Name
        #     self.

    def loadEstimatorForTransitions(self, regressorInputFilename=""):
        """
        Load estimator to use as a library.
        """
        self.regressorsArray = loadObjectFromPickle(regressorInputFilename)
        self.transitionToRegressorMapping = getRegressorToTransitionIdMapping(self.regressorsArray)

    def estimate(self, configurationId, transitionId):
        """
        Return the estimate for the given configurationId
        """
        if self.SubjectSystem == MLConstants.x264Name:
            configurationInBitset = generateBitsetForOneConfiguration(configurationId)
        else:
            configurationInBitset = generateBitsetForOneConfigurationAutonomoose(configurationId)

        regressorWrapperForSelectedTransition = self.regressorsArray[self.transitionToRegressorMapping[transitionId]]

        if regressorWrapperForSelectedTransition.getUseSquareX():
            configurationInBitset = transformFeatureBitmapsToIncludeSquares([configurationInBitset])[0]

        skRegressor = regressorWrapperForSelectedTransition.getRegressor()
        RawPrediction = skRegressor.predict([configurationInBitset])
        PredictedTransitionExecutionTime = \
            regressorWrapperForSelectedTransition.getScaler().inverse_transform(RawPrediction)[0]

        if not regressorWrapperForSelectedTransition.isLasso():
            PredictedTransitionExecutionTime = PredictedTransitionExecutionTime[0]

        return PredictedTransitionExecutionTime


if __name__ == "__main__":
    """
    Prints the prediction for a given subject system, regressor, videofile and transition id.
    """
    args = parseArguments()

    SubjectSystem = args.subjectSystem
    regressorInputFilename = args.regressorInputFilename
    configurationId = int(args.configurationId)
    transitionId = int(args.transitionId)

    regressorsArray = loadObjectFromPickle(regressorInputFilename)
    transitionToRegressorMapping = getRegressorToTransitionIdMapping(regressorsArray)
    regressorWrapperForSelectedTransition = regressorsArray[transitionToRegressorMapping[transitionId]]

    transitionToConfArrayTimeTaken = {}

    if SubjectSystem == MLConstants.x264Name:
        configurationInBitset = generateBitsetForOneConfiguration(configurationId)
    else:
        print("Not yet implemented for Autonomoose.")
        exit(0)

    if regressorWrapperForSelectedTransition.getUseSquareX():
        configurationInBitset = transformFeatureBitmapsToIncludeSquares([configurationInBitset])[0]

    skRegressor = regressorWrapperForSelectedTransition.getRegressor()
    RawPrediction = skRegressor.predict([configurationInBitset])
    PredictedTransitionExecutionTime = \
        regressorWrapperForSelectedTransition.getScaler().inverse_transform(RawPrediction)[0]

    print(dir(skRegressor))
    print(skRegressor.coef_)
    print(skRegressor.intercept_[0])
    print("Scaler : {0}".format(regressorWrapperForSelectedTransition.getScaler().mean_[0]))

    if not regressorWrapperForSelectedTransition.isLasso():
        PredictedTransitionExecutionTime = PredictedTransitionExecutionTime[0]

    print(PredictedTransitionExecutionTime)
[ "rolaechea@gsd.uwaterloo.ca" ]
rolaechea@gsd.uwaterloo.ca
a5bece4fd2c50b5f90963b093272617330b42b75
6ade743d033a23311a45bffea2d423254a16032b
/dev/track-sale-sms-py/controls_frame.py
005e4f0b54086c3f53610e6a2a56ed862fb5e685
[ "Apache-2.0" ]
permissive
capitalch/trace
ebeb7d86ace4c711cf2be27d914f1a6d0ddaafad
510c947609b06fc179bc8fe5b981a4e3bcb6d610
refs/heads/main
2023-09-01T21:51:45.021429
2023-08-28T07:01:32
2023-08-28T07:01:32
213,639,100
0
0
Apache-2.0
2019-11-17T15:37:44
2019-10-08T12:41:22
TypeScript
UTF-8
Python
false
false
1,475
py
from tkinter import Button, Frame
from tkinter.constants import E, RIDGE

from tkcalendar import DateEntry

from utils import fetch_local_data
from ibuki import Ibuki

sale_date = ''


def fetch_sale_data():
    tuple_data = fetch_local_data('track-sale-sms', sale_date)
    Ibuki.emit('POPULATE-DATA-TABLE-TREE-VIEW', tuple_data)


def send_sms():
    Ibuki.emit('SEND-SMS', '')


def get_controls_frame(root):
    def change_date(e):
        global sale_date
        sale_date = (e.widget.get_date()).isoformat()
        fetch_sale_data()

    global sale_date
    frame = Frame(root, height=50, pady=5, borderwidth=3, relief=RIDGE)
    btn_refresh = Button(frame, text='Refresh', width=10, fg='blue', font=12,
                         padx=10, command=fetch_sale_data)
    btn_refresh.grid(row=0, column=1)
    date_entry = DateEntry(frame, width=12, background='darkblue',
                           foreground='white', borderwidth=2,
                           date_pattern='dd/MM/yyyy')
    date_entry.grid(row=0, column=0, padx=5)
    btn_send_sms = Button(frame, text='Send SMS', width=10, bg='yellow',
                          fg='red', font=12, padx=10, command=send_sms)
    frame.columnconfigure(0, weight=1)
    frame.columnconfigure(1, weight=1)
    frame.columnconfigure(2, weight=10)
    btn_send_sms.grid(row=0, column=2, sticky=E)
    sale_date = (date_entry.get_date()).isoformat()
    date_entry.bind('<<DateEntrySelected>>', change_date)
    return(frame)
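# A minimal harness (a sketch) showing how get_controls_frame() is meant to be
# mounted; it assumes the Ibuki listeners for 'POPULATE-DATA-TABLE-TREE-VIEW'
# and 'SEND-SMS' are registered elsewhere in the application:
if __name__ == '__main__':
    from tkinter import Tk
    root = Tk()
    get_controls_frame(root).pack(fill='x')
    root.mainloop()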
[ "capitalch@gmail.com" ]
capitalch@gmail.com
64f7c71a570166d36c95ad273c373ac5bff92033
76aa7635803e3c573dffe1892b5ffaebff792000
/work/bayes.py
3c4b1afa1584f343f387222fa846372e7320463f
[]
no_license
bruceSz/ML-some-implementation
d809b832ed19db335840ee774e895806c444f3dc
9a39ebb01f43f55aa82a7489c7ecada13893c55f
refs/heads/master
2020-06-17T19:48:36.559759
2018-09-16T11:19:15
2018-09-16T11:19:15
74,975,314
0
0
null
null
null
null
UTF-8
Python
false
false
2,473
py
#!/usr/bin/python
import numpy as np


def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    class_v = [0, 1, 0, 1, 0, 1]
    return postingList, class_v


def createVList(dataSet):
    vSet = set([])
    for doc in dataSet:
        vSet = vSet | set(doc)
    return list(vSet)


def setOfWordsVec(vList, InputSet):
    retVlist = [0] * len(vList)
    for w in InputSet:
        if w in vList:
            retVlist[vList.index(w)] = 1
        else:
            print 'word ', w, "not in dict"
    return retVlist


def bagOfW2Vec(vList, InputSet):
    retV = [0] * len(vList)
    for w in InputSet:
        if w in vList:
            retV[vList.index(w)] += 1
    return retV


def trainBN0(trainM, trainC):
    numDoc = len(trainM)
    numWord = len(trainM[0])
    pAbusive = sum(trainC) / float(numDoc)
    p0Num = np.ones(numWord)
    p1Num = np.ones(numWord)
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(numDoc):
        if trainC[i] == 1:
            p1Num += trainM[i]
            p1Denom += sum(trainM[i])
        else:
            p0Num += trainM[i]
            p0Denom += sum(trainM[i])
    p1Vect = np.log(p1Num / p1Denom)
    p0Vect = np.log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive


def classifyNB(vec2C, p0V, p1V, pClass):
    p1 = sum(vec2C * p1V) + np.log(pClass)
    p0 = sum(vec2C * p0V) + np.log(1.0 - pClass)
    if p1 > p0:
        return 1
    else:
        return 0


def testingNB():
    listPosts, listC = loadDataSet()
    myVList = createVList(listPosts)
    trainM = []
    for post in listPosts:
        trainM.append(setOfWordsVec(myVList, post))
    p0v, p1v, pAb = trainBN0(np.array(trainM), np.array(listC))
    testE = ['love', 'my', 'dalmation']
    thisdoc = np.array(setOfWordsVec(myVList, testE))
    print testE, 'classified as : ', classifyNB(thisdoc, p0v, p1v, pAb)
    testE = ['stupid', 'garbage']
    thisdoc = np.array(setOfWordsVec(myVList, testE))
    print testE, "classfied as :", classifyNB(thisdoc, p0v, p1v, pAb)


def main():
    testingNB()


if __name__ == "__main__":
    main()
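# Worked check of the smoothed log-space rule implemented by trainBN0() and
# classifyNB() above, on made-up counts (runs under Python 2 or 3): with
# Laplace smoothing a word seen 3 times among 10 class-1 tokens gets
# p = (3+1)/(10+2), and the classifier compares
# sum(doc * log p1) + log(pAbusive) with sum(doc * log p0) + log(1 - pAbusive).
p1_vect = np.log((np.array([3.0, 1.0]) + 1.0) / (10.0 + 2.0))
p0_vect = np.log((np.array([1.0, 4.0]) + 1.0) / (10.0 + 2.0))
doc = np.array([1, 1])
score1 = sum(doc * p1_vect) + np.log(0.5)
score0 = sum(doc * p0_vect) + np.log(1.0 - 0.5)
print(1 if score1 > score0 else 0)  # 0: the class-0 likelihood product 2*5 beats 4*2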
[ "brucesz@zhangsongdeMacBook-Pro.local" ]
brucesz@zhangsongdeMacBook-Pro.local
1aa131979281a93765cfeb1f8add5cec5fd1acd7
17f5dfd1e4f2e6b4f4a497350ca541c341e2ea3c
/models.py
96779476f1870c75f3743ec8664a52156c458f5b
[]
no_license
Nikolai-veb/Blog_Flask
fbbd74f9b5177376174fb9c751030c453bbfe78f
4a0fc7b6b1c55065af7fe3879d838c741066c6ce
refs/heads/master
2022-12-22T03:18:06.789044
2020-10-03T17:53:21
2020-10-03T17:53:21
297,778,202
0
0
null
null
null
null
UTF-8
Python
false
false
1,406
py
from app import db
from datetime import datetime
import re


def slugfy(s):
    pattern = r'[^\w+]'
    return re.sub(pattern, '-', s)


post_tags = db.Table('post_tags',
                     db.Column('post_id', db.Integer, db.ForeignKey('posts.id')),
                     db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
                     )


class Posts(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(150))
    slug = db.Column(db.String(150), unique=True)
    text = db.Column(db.Text)
    created = db.Column(db.DateTime, default=datetime.now())
    tags = db.relationship('Tag', secondary=post_tags,
                           backref=db.backref('posts', lazy='dynamic'),)

    def __init__(self, *args, **kwargs):
        super(Posts, self).__init__(*args, **kwargs)
        self.generate_slug()

    def generate_slug(self):
        if self.title:
            self.slug = slugfy(self.title.lower())

    def __repr__(self):
        return '<Post_id: {} title: {}'.format(self.id, self.title.lower())


class Tag(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    slug = db.Column(db.String(100))

    def __init__(self, *args, **kwargs):
        super(Tag, self).__init__(*args, **kwargs)
        self.slug = slugfy(self.name)

    def __repr__(self):
        return '< Tag id: {}, name {}>'.format(self.id, self.name)
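# Quick behavioural check of slugfy() above. Note the pattern r'[^\w+]' keeps
# literal '+' characters ('+' sits inside the character class) and replaces
# each other non-word character separately; r'\W+' would collapse runs instead.
print(slugfy('hello world'))  # hello-world
print(slugfy('c++ tips'))     # c++-tips
print(slugfy('a  b'))         # a--b (each space becomes its own '-')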
[ "nik.vagin1995@mail.ru" ]
nik.vagin1995@mail.ru
29fb88a6bf317c7942af8630380f72c794e35206
d2b81ede3d3a516f5313835ce07dfea97db46c87
/maple/thomas_kacz/Kacz.py
1a9383ff786c003763e244a203dd078fec7d251f
[]
no_license
p15-git-acc/code
8b847ad95cd59a13446f595ac65d96d6bc45b512
59337447523018dfab71cbbbda53b4bb88b7ce59
refs/heads/master
2022-12-16T14:38:09.073676
2020-09-16T09:23:00
2020-09-16T09:23:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,798
py
from scipy import pi
from scipy import sin
from scipy import exp
import numpy as np
from scipy.optimize import newton
from scipy.optimize import minimize
from scipy.optimize import basinhopping

file = open('zeros1', 'r')
zeros = file.read().split('\n')
zeros = [float(string) for string in zeros[:4000]]
file.close()

print('Approximating by using first {} zeros of zeta.'.format(len(zeros)))


def F(z):
    sum = 0
    for zero in zeros:
        sum += exp(complex(0, zero) * z) / complex(0.5, zero)
    return sum


def f(z):
    sum = 0
    for zero in zeros:
        sum += complex(0, zero) * exp(complex(0, zero) * z) / complex(0.5, zero)
    return sum


def F_N(z, N):
    sum = 0
    for zero in zeros[:N]:
        sum += exp(complex(0, zero) * z) / complex(0.5, zero)
    return sum


def root_near(guess, silent=False):
    """Find a root of F near guess using Newton's method."""
    root = None
    try:
        root = newton(F, guess, fprime=f)
    except:
        if not silent:
            print("Newton's method failed.")
    finally:
        return root


def alpha_a_b(coord, N, silent=True):
    """Calculate alpha, a, b for a rectangle with coordinates coord and
    truncation at N."""
    [x0, x1, y0, y1] = coord
    a = 0
    for zero in zeros[:N]:
        a += exp(-zero * y0) / abs(complex(0.5, zero))
    b = 0
    for zero in zeros[N:]:
        b += exp(-zero * y0) / abs(complex(0.5, zero))

    def F_north(x):
        return abs(F_N(complex(x, y1), N))

    def F_south(x):
        return abs(F_N(complex(x, y0), N))

    def F_east(y):
        return abs(F_N(complex(x1, y), N))

    def F_west(y):
        return abs(F_N(complex(x0, y), N))

    def x_bounds(f_new, x_new, f_old, x_old):
        return x0 <= x_new <= x1

    def y_bounds(f_new, x_new, f_old, x_old):
        return y0 <= x_new <= y1

    min_north = basinhopping(F_north, 0.5 * (x0 + x1), stepsize=0.5 * (x1 - x0),
                             accept_test=x_bounds).fun
    min_south = basinhopping(F_south, 0.5 * (x0 + x1), stepsize=0.5 * (x1 - x0),
                             accept_test=x_bounds).fun
    min_east = basinhopping(F_east, 0.5 * (y0 + y1), stepsize=0.5 * (y1 - y0),
                            accept_test=y_bounds).fun
    min_west = basinhopping(F_west, 0.5 * (y0 + y1), stepsize=0.5 * (y1 - y0),
                            accept_test=y_bounds).fun
    if not silent:
        tuple = (min_north, min_south, min_east, min_west)
        print("alpha = min{}.".format(tuple))
    alpha = min(min_north, min_south, min_east, min_west)
    return alpha, a, b


def old_box_q(coord, N, root):
    """Calculate alpha, a, b according to Kaczorowski."""
    [alpha, a, b] = alpha_a_b(coord, N)
    if alpha - 3 * b < 0:
        return None
    return int(4 * pi * a / (alpha - 3 * b)) + 1


def box_q(coord, N, root, silent=True, compare=False):
    """Calculate q according to Morrill, Platt and Trudgian, with the option
    of comparing to Kaczorowski."""
    [x0, x1, y0, y1] = coord
    if not (x0 <= root.real <= x1 and y0 <= root.imag <= y1) and not silent:
        string = 'Error: Box does not contain root:'
        if not x0 < root.real:
            string += ' x0 too large,'
        if not root.real < x1:
            string += ' x1 too small,'
        if not y0 < root.imag:
            string += ' y0 too large,'
        if not root.imag < y1:
            string += ' y1 too small'
        string = string[:-1] + '.'
        print(string)
        return None
    [alpha, a, b] = alpha_a_b(coord, N, silent=silent)
    if not silent:
        print("alpha = {}.".format(alpha))
        print("a = {}.".format(a))
        print("b = {}.".format(b))
    w = root.imag
    aw = 0
    for zero in zeros[:N]:
        aw += exp(-zero * w) / abs(complex(0.5, zero))
    if not silent:
        print("a_w = {}.".format(aw))
    bw = 0
    for zero in zeros[N:]:
        bw += exp(-zero * w) / abs(complex(0.5, zero))
    if not silent:
        print("b_w = {}.".format(bw))
    q = int(pi * a / (alpha - b - 2 * bw))
    i = 0
    if alpha - b - 2 * bw <= 0:
        if not silent:
            print('Error: Inadmissible box/N.')
        return None
    while not 2 * aw * sin(pi / q) + 2 * pi * a / q <= alpha - b - 2 * bw:
        q += 1
        i += 1
        if i == 1000:
            if not silent:
                print('Error: Timed out.')
            return None
    if compare:
        Kacz_q = old_box_q(coord, N, root)
        if not Kacz_q:
            Kacz_q = 'no result'
        print('As compared to {},'.format(Kacz_q))
    return q


def opti_box(guess, N, root):
    """Optimize q^(-N) as a function of the coordinates (x0, x1, y0, y1)."""
    if not box_q(guess, N, root, silent=False):
        return None
    x, y = root.real, root.imag

    class MyBounds(object):
        def __init__(self, xymax=[x, np.inf, y, np.inf], xymin=[0, x, 0, y]):
            self.xymax = np.array(xymax)
            self.xymin = np.array(xymin)

        def __call__(self, **kwargs):
            x = kwargs["x_new"]
            coord_max = bool(np.all(x <= self.xymax))
            coord_min = bool(np.all(x >= self.xymin))
            unit_wide = bool(x[1] - x[0] < 1)
            good_alpha = bool(box_q(x, N, root, silent=True))
            return coord_max and coord_min and unit_wide and good_alpha

    mybounds = MyBounds()

    class RandomDisplacementBounds(object):
        """random displacement with bounds"""
        def __init__(self, x, y, stepsize=0.5):
            self.x = x
            self.y = y
            self.stepsize = stepsize

        def __call__(self, x):
            """take a random step but ensure the new position is within the bounds"""
            while True:
                xnew = x + np.random.uniform(-self.stepsize, self.stepsize, np.shape(x))
                if x[0] < self.x < x[1] and x[2] < self.y < x[3]:
                    break
            return xnew

    take_step = RandomDisplacementBounds(x, y)

    def opti_q(x):
        coord = x
        q = box_q(coord, N, root, silent=True)
        if not q:
            return 10000
        return q

    result = basinhopping(opti_q, guess, niter=1000, accept_test=mybounds,
                          take_step=take_step)
    if result.fun == 1:
        print("That ain't good.")
    return result


def epsilon_box(x_epsilon, y_epsilon, N, root):
    x, y = root.real, root.imag
    guess = [x - x_epsilon, x + x_epsilon, y - y_epsilon, y + y_epsilon]
    return opti_box(guess, N, root)


def min_N(coord, N, root, silent=False):
    """Find a better N for a (coord, N, root) that works."""
    X = box_q(coord, N, root, silent=True)**(-N)
    best_N = N
    for n in range(N + 11)[:0:-1]:
        q = box_q(coord, n, root, silent=True)
        if q:
            if not silent:
                print(q**(-n))
            if q**(-n) > X:
                X = q**(-n)
                best_N = n
    return best_N
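# Hypothetical quick use of root_near() above: polish an eyeballed root of F
# with Newton's method (the starting guess is made up for illustration; the
# residual |F(root)| should be near zero when Newton converges).
def _example_root():
    root = root_near(complex(0.5, 0.1), silent=True)
    if root is not None:
        print('root = {}, |F(root)| = {}'.format(root, abs(F(root))))
    return root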
[ "dave.platt@bris.ac.uk" ]
dave.platt@bris.ac.uk
b190c54adbe0a9fd40c58475eaabbd0a3217a96c
6d9bfaa330fbe3076637b6beaaa62a69b28d0e9c
/manage.py
547113df814a95436179ed34505af810631ef67b
[]
no_license
edubega0707/ventas-backend
71fcf1470d1f0349ce1ab256057790e50b690c0a
3fa6580c738c710509177b0ac8e5924d6a398dce
refs/heads/master
2020-03-28T00:30:40.586243
2018-09-04T21:47:08
2018-09-04T21:47:08
147,423,682
0
0
null
null
null
null
UTF-8
Python
false
false
535
py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nfh.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
[ "eduardobenitez@MacBook-Pro-de-EDUARDO.local" ]
eduardobenitez@MacBook-Pro-de-EDUARDO.local
f063485253aa0a5721efdf11de85a116fdc1277e
e10bc27de5a63b9fac8b663bf69b501ecb6b3adf
/shop/migrations/0001_initial.py
bd4b0139e40f86222f2069234cba0ad391bd056a
[]
no_license
rahultanwar8814/Retail_India
acac5f60a65655ed3f607084211b5b8d0d08b218
baa6059ab949c08f47c56adb094041c53bbafca8
refs/heads/master
2023-06-03T14:22:52.683850
2021-06-20T12:57:22
2021-06-20T12:57:22
378,247,883
0
0
null
null
null
null
UTF-8
Python
false
false
693
py
# Generated by Django 3.1.7 on 2021-06-04 08:41

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('price', models.CharField(max_length=50)),
                ('description', models.CharField(max_length=300)),
                ('image', models.ImageField(default='', upload_to='shop/images')),
            ],
        ),
    ]
[ "hulkvsironmanmark2@gamil.com" ]
hulkvsironmanmark2@gamil.com
1b1c6f1ffe39f736efb69c58a0d9e489f47a50ae
7d1f481cdb53c1b4edd19a0d77611059e2f7f722
/appengine/antonym/model.py
d530e79863b5d1b16370a1a1266ca385256306c3
[]
no_license
Mohibtech/antonym
9b3f81def92043dbe2d9fee4d38c0e7fd533a475
89781b6b3428f21997a2f9e687d0688ab78ab2ca
refs/heads/master
2020-04-27T02:20:13.920032
2013-01-13T00:36:18
2013-01-13T00:36:18
173,990,863
0
0
null
2019-03-05T17:27:56
2019-03-05T17:27:56
null
UTF-8
Python
false
false
9,792
py
import logging

from google.appengine.ext import db, search
from google.appengine.api import users

from katapult.log import LoggerFactory
from katapult.models import entity_has_property, IdGenerator

# DuplicateDataException is raised by single_result_query below.
from antonym.core import DuplicateDataException, IllegalArgumentException, NotFoundException


def strict_key_lookup():
    """
    decorator to return single entity for single key-like param, or fail.

    keywords:
    return_none - True if method should return none instead of raising
        NotFoundException (defaults to False)

    raises:
    NotFoundException
    """
    def func_wrapper(f):
        def args_wrapper(*args, **kw):
            return_none = kw.pop("return_none", False)
            cls = args[0]
            key_arg = args[1]
            entities = f(*args, **kw)
            if not entities:
                if return_none:
                    result = None
                else:
                    raise NotFoundException("%s %s" % (cls.__name__, key_arg))
            else:
                result = entities
            return result
        return args_wrapper
    return func_wrapper


def single_result_query():
    """ decorator enforces that query returned by wrapped method returns only one result """
    def func_wrapper(f):
        def args_wrapper(*args, **kw):
            return_none = kw.pop("return_none", False)
            cls = args[0]
            q = f(*args, **kw)
            q_count = q.count()
            result = None
            if not q_count:
                if not return_none:
                    raise NotFoundException("%s query" % cls.__name__)
            elif q_count > 1:
                raise DuplicateDataException("%s query" % cls.__name__)
            else:
                result = q.fetch(1)[0]
            return result
        return args_wrapper
    return func_wrapper


class ArtifactSource(db.Model):

    name = db.StringProperty(required=True)

    @classmethod
    @strict_key_lookup()
    def get_by_name(cls, name, **kw):
        return cls.get_by_key_name(name, **kw)

    @classmethod
    @strict_key_lookup()
    def get_multiple_by_name(cls, *names, **kw):
        return cls.get_by_key_name(names, **kw)

    @classmethod
    def get_or_create(cls, name):
        """
        returns:
        source key
        """
        source = cls.get_by_name(name, return_none=True)
        return source.key() if source else cls.create(name)

    @classmethod
    def create(cls, name):
        return cls(key_name=name, name=name).put()

    def __repr__(self):
        return "%s{name='%s'}" % (self.__class__.__name__, self.name)


class UrlResource(db.Model):

    url = db.StringProperty(required=True)
    modified = db.DateTimeProperty(required=True, auto_now=True)
    source_modified = db.DateTimeProperty()
    etag = db.StringProperty()

    # TODO: how to create bidirectional references between Feed and UrlResource?
    feed = db.ReferenceProperty()

    @classmethod
    @strict_key_lookup()
    def get_by_url(cls, url, **kw):
        return cls.get_by_key_name(url)

    @classmethod
    def create(cls, url, **kw):
        return cls(key_name=url, url=url, **kw).put()

    @classmethod
    def find_latest(cls, **kw):
        return cls.all().order("-modified")

    @classmethod
    def search_by_url(cls, url_regex, **kw):
        for u in cls.all().fetch(1000):
            if url_regex.search(u.url):
                yield u


class Feed(db.Model):

    url = db.StringProperty(required=True)
    artifact_source = db.ReferenceProperty(ArtifactSource, required=True)
    url_resource = db.ReferenceProperty(UrlResource, required=True)
    active = db.BooleanProperty(required=True)

    @classmethod
    @strict_key_lookup()
    def get_by_source_name(cls, source_name, **kw):
        return cls.get_by_key_name(source_name)

    @classmethod
    def get_by_url(cls, url, **kw):
        return cls.all(**kw).filter("url =", url)

    @classmethod
    @single_result_query()
    def get_by_source(cls, source, **kw):
        return cls.all(**kw).filter("artifact_source =", source)

    @classmethod
    def find_active(cls, **kw):
        return cls.all(**kw).filter("active =", True)

    @classmethod
    def create(cls, source_name, **kw):
        return cls(key_name=source_name, **kw).put()


class ArtifactInfo(db.Model):

    # identifier. will be the same for matching ArtifactContent instance.
    guid = db.StringProperty(required=True)
    source = db.ReferenceProperty(ArtifactSource, required=True)
    source_name = db.StringProperty(required=True)
    content_type = db.StringProperty(required=True)
    modified = db.DateTimeProperty(required=True, auto_now_add=True)
    modified_by = db.UserProperty(required=True)
    content_md5 = db.StringProperty(required=True)

    # references the source resource
    # TODO: why does artifactinfo_set exist, I see no ReferenceProperties implying its existence
    url = db.StringProperty()
    url_resource = db.ReferenceProperty(UrlResource, collection_name="artifactinfo_set2")

    @classmethod
    def create(cls, **kw):
        guid = IdGenerator.uuid()
        return cls(key_name=guid, guid=guid, **kw).put()

    @classmethod
    def get_by_guid(cls, guid):
        return cls.get_by_key_name(guid)

    @classmethod
    def find_by_source(cls, source, **kw):
        return cls.all(**kw).filter("source =", source)

    @classmethod
    def find_by_content_md5(cls, md5, **kw):
        return cls.all(**kw).filter("content_md5 =", md5)

    @classmethod
    def find_newer(cls, datetime, **kw):
        return cls.all(**kw).filter("modified > ", datetime)

    @classmethod
    def find_by_source_in_reverse_modified_order(cls, source, **kw):
        return cls.all(**kw).filter("source =", source).order("-modified")

    @classmethod
    def delete_oldest_by_source(cls, source, keep_count, max_delete=500):
        keys = cls.find_by_source_in_reverse_modified_order(source, keys_only=True).fetch(max_delete, keep_count)
        key_names = [m.name() for m in keys]
        db.delete(keys)
        return key_names


class ArtifactContent(search.SearchableModel):

    # identifier. will be the same for matching ArtifactInfo instance.
    guid = db.StringProperty(required=True)
    source = db.ReferenceProperty(ArtifactSource, required=True)
    source_name = db.StringProperty(required=True)
    info = db.ReferenceProperty(ArtifactInfo, required=True)
    body = db.TextProperty(required=True)
    # TODO: make required
    modified = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def create(cls, guid, **kw):
        return cls(key_name=guid, guid=guid, **kw).put()

    @classmethod
    def get_by_guid(cls, guid):
        return cls.get_by_key_name(guid)

    @classmethod
    def find_by_source(cls, source, **kw):
        return cls.all().filter("source =", source)

    @classmethod
    def find_by_source_in_reverse_modified_order(cls, source, **kw):
        return cls.all().filter("source =", source).order("-modified")

    @classmethod
    def delete_oldest_by_source(cls, source, keep_count, pre_call=None, max_delete=500):
        models = cls.find_by_source_in_reverse_modified_order(source).fetch(max_delete, keep_count)
        key_names = []
        for m in models:
            try:
                key_name = m.key().name()
                if pre_call:
                    pre_call(m)
                m.delete()
                key_names.append(key_name)
            except Exception, e:
                logging.error(e)
        return key_names

    def __repr__(self):
        return "%s(guid=%s, source=%s, body=%s)" % \
            (self.__class__.__name__, self.guid, self.source_name, self.body)


class TwitterResponse(db.Model):

    MENTION = 'mention'
    DIRECT = 'direct'
    RETWEET = 'retweet'

    # TODO: are there conflicts between direct message ids and public status ids?
    # twitter ids
    message_id = db.StringProperty(required=True)
    response_id = db.StringProperty(required=True)

    # twitter user
    user = db.StringProperty(required=True)

    # can't be required, since this was added for retweets
    tweet_type = db.StringProperty(choices=set([MENTION, DIRECT, RETWEET]))

    timestamp = db.DateTimeProperty(required=True, auto_now_add=True)

    @classmethod
    def get_by_message_id(cls, message_id):
        return cls.get_by_key_name(message_id)

    @classmethod
    def create(cls, message_id, **kw):
        return cls(key_name=message_id, message_id=message_id, **kw).put()

    @classmethod
    def find_latest(cls, **kw):
        return cls.all().order("-timestamp")


class Configuration(db.Model):

    twitter_access_token = db.StringProperty()
    twitter_oauth_enabled = db.StringProperty()
    is_tweeting = db.StringProperty()
    twitter_read_only = db.StringProperty(default="1")

    @classmethod
    def _key_name(cls):
        return cls.__name__

    @classmethod
    def get(cls):
        return cls.get_by_key_name(cls._key_name())

    @classmethod
    def get_or_create(cls):
        c = cls.get()
        if not c:
            c = cls._new()
            c.put()
        return c

    @classmethod
    def _new(cls):
        return cls(key_name=cls._key_name())

    @classmethod
    def update(cls, **kw):
        entity = cls.get()
        if not entity:
            entity = cls._new()
        defined_props = entity.properties()
        for k, v in kw.iteritems():
            logging.info("%s %s=%s" % (cls._key_name(), k, v))
            if not k in defined_props:
                raise IllegalArgumentException("invalid field: %s" % k)
            setattr(entity, k, v)
        return entity.put()
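A quick illustration of how the lookup decorators above change call semantics; the source name is hypothetical and the snippet assumes a populated App Engine datastore:

# Hypothetical usage of the decorated classmethods defined above.
source = ArtifactSource.get_by_name("reuters", return_none=True)  # None if absent
try:
    ArtifactSource.get_by_name("missing-source")  # raises NotFoundException when absent
except NotFoundException:
    pass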
[ "mhawthorne@gmail.com" ]
mhawthorne@gmail.com
2f81c857ad70cb625554c4685c89572ec852b616
e79fcba3028185673337b657d42d104249a936ab
/main_keyword_extraction.py
1a895944ece983c77b3b9dbfe6a4d39340506965
[]
no_license
NusretOzates/TransformerFromTheGround
a7701438105bf8261589d49108f4b89a6774a2bc
a3b6f194ed594742613d95a0c306218cd877db96
refs/heads/master
2023-04-19T13:14:00.092586
2021-05-06T19:26:25
2021-05-06T19:26:25
364,676,410
0
0
null
null
null
null
UTF-8
Python
false
false
11,174
py
### This is a deprecated file, to see a nice example look at the main_tr_to_en.py file!

import tensorflow as tf
import time
from tokenizers import BertWordPieceTokenizer
from transformers import T5TokenizerFast

from learning_scheduler import CustomScheduler
from models.transformer import Transformer
from utilities import filter_max_length, loss_function, accuracy_function

BUFFER_SIZE = 5000
BATCH_SIZE = 32
EPOCH = 5

# Hyperparameters
num_layers = 8
d_model = 512
dff = 512
num_heads = 8
dropout_rate = 0.2

config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
tf.compat.v1.InteractiveSession(config=config)

print("Loading data")
import pandas as pd

df = pd.read_json('data/sikayetvar.jsonl', lines=True, encoding='utf8')
df['keywords'] = df['keywords'].apply(lambda x: ','.join(x))
df = df.sort_values(by='text', key=lambda x: x.str.len())

ds = tf.data.Dataset.from_tensor_slices((df['text'].to_numpy(), df['keywords'].to_numpy()))
cardinality = tf.data.experimental.cardinality(ds).numpy()

print('Create the Tokenizer')
tokenizer = BertWordPieceTokenizer('turkish-tokenizer-vocab.txt', clean_text=False, lowercase=False)
print(tokenizer.encode('merhaba ben nusret').tokens)


def encode(tr, en):
    tr_text = tf.compat.as_text(tr.numpy()).lower()
    en_text = tf.compat.as_text(en.numpy()).lower()
    tr = tokenizer.encode(tr_text).ids
    en = tokenizer.encode(en_text).ids
    tr = tf.constant(tr, dtype=tf.int32)
    en = tf.constant(en, dtype=tf.int32)
    return tr, en


def tf_encode(tr, en):
    result_tr, result_en = tf.py_function(encode, [tr, en], [tf.int32, tf.int32])
    result_tr.set_shape([None])
    result_en.set_shape([None])
    return result_tr, result_en


print('Encode Dataset')
test_dataset = ds.take(2000)
train_dataset = ds.skip(2000)

train_dataset = train_dataset.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)
train_dataset = train_dataset.filter(filter_max_length)
train_dataset = train_dataset.cache()
train_dataset = train_dataset.padded_batch(BATCH_SIZE)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)

print('encode val dataset')
val_dataset = test_dataset.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)
val_dataset = val_dataset.filter(filter_max_length).padded_batch(BATCH_SIZE)

# i = 0
# for (batch, (inp, tar)) in enumerate(train_dataset):
#     i += 1
#     # print(i)

print('Create Scheduler and Optimizer etc.')
learning_rate = CustomScheduler(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,
                                                            reduction=tf.keras.losses.Reduction.NONE)

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.Mean(name='train_accuracy')
val_loss = tf.keras.metrics.Mean(name='val_loss')
val_accuracy = tf.keras.metrics.Mean(name='val_accuracy')

vocab_size = tokenizer.get_vocab_size()

print('Create Transformer model')
transformer = Transformer(num_layers, d_model, num_heads, dff,
                          vocab_size, vocab_size,
                          pe_input=vocab_size,
                          pe_target=vocab_size,
                          rate=dropout_rate)

inputs = tf.keras.Input(shape=(1, None), batch_size=BATCH_SIZE, name='inputs')
targets = tf.keras.Input(shape=(1, None), batch_size=BATCH_SIZE, name='targets')

preds, tar_real = Transformer(num_layers, d_model, num_heads, dff,
                              vocab_size, vocab_size,
                              pe_input=vocab_size,
                              pe_target=vocab_size,
                              rate=dropout_rate)([inputs, targets])

model = tf.keras.Model(inputs=[inputs, targets], outputs=[preds, tar_real])
model.compile(optimizer=optimizer, loss=loss_object, metrics=['accuracy'], run_eagerly=True)
model.fit(x=train_dataset, validation_data=val_dataset)

# inp = tf.convert_to_tensor([[2, 44, 22, 66, 77, 3]], dtype=tf.int32)
# tar_inp = tf.convert_to_tensor([[2]], dtype=tf.int32)
#
# print(transformer((inp, tar_inp), training=False))
# print(transformer.summary())
# transformer.save('saved_tf', save_format='tf')
# print('===' * 35)
# model = tf.keras.models.load_model('saved_tf')
# print(model((inp, tar_inp), training=False))

checkpoint_path = './checkpoints/train'

ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer)
# ckpt = tf.train.Checkpoint(transformer=model, optimizer=optimizer)

ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)

# if ckpt_manager.latest_checkpoint:
#     ckpt.restore(ckpt_manager.latest_checkpoint)
#     print('Latest Checkpoint restored!!')

"""
The @tf.function trace-compiles train_step into a TF graph for faster execution.
The function is specialized to the precise shape of the argument tensors. To
avoid re-tracing due to the variable sequence lengths or variable batch sizes
(the last batch is smaller), use input_signature to specify more generic shapes.
"""
train_step_signature = [
    tf.TensorSpec(shape=(None, None), dtype=tf.int32),
    tf.TensorSpec(shape=(None, None), dtype=tf.int32)
]


@tf.function(input_signature=train_step_signature)
def train_step(inp, tar):
    with tf.GradientTape() as tape:
        preds, tar_real = transformer([inp, tar], training=True, mask=None)
        loss = loss_function(tar_real, preds, loss_object)

    gradients = tape.gradient(loss, transformer.trainable_variables)
    optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))

    # preds, tar_real, _ = model([inp, tar])
    # loss = loss_function(tar_real, preds, loss_object)
    # gradients = tape.gradient(loss, model.trainable_variables)
    # optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(accuracy_function(tar_real, preds))


@tf.function(input_signature=train_step_signature)
def evaluate(inp, tar):
    preds, tar_real = transformer([inp, tar], True)
    loss = loss_function(tar_real, preds, loss_object)
    val_loss(loss)
    val_accuracy(accuracy_function(tar_real, preds))


print('Begin Training')
for epoch in range(EPOCH):
    start = time.time()

    train_loss.reset_states()
    train_accuracy.reset_states()
    val_loss.reset_states()
    val_accuracy.reset_states()

    for (batch, (inp, tar)) in enumerate(train_dataset):
        train_step(inp, tar)
        if batch % 200 == 0:
            print('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
                epoch + 1, batch, train_loss.result(), train_accuracy.result()))

    if (epoch + 1) % 1 == 0:
        ckpt_save_path = ckpt_manager.save()
        print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path))

    print('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1,
                                                        train_loss.result(),
                                                        train_accuracy.result()))
    # print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))

    # Add validation code here!
    for (batch, (inp, tar)) in enumerate(val_dataset):
        evaluate(inp, tar)

    print('-' * 35)
    print('Validation Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1,
                                                                   val_loss.result(),
                                                                   val_accuracy.result()))
    print('-' * 35)

transformer.summary()
transformer.save('saved_tf', save_format='tf')

# for sentence, prediction in val_dataset.take(5):
#     sentence_text = tokenizer.decode(sentence.numpy().tolist()[0], skip_special_tokens=False)
#     end = tf.convert_to_tensor([tokenizer.token_to_id('[SEP]')], dtype=tf.int64)
#     output = tf.convert_to_tensor([tokenizer.token_to_id('[CLS]')], dtype=tf.int64)
#     output = tf.expand_dims(output, 0)
#     encoder_input = sentence
#     for i in range(20):
#         enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
#             encoder_input, output)
#
#         # predictions.shape == (batch_size, seq_len, vocab_size)
#         predictions, attention_weights = transformer(encoder_input,
#                                                      output,
#                                                      False,
#                                                      enc_padding_mask,
#                                                      combined_mask,
#                                                      dec_padding_mask)
#
#         # select the last word from the seq_len dimension
#         predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
#
#         predicted_id = tf.argmax(predictions, axis=-1)
#
#         # concatenate the predicted_id to the output which is given to the decoder
#         # as its input.
#         output = tf.concat([output, predicted_id], axis=-1)
#
#         # return the result if the predicted_id is equal to the end token
#         if predicted_id == end:
#             break
#
#     tokens = output.numpy().tolist()[0]
#     text = tokenizer.decode(tokens)  # shape: ()
#
#     print(output)
#     print('text: ')
#     print(sentence_text)
#     print('prediction:')
#     print(text)
#     print('real value')
#     real = prediction.numpy().tolist()[0]
#     real = tokenizer.decode(real)
#     print(real)
#
#     # tokens = tokenizer_keyword.lookup(output.numpy().tolist())[0]
#     #
#     # print(tokens)

# as the target is english, the first word to the transformer should be the
# english start token.
# start, end = tf.convert_to_tensor([tokenizer_keyword.vocab_size], dtype=tf.int64), tf.convert_to_tensor(
#     [tokenizer_keyword.vocab_size + 1], dtype=tf.int64)
# output = start
# output = tf.expand_dims(output, 0)
#
# for i in range(20):
#     enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
#         encoder_input, output)
#
#     # predictions.shape == (batch_size, seq_len, vocab_size)
#     predictions, attention_weights = transformer(encoder_input,
#                                                  output,
#                                                  False,
#                                                  enc_padding_mask,
#                                                  combined_mask,
#                                                  dec_padding_mask)
#
#     # select the last word from the seq_len dimension
#     predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
#
#     predicted_id = tf.argmax(predictions, axis=-1)
#
#     # concatenate the predicted_id to the output which is given to the decoder
#     # as its input.
#     output = tf.concat([output, predicted_id], axis=-1)
#
#     # return the result if the predicted_id is equal to the end token
#     if predicted_id == end:
#         break
#
# # output.shape (1, tokens)
# tokens = output.numpy().tolist()[0][1:-1]
# text = tokenizer_keyword.decode(tokens)  # shape: ()
# print(output)
# print(text)

# tokens = tokenizer_keyword.lookup(output.numpy().tolist())[0]
#
# print(tokens)
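The docstring above about @tf.function re-tracing is easy to demonstrate in isolation; this standalone sketch only assumes TensorFlow 2 is installed and is independent of the training script:

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=(None,), dtype=tf.int32)])
def doubled(x):
    # the generic (None,) signature means a single trace covers every 1-D int32 input
    return x * 2

doubled(tf.constant([1, 2]))
doubled(tf.constant([1, 2, 3]))  # no re-trace despite the new shape
print(doubled.experimental_get_tracing_count())  # 1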
[ "nozatespc@gmail.com" ]
nozatespc@gmail.com
757e6d2de18eccf6e3b67a96e24952d1ff951630
384a30df1d8ed9c811c6ceed3db42eb3b5fe83ba
/configs/ocr/cascade.py
aff1403368b70e014a327c5466c89990595aee8d
[]
no_license
clw5180/prcv-ocr-detection
4a04300d1ebc7605b6f17467c5375464e79a5635
7c6187d9cfde3e91de84a25d36f359295e411ce6
refs/heads/master
2021-04-22T18:34:07.879055
2019-11-08T07:50:38
2019-11-08T07:50:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,514
py
# model settings
model = dict(
    type='CascadeRCNN',
    num_stages=3,
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=[
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=3,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=3,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.05, 0.05, 0.1, 0.1],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=3,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.033, 0.033, 0.067, 0.067],
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
    ])
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=[
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False),
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.6,
                neg_iou_thr=0.6,
                min_pos_iou=0.6,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False),
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.7,
                min_pos_iou=0.7,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)
    ],
    stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05),
        max_per_img=100),
    keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/gruntdata/openImages/ocr/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=1,
    workers_per_gpu=1,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'OCR-train-publish-v2/train.json',
        img_prefix=data_root + 'OCR-train-publish-v2/images_labels/images',
        img_scale=[(1800, 1400), (1800, 1300), (1800, 1200)],
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=False,
        with_crowd=True,
        with_label=True),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_crowd=True,
        with_label=True),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'val.json',
        img_prefix=data_root + 'val_images',
        img_scale=[(2000, 1800), (2000, 1700), (2000, 1600), (1800, 1500), (1800, 1400)],
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=1,
        with_mask=False,
        with_label=False,
        test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl', port='2019')
log_level = 'INFO'
work_dir = '/gruntdata/openImages/ocr/models/cascade_rcnn_x101_32x4d_fpn_1x'
load_from = '/gruntdata/openImages/pretrained/cascade_rcnn_x101_32x4d_fpn_2x_20181218-28f73c4c.pth'
resume_from = None
workflow = [('train', 1)]
[ "hope@peterzhangdeMacBook-Pro.local" ]
hope@peterzhangdeMacBook-Pro.local
7f5f88e7d894c4c6be190b9fd4c1111002cfb9fb
3e9b9c0257f7b009aa37dfc75524333ba6edc44e
/src-Webpage/old/main_old.py
6aa222c8dd5d205ca7791a5fd3452b784a322b45
[]
no_license
jaguila/councilmatic
dcf461b077194a1be04310ec2ef575aa95e71c46
5b00aa1f704ed3db866ee5039c87f13efd7dd915
refs/heads/main
2023-03-09T18:33:04.124110
2021-02-24T01:07:31
2021-02-24T01:07:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,874
py
# THIS IS NO LONGER USED. IT IS FOR REFERENCE.
#
# This program creates a web page
# Created by Howard Matis for OpenOakland - October 19, 2018
#
# Takes data scraped from the Oakland Legistar web page - https://oakland.legistar.com/Calendar.aspx
#
import csv
from datetime import datetime, timedelta
import shutil
from create_html import create_html
import os


def dateLessThanEqual(date1, date2):
    # Compare whether deadline has passed
    datetime1 = datetime.strptime(date1, '%m/%d/%Y')
    datetime2 = datetime.strptime(date2, '%m/%d/%Y')
    return datetime1 <= datetime2


def read_csvfile(datafile, search_string, f2):
    data = list(csv.reader(open(datafile, encoding="utf-8"), delimiter=',', quotechar='"',
                           quoting=csv.QUOTE_ALL, skipinitialspace=True))
    numrows = len(data)
    # numcolumns = len(data[0])
    citycouncil = search_string
    https = "https://"
    for i in range(numrows):
        if len(data[i][:]) >= 9:
            meeting = data[i][0]
            meeting = meeting.replace(" and ", " & ")  # must do this because of non-standardization by Oakland
            if citycouncil in meeting:
                meeting_video = data[i][8]
                meeting_date = data[i][1]
                meeting_time = data[i][3]
                meeting_room = data[i][4]
                meeting_agenda = data[i][6]
                meeting_minutes = data[i][7]
                ecomment = data[i][9]
                if https in meeting_video:
                    link = meeting_video
                    link_text = "Click for Video Minutes and Agenda"
                elif https in meeting_minutes:
                    link = meeting_minutes
                    link_text = "Click for Minutes"
                elif https in meeting_agenda:
                    link = meeting_agenda
                    link_text = "Click for Agenda"
                else:
                    link = 'none'
                    link_text = 'none'
                present = datetime.now().strftime('%m/%d/%Y')
                agenda_deadline = datetime.strptime(meeting_date, '%m/%d/%Y') + timedelta(days=10)
                agenda_deadline = agenda_deadline.strftime('%m/%d/%Y')
                if dateLessThanEqual(present, meeting_date):
                    link_text = "Meeting at " + meeting_time + " in the " + meeting_room
                    write_http_row(f2, meeting_date, link, link_text, ecomment)
                elif dateLessThanEqual(agenda_deadline, present):
                    # 10 days have passed since meeting. Only keep if we have video minutes
                    if link_text == "Click for Video Minutes and Agenda":  # Need video minutes for this length of time
                        write_http_row(f2, meeting_date, link, link_text, "video")
                else:
                    # print out meeting details in the preliminary period
                    write_http_row(f2, meeting_date, link, link_text, "video")
        else:
            print("***Error** on line", i)  # Error condition
            if len(data[i][:]) > 0:
                print(data[i][0])
            print(' ')


def write_http_row(f2, date, link, message, emessage):
    https = "https://"
    f2.write("\n")
    lineout = "<tr>"
    f2.write(lineout + "\n")
    lineout = '<td><span class="style1">' + date + "</span></td>"
    f2.write(lineout + "\n")
    if link == "none":
        lineout = '<td>' + message  # if no link, omit it
    elif emessage == "video":
        lineout = '<td> <a href="' + link + '" target="_top" data-toggle="tooltip" title="Open in new page">' \
                  + message + "</a>"
    else:
        lineout = '<td>' + message + " | " + '<a href="' + link + '" target="_top">' + "Click for agenda</a>"
    if https in emessage:  # to add e-comment
        lineout = lineout + " | " + '<a href="' + emessage + '" target="_top">' + "Click to comment electronically</a>"
    f2.write(lineout + "\n")
    lineout = '</td>'
    f2.write(lineout + "\n")
    lineout = "</tr>"
    f2.write(lineout + "\n")


#
# Write out a navigation bar
#
def make_navbar(type, list, year_list, committee_list, loop_type, loop_index, f2):
    # write the top of navbar
    if type == 1:
        url = "template/template_navbar_top.txt"
        tooltip = "Select committee of interest"
    else:
        url = "template/template_navbar_top2.txt"
        tooltip = "Select year of interest"
    create_html(url, f2)  # Create template for HTML page
    #
    for index, item in enumerate(list):
        linenav = '<li class="nav-item">'
        f2.write(linenav + "\n")
        if loop_type:
            year_bar = str(year_list[loop_index])  # Looping over the committees, so the year is fixed
            committee_bar = str(index)
        else:
            year_bar = str(year_list[index])  # Looping over the years, so the committee is fixed
            committee_bar = str(loop_index)
        urlnavbar = '../' + year_bar + '/committee' + committee_bar + ".html"
        linenav = '<a class="nav-link" target="_self" href="' + urlnavbar + '" data-toggle="tooltip" title="' \
                  + tooltip + '">' + item + '</a>'  # Problem may be here
        f2.write(linenav + "\n")
        f2.write("  </li>" + "\n")
    url = "template/template_navbar_bottom.txt"
    create_html(url, f2)  # Create template for HTML page
    f2.write(" " + "\n")


#
# Main program
#
version = "4.0"
print(" ")
print("<------------------Running main.py - Version", version, "------------------>")
committees = ["City Council", "Rules & Legislation", "Public Works", "Life Enrichment", "Public Safety",
              "Oakland Redevelopment", "Community & Economic Development", "Finance & Management"]

# Figure out which years to use
earliestYear = 2013  # The earliest year to process minus 1
maxyears = 10  # Maximum number of years to look at
currentYear = datetime.now().year
currentMonth = datetime.now().month
if currentMonth == 12:  # Start processing the next year in December
    startyear = currentYear + 1
else:
    startyear = currentYear
endyear = max(earliestYear, earliestYear - maxyears)
years = []
for i in range(startyear, endyear, -1):  # Calculate the years to process
    years.append(str(i))

for index_year, year in enumerate(years):
    print()
    for index, committee in enumerate(committees):
        print(year, committee)
        outfile = "../website/" + year + "/committee" + str(index) + ".html"
        os.makedirs(os.path.dirname(outfile), exist_ok=True)
        if index == 0:
            save_outfile = outfile  # Save the first committee as default
        with open(outfile, "w") as f1:
            #
            # write style section of the web page
            url = "template/template_style.txt"
            create_html(url, f1)  # Create template for HTML page
            f1.write(" " + "\n")
            #
            # write the top section of the web page
            url = "template/template_top1.txt"
            create_html(url, f1)  # Create template for HTML page
            f1.write(" " + "\n")
            # f1.write("<tbody> " + "\n")  # Needed when using columns for full webpage
            # f1.write("<tr>" + "\n")  # Needed when using columns for full webpage
            # f1.write('<td style="width: 388px;">' + "\n")  # Needed when using columns for full webpage
            #
            # write the sidebar
            url = "temp/dynamic_calendar.txt"
            create_html(url, f1)  # Create template for HTML page
            f1.write(" " + "\n")
            #
            # write the second top of the web page
            url = "template/template_top2.txt"
            create_html(url, f1)  # Create template for HTML page
            f1.write(" " + "\n")
            #
            loop_committee = True  # Loop over committees
            loop_index = index_year  # Fix the year
            make_navbar(1, committees, years, committees, loop_committee, loop_index, f1)
            # write the top of the web page
            url = "template/template_above_table.txt"
            create_html(url, f1)  # Create template for HTML page
            f1.write(" " + "\n")
            line = '<div align="center"><h3>' + committee + " - " + year + '</h3></div>'
            f1.write(line)
            url = "template/template_table_top.txt"
            create_html(url, f1)  # Write bottom of header file
            f1.write(" " + "\n")
            scraper_file = "../website/scraped/year" + str(year) + ".csv"
            read_csvfile(scraper_file, committees[index], f1)
            # write the bottom of the table
            url = "template/template_table_bottom.txt"
            create_html(url, f1)  # Create template for HTML page
            f1.write(" " + "\n")
            # create the lower navigation bar
            loop_committee = False  # Loop over years
            loop_index = index  # Fix the committee
            make_navbar(2, years, years, committees, loop_committee, loop_index, f1)
            # write the bottom of the web page
            url = "template/template_bottom.txt"
            create_html(url, f1)  # Create template for HTML page
            f1.write(" " + "\n")
        f1.close()  # Close the file
        if index == 0:
            indexfile = "../website/" + year + "/index.html"
            shutil.copyfile(outfile, indexfile)
    if years[index_year] == str(currentYear):  # Put the main index.html as current year
        indexfile = "../website/pc/index.html"
        shutil.copyfile(save_outfile, indexfile)
        print("Saving ", year, save_outfile, "as default file")
print("<-----------------End of main.py------------------------------->")
quit()
[ "hsmatis@gmail.com" ]
hsmatis@gmail.com
bfc341b43e90e607ae4959008f9a5864d4b91510
9c6b4fd7e92918a7e9962ad6156b91b0eba0087e
/gossip/__init__.py
9f920f4b01416c553063c44b4483080553f18fde
[]
no_license
schatt/gossip
2145e26ddf50e35d060eedd2ea1575e9f3ebe45f
2d772125332667b78f1dc001458c764077a2e5b3
refs/heads/master
2021-01-18T07:41:11.752950
2012-11-08T22:18:56
2012-11-08T22:18:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
23
py
""" Gossip module. """
[ "rudnyh@corp.mail.ru" ]
rudnyh@corp.mail.ru
821dcfad9c455b3d5649a9d49a2739ac928f2005
1b1e8e73649ad1eed89556a5d479b0a549354fd5
/opennem/db/migrations/versions/12c8cbba29f0_update_mv_facility_all_view.py
0cb19d2383f03ce37029805efd331870c6553bbc
[ "MIT" ]
permissive
zalihat/opennem
3ea8db7246f350fb0eacf8c6078dbffa4fe9aea2
0f82e4fc3fd2bcfbf56a2741d89e4228d017dcf3
refs/heads/master
2023-02-27T15:37:47.206336
2021-02-08T07:28:57
2021-02-08T07:28:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,995
py
# pylint: disable=no-member
"""
Update mv_facility_all view

Revision ID: 12c8cbba29f0
Revises: 3f6867b6de7e
Create Date: 2021-01-19 17:15:59.557141

"""
from alembic import op

# revision identifiers, used by Alembic.
revision = "12c8cbba29f0"
down_revision = "3f6867b6de7e"
branch_labels = None
depends_on = None

stmt_drop = "drop materialized view if exists mv_facility_all cascade;"

stmt = """
create materialized view mv_facility_all as
select
    fs.trading_interval,
    f.code,
    f.fueltech_id,
    f.network_id,
    f.network_region,
    f.interconnector,
    f.interconnector_region_to,
    date_trunc('day', fs.trading_interval at time zone 'AEST') as ti_day_aest,
    date_trunc('month', fs.trading_interval at time zone 'AEST') as ti_month_aest,
    date_trunc('day', fs.trading_interval at time zone 'AWST') as ti_day_awst,
    date_trunc('month', fs.trading_interval at time zone 'AWST') as ti_month_awst,
    max(fs.energy) as energy,
    case
        when avg(bs.price) >= 0 and min(fs.energy) >= 0 then
            coalesce(max(fs.energy) * avg(bs.price), 0.0)
        else 0.0
    end as market_value,
    case
        when avg(f.emissions_factor_co2) >= 0 and min(fs.energy) >= 0 then
            coalesce(max(fs.energy) * avg(f.emissions_factor_co2), 0.0)
        else 0.0
    end as emissions
from mv_facility_energy_hour fs
left join facility f on fs.facility_code = f.code
left join balancing_summary bs
    on bs.trading_interval = fs.trading_interval
    and bs.network_id = f.network_id
    and bs.network_region = f.network_region
where f.fueltech_id is not null
group by
    1,
    f.code,
    f.fueltech_id,
    f.network_id,
    f.network_region,
    f.interconnector,
    f.interconnector_region_to
order by 1 desc;
"""


def upgrade() -> None:
    with op.get_context().autocommit_block():
        op.execute(stmt_drop)
        op.execute(stmt)


def downgrade() -> None:
    op.execute(stmt_drop)
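For context, a revision like this is applied with the standard Alembic CLI; the commands below assume the project's alembic.ini is configured for the target database:

# alembic upgrade 12c8cbba29f0    -- drops and recreates mv_facility_all
# alembic downgrade 3f6867b6de7e  -- drops the view and returns to the prior revision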
[ "git@nikcub.me" ]
git@nikcub.me
157f95fbb9a5d858c93915e607ca89bf51d90eee
5eafdf502a7545ba33adedca2692aed7a20d129b
/books/migrations/0006_auto_20191201_1446.py
3e812a6c3aaf138809e2388a82b89273734d7a40
[]
no_license
tsuno1045/TadokuRecord
9e28555f34a54b0d01f90526278882b0ed8f40ab
8cee1fa3eed39da52120716f2a0fb9aef110b0f7
refs/heads/master
2021-09-27T00:13:42.315646
2020-04-24T17:18:41
2020-04-24T17:18:41
228,169,711
0
0
null
2021-09-22T18:16:23
2019-12-15T10:59:26
Python
UTF-8
Python
false
false
587
py
# Generated by Django 2.2.7 on 2019-12-01 05:46

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('books', '0005_auto_20191201_1442'),
    ]

    operations = [
        migrations.AddField(
            model_name='readingrecord',
            name='read_page_count',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='readingrecord',
            name='read_word_count',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
[ "tsuno1045@gmail.com" ]
tsuno1045@gmail.com
d7f7a0cf28f990fa3f14fef754d51440f5c3f5f2
1db9fb289ac9987cd206d3736d2399ed59bebac6
/6 Типы данных/6.1 Числовые типы данных: int, float/2.py
be9488f6ff24dbb26c3fb8e8b92204e369d1c3d0
[]
no_license
vqpv/stepik-course-58852
780576c291aad481612d547405acb2bdf85b9feb
bcc1afbae4d8f35bc825e6cedb6645a4afa0604c
refs/heads/master
2023-05-13T07:36:49.331649
2021-06-02T20:31:41
2021-06-02T20:31:41
309,791,060
0
0
null
null
null
null
UTF-8
Python
false
false
81
py
s, v1, v2 = float(input()), float(input()), float(input())
print(s / (v1 + v2))
[ "raxer93@mail.ru" ]
raxer93@mail.ru
c43e9f244010e00a1d2ac7ef61e8a358e7114137
72b77f97876983025eb05a5aa1d6f248a1be3074
/nearest_exit_from_entrance_in_maze.py
09ca501f24559d8dce1ac7e51885cbd6b539e663
[ "Apache-2.0" ]
permissive
erjan/coding_exercises
4c6bccb2cdac65ccbc3107a482914275ecd157f7
68dac358a6d4dabd41d47dbd4addb2ec50e0ca11
refs/heads/master
2023-09-02T07:25:30.886175
2023-08-27T06:13:06
2023-08-27T06:13:06
236,281,070
5
0
Apache-2.0
2020-05-05T15:08:49
2020-01-26T07:32:09
Python
UTF-8
Python
false
false
3,839
py
'''
You are given an m x n matrix maze (0-indexed) with empty cells (represented as '.') and walls (represented as '+'). You are also given the entrance of the maze, where entrance = [entrancerow, entrancecol] denotes the row and column of the cell you are initially standing at.

In one step, you can move one cell up, down, left, or right. You cannot step into a cell with a wall, and you cannot step outside the maze. Your goal is to find the nearest exit from the entrance. An exit is defined as an empty cell that is at the border of the maze. The entrance does not count as an exit.

Return the number of steps in the shortest path from the entrance to the nearest exit, or -1 if no such path exists.
'''

import collections
from collections import deque
from typing import List


class Solution:
    def nearestExit(self, maze: List[List[str]], entr: List[int]) -> int:
        rows, cols = len(maze), len(maze[0])
        deq = deque()
        deq.append([entr[0], entr[1], -1])
        while deq:
            r, c, dist = deq.popleft()
            if not (0 <= r < rows and 0 <= c < cols):
                # stepped past the border: the previous cell was an exit
                if dist > 0:
                    return dist
                continue
            if maze[r][c] == '+':
                continue
            maze[r][c] = '+'  # mark visited by turning the cell into a wall
            for _r, _c in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                deq.append([r + _r, c + _c, dist + 1])
        return -1

# ---------------------------------------------------------------------------------------------------------

class Solution:
    def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:
        q = collections.deque([(*entrance, 0)])
        m, n = len(maze), len(maze[0])
        maze[entrance[0]][entrance[1]] = '+'  # mark the entrance as visited
        while q:
            x, y, c = q.popleft()
            if (x == 0 or x == m-1 or y == 0 or y == n-1) and [x, y] != entrance:
                return c
            for i, j in [(x+_x, y+_y) for _x, _y in [(-1, 0), (1, 0), (0, -1), (0, 1)]]:
                if 0 <= i < m and 0 <= j < n and maze[i][j] == '.':
                    maze[i][j] = '+'
                    q.append((i, j, c + 1))
        return -1

# ----------------------------------------------------------------------------------------------------------------------------------

def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:
    m, n = len(maze), len(maze[0])
    q = [entrance]
    vis = {(entrance[0], entrance[1])}
    ans = 0
    while q:
        l = len(q)
        ans += 1
        for _ in range(l):
            [i, j] = q.pop(0)
            if (i == 0 or i == m-1 or j == 0 or j == n-1) and [i, j] != entrance:
                return ans - 1
            for x, y in [(1, 0), (-1, 0), (0, -1), (0, 1)]:
                if 0 <= i+x < m and 0 <= j+y < n and maze[i+x][j+y] == "." and (i+x, j+y) not in vis:
                    vis.add((i+x, j+y))
                    q.append([i+x, j+y])
    return -1

# --------------------------------------------------------------------------------------------------------------------

class Solution:
    def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:
        directions = [[1, 0], [-1, 0], [0, 1], [0, -1]]
        rows = len(maze)
        cols = len(maze[0])
        result = -1

        q = collections.deque()
        q.append((entrance[0], entrance[1], 0))
        alreadyVisited = set()
        alreadyVisited.add((entrance[0], entrance[1]))
        while q:
            r, c, step = q.popleft()
            if (r == 0 or c == 0 or r == rows-1 or c == cols-1) and [r, c] != entrance:
                return step
            step += 1
            for dr, dc in directions:
                if (r + dr) in range(rows) and (c + dc) in range(cols) and \
                        maze[r + dr][c + dc] == '.' and (r + dr, c + dc) not in alreadyVisited:
                    alreadyVisited.add((r + dr, c + dc))
                    q.append((r + dr, c + dc, step))
        return -1
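A quick smoke test for the BFS solutions above; the 3x3 maze is a made-up example, and note that when the whole file is executed the last Solution definition is the one that stays bound:

maze = [["+", "+", "."],
        [".", ".", "."],
        ["+", "+", "+"]]
print(Solution().nearestExit(maze, [1, 2]))  # 1: one step up reaches a border cell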
[ "noreply@github.com" ]
erjan.noreply@github.com
16e30da4b6f91ca32176574c0adce3cd21cfc776
b7e5f03fa633a6a2d86fec39c4afee5186bb9b42
/scripts/doug
a99efc64846eac4eab0f3da893704e199c64aabe
[]
no_license
moretea/doug
858a59914d5f13120ab462224a4c0ee4f091146a
1ff4acdba0e6d4167f9a79c6790cc4ba4d2b56b2
refs/heads/master
2021-05-09T04:27:49.033901
2018-01-28T16:12:40
2018-01-28T16:12:40
119,274,543
0
0
null
null
null
null
UTF-8
Python
false
false
240
#!/usr/bin/env python
from gui.application import Application
from gui import Gtk

# Initialize application
Application()

# Make CTRL+C work in GTK
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)

# Run GTK main loop
Gtk.main()
[ "maarten@moretea.nl" ]
maarten@moretea.nl
71dce96d87c61d035b1a2b7516eb19663198d0ea
5855d7432fecdf58341d8410ed34a1ccf13b516c
/algorithms/flat.py
225b784b060166bc71d6b626eaafed988ce326f6
[]
no_license
zyz10086/leetcode
376fc648b218e869e1213dbbbb12a07751cb35a3
2358c417d4d31739e79e965e1a0093db9e7a6b1b
refs/heads/master
2022-10-01T10:20:44.518761
2020-06-07T12:48:36
2020-06-07T12:48:36
259,667,994
0
0
null
null
null
null
UTF-8
Python
false
false
1,054
py
# A little puzzle to play with:
# Write a flat function that flattens an array. The array may be nested to any depth, e.g.:
# [1,"2",[3,"4",[5,undefined,[null,[1,[2,3,[{}]]]]],null]]
# After flattening it should return
# [1, "2", 3, "4", 5, undefined, null, 1, 2, 3, {}, null]
# Constraints: you may not use the language's built-in flat method, no third-party
# libraries, and no recursion.

# [1,2,[3,[4]],5]
def flat(datas):
    if not datas:
        return datas
    res, stack = [], []
    tmp, tmp_index = datas, 0
    while True:
        if tmp_index >= len(tmp):
            # finished this list; climb back to the parent (if any) and
            # advance past the element we had descended into
            if not stack:
                break
            tmp_index, tmp = stack.pop(), stack.pop()
            tmp_index += 1
            continue
        item = tmp[tmp_index]
        if type(item) == list:
            # remember where we were, then descend into the nested list
            stack.append(tmp)
            stack.append(tmp_index)
            tmp, tmp_index = item, 0
            continue
        res.append(item)
        tmp_index += 1
    return res


print(flat([1, [[2], [3], [1, [2, [3]]]]]))
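A couple of extra calls exercising the function against the examples in the header comment; Python has no undefined/null, so None stands in for both here:

print(flat([1, "2", [3, "4", [5, None, [None, [1, [2, 3, [{}]]]]], None]]))
# -> [1, '2', 3, '4', 5, None, None, 1, 2, 3, {}, None]
print(flat([1, 2, [3, [4]], 5]))  # -> [1, 2, 3, 4, 5]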
[ "693076854@qq.com" ]
693076854@qq.com
f19b5bd0d68a0ea60efb0252df8167f299652108
5da6e6cc655421038efc1e4fcb4bd28b36055a11
/Python/cadenas.py
2fd8a725981e869e95f05a88bcea5e7359f2ccd4
[]
no_license
rmlu20xx/MiPrimerRepositorio
2ea8b408f927fbb9fa4e97d07d50b8b9a1d68214
9512e8211fb8d47b1da6c022553e5218184b1ab0
refs/heads/master
2023-07-20T05:47:53.120660
2021-09-07T01:31:28
2021-09-07T01:31:28
403,801,718
0
0
null
null
null
null
UTF-8
Python
false
false
1,305
py
#***************************************************************************************************************************************************
#***************************************************************************************************************************************************
# ANALYZING STRINGS
#***************************************************************************************************************************************************
cadena = "Esto es una prueba"

lista = list(cadena)
print('Converts a string into a list of characters with the "list" function', lista)
# ['E', 's', 't', 'o', ' ', 'e', 's', ' ', 'u', 'n', 'a', ' ', 'p', 'r', 'u', 'e', 'b', 'a']

lista = cadena.split()
print('Converts a string into a list of words with the "split" function', lista)
# ['Esto', 'es', 'una', 'prueba']

limite = '-'
lista = limite.join(lista)
print('Joins a list of words back into a single string, inserting a delimiter, with the "join" function', lista)
# Esto-es-una-prueba

cadena = cadena.upper()
print("Prints the string entirely in uppercase", cadena)

# capture the username from an internet address
dato = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
espacio = dato.find(' ')
arroba_pos = dato.find('@')
nombre_usuario = dato[espacio+1:arroba_pos]
print(nombre_usuario)
[ "rmlu20xx@gmail.com" ]
rmlu20xx@gmail.com
79c023c03992644798a3cfc41a250db969961b86
5cb29d1ddd97f5836ed407bb82ee340c3c49169f
/py/examples/client_send_request.py
155452b1687a5f30a206409fb40438ca499ba0ed
[ "MIT" ]
permissive
Mee6/loqui
6de1ce7a192303d60dd165199ac9f870e4a83406
34c7d1c016ed646589068536987a04c4cdae0696
refs/heads/master
2020-05-07T09:08:04.082314
2019-04-09T12:40:26
2019-04-09T12:40:26
180,363,712
2
1
MIT
2019-04-09T12:39:11
2019-04-09T12:39:11
null
UTF-8
Python
false
false
125
py
from loqui.client import LoquiClient

client = LoquiClient(('localhost', 4001))
print(len(client.send_request('hello world')))
[ "me@jh.gg" ]
me@jh.gg
6304c7501303e467156ad1a4ee7a4123d27ec28a
642401fc5ed408de3d9cf44cd66a73ed4937415f
/Scraping/models.py
c4e060b11dafa81f4f6c708fc14bf10d7bfddff2
[]
no_license
cacamacho6/Scraping
632df28848950aef358b3e6966fb3d4e5377e503
0a9403775b84cec1c21677209c1cf1d61e78986c
refs/heads/master
2020-12-11T02:43:06.500216
2020-01-14T06:33:48
2020-01-14T06:33:48
233,770,550
0
0
null
null
null
null
UTF-8
Python
false
false
239
py
from django.conf import settings
from django.db import models


class Producto(models.Model):
    # CharField requires max_length; the values here are generous defaults
    # chosen so scraped strings are not truncated.
    titulo = models.CharField(max_length=200)
    precio = models.CharField(max_length=50)
    envio = models.CharField(max_length=100)

    def __str__(self):
        return self.titulo
[ "carloscamacho09@hotmail.com" ]
carloscamacho09@hotmail.com
0d0aeca39228f1876ddd1e96fff08303e582462d
feb4dbe7f57b617151bf2e85f202ba423955513a
/hdv/redfish/conftest.py
b1d8d5f0ea7324d8b9efd34ff74153d3794d2d9d
[ "Apache-2.0", "CC-BY-4.0" ]
permissive
opnfv/cirv-hdv
6d91a0c19c9f970dd1e991718b0f0ef957fd71f7
2d145d4f1fd231def2c9d52a71267031b938c0ac
refs/heads/master
2023-01-23T12:27:33.545466
2020-11-18T03:21:10
2020-11-18T03:21:10
293,824,124
0
1
null
null
null
null
UTF-8
Python
false
false
745
py
import pytest

from hdv_redfish import read_yaml, parse_config


def pytest_addoption(parser):
    parser.addoption(
        "--cases", action="store", default="./conf/cases.yaml",
        help="case yaml file"
    )
    parser.addoption(
        "--config", action="store", default="./conf/pdf2.0.json",
        help="given global config.yaml file"
    )


def pytest_generate_tests(metafunc):
    if "config_list" in metafunc.fixturenames:
        config_file = metafunc.config.getoption("--config")
        metafunc.parametrize("config_list", parse_config(config_file),
                             indirect=True, scope='session')
    if "case" in metafunc.fixturenames:
        cases_file = metafunc.config.getoption("--cases")
        metafunc.parametrize("case", read_yaml(cases_file))
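For context, a test consuming this parametrization just declares the fixture name as an argument; the test body below is a hypothetical placeholder, not a test from the repository:

# Hypothetical test module picked up alongside this conftest.py.
def test_case_is_defined(case):
    # each entry read from the --cases YAML file arrives here as `case`
    assert case is not None

# Run with the custom options, e.g.:
#   pytest --cases ./conf/cases.yaml --config ./conf/pdf2.0.json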
[ "shivam828787@gmail.com" ]
shivam828787@gmail.com
675455d54ade58bd3490f43bcfb4faa5e0edaf0b
f829b3c47acd05a48991dfd28431a2628cf7d62c
/blog/views.py
1d94385e891151bf588f57f119b128c762308970
[]
no_license
jamcoy/Django-blog
18d6831969ee36e4ebeca5d1e2f0231ae155a175
40a631f9375a2b711d4142d9533002c1c8d2bcbd
refs/heads/master
2020-07-12T11:51:17.386761
2016-12-02T13:24:08
2016-12-02T13:24:08
73,909,077
0
0
null
null
null
null
UTF-8
Python
false
false
2,331
py
from django.utils import timezone
from .models import Post
from django.shortcuts import render, get_object_or_404
from .forms import BlogPostForm
from django.shortcuts import redirect


def post_detail(request, db_id):
    """
    Create a view that returns a single Post object based on the post ID
    and renders it to the 'postdetail.html' template, or returns a 404
    error if the post is not found.
    """
    post = get_object_or_404(Post, pk=db_id)  # pk is primary key
    post.views += 1  # clock up the number of post views
    post.save()
    return render(request, "blog/postdetail.html", {'post': post})


def post_list(request):
    """
    Create a view that will return a list of Posts that were published
    prior to 'now' and render them to the 'blogposts.html' template.
    """
    posts = Post.objects.filter(published_date__lte=timezone.now()
                                ).order_by('-published_date')
    return render(request, "blog/blogposts.html", {'posts': posts})


def top_posts(request):
    """
    Get a list of posts ordered by number of views and return the top 5.
    Render to 'blogposts.html'.
    """
    posts = Post.objects.filter(published_date__lte=timezone.now()
                                ).order_by('-views')[:5]
    return render(request, "blog/blogposts.html", {'posts': posts})


def new_post(request):
    if request.method == "POST":
        form = BlogPostForm(request.POST, request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect(post_detail, post.pk)
    else:
        form = BlogPostForm()
    return render(request, 'blog/blogpostform.html', {'form': form})


def edit_post(request, db_id):
    post = get_object_or_404(Post, pk=db_id)
    if request.method == "POST":
        form = BlogPostForm(request.POST, request.FILES, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect(post_detail, post.pk)
    else:
        form = BlogPostForm(instance=post)
    return render(request, 'blog/blogpostform.html', {'form': form})
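These views expect URL patterns that supply db_id; a minimal urls.py wiring sketch is below. The route paths and names are hypothetical, and path() assumes Django 2.0 or later (an older project would use url() with regexes instead):

from django.urls import path
from . import views

urlpatterns = [
    path('posts/', views.post_list, name='post_list'),
    path('top/', views.top_posts, name='top_posts'),
    path('post/new/', views.new_post, name='new_post'),
    path('post/<int:db_id>/', views.post_detail, name='post_detail'),
    path('post/<int:db_id>/edit/', views.edit_post, name='edit_post'),
]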
[ "james@coynemail.net" ]
james@coynemail.net
af5289946025cce438a0d5c1c4207ff8809d349e
8a6db656031a7225c9a27ec830f93fff12b612be
/tests/settings.py
a3a1ce02052c9513de957f3eb438d10e70424527
[ "BSD-3-Clause" ]
permissive
arneb/django-campaign
aa149bb5724bf6e2a1f7024d2402e09d95418951
181d96d1af31f60f426b64ea79fc9acd6608a001
refs/heads/master
2023-07-07T12:55:51.237537
2023-06-30T13:26:34
2023-06-30T13:26:34
8,314,115
57
27
BSD-3-Clause
2023-08-16T09:28:43
2013-02-20T14:00:23
Python
UTF-8
Python
false
false
2,402
py
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'only-for-the-test-suite'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

SITE_ID = 1

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'campaign',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = []

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
[ "mail@arnebrodowski.de" ]
mail@arnebrodowski.de
f579e96041e0b2e5e50434649ec9ccf0fc33072b
397ee3067ccd999bd1dc7d3e2779961d9dfb92f0
/backbone/MNISTMLP_OC.py
7ad88caa644e96d2f7b9aebe3e860f6b1385abc5
[]
no_license
tq-zhang/ILCOC
afd491cf8c5c070335494fb32710ffba0a2aba28
14de25e22f975d88fcb408416a86a26ba8e8d71f
refs/heads/main
2023-08-02T20:37:45.323567
2021-09-21T11:49:00
2021-09-21T11:49:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,023
py
# Copyright 2020-present, Pietro Buzzega, Matteo Boschini, Angelo Porrello, Davide Abati, Simone Calderara.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
from backbone import xavier, num_flat_features, He_init


class MNISTMLP_OC(nn.Module):
    """
    MLP with two hidden ReLU layers (128 and 256 units) followed by an
    embedding layer and a linear classifier. Designed for the MNIST dataset.
    """

    def __init__(self, input_size: int, embedding_dim: int, output_size: int) -> None:
        """
        Instantiates the layers of the network.
        :param input_size: the size of the input data
        :param embedding_dim: the size of the embedding produced by the feature extractor
        :param output_size: the size of the output
        """
        super(MNISTMLP_OC, self).__init__()

        self.input_size = input_size
        self.embedding_dim = embedding_dim
        self.output_size = output_size

        self.fc1 = nn.Linear(self.input_size, 128, bias=False)
        self.fc2 = nn.Linear(128, 256, bias=False)
        self.fc3 = nn.Linear(256, self.embedding_dim, bias=False)

        self._features = nn.Sequential(
            self.fc1,
            nn.ReLU(),
            self.fc2,
            nn.ReLU(),
            self.fc3,
        )
        self.classifier = nn.Linear(self.embedding_dim, self.output_size, bias=False)
        self.net = nn.Sequential(
            self._features, nn.ReLU(), self.classifier)
        self.reset_parameters()

    def classify(self, x: torch.Tensor) -> torch.Tensor:
        """
        Returns the non-activated output of the classifier.
        :param x: input tensor (batch_size, input_size)
        :return: output tensor (batch_size, output_size)
        """
        x = x.view(-1, num_flat_features(x))
        return self.net(x)

    def reset_parameters(self) -> None:
        """
        Calls the He parameter initialization function on every layer.
        """
        self.net.apply(He_init)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Compute a forward pass through the feature extractor.
        :param x: input tensor (batch_size, input_size)
        :return: embedding tensor (batch_size, embedding_dim)
        """
        x = x.view(-1, num_flat_features(x))
        return self._features(x)

    def get_params(self) -> torch.Tensor:
        """
        Returns all the parameters concatenated in a single tensor.
        :return: parameters tensor
        """
        params = []
        for pp in list(self.parameters()):
            params.append(pp.view(-1))
        return torch.cat(params)

    def set_params(self, new_params: torch.Tensor) -> None:
        """
        Sets the parameters to a given value.
        :param new_params: concatenated values to be set
        """
        assert new_params.size() == self.get_params().size()
        progress = 0
        for pp in list(self.parameters()):
            cand_params = new_params[progress: progress +
                                     torch.tensor(pp.size()).prod()].view(pp.size())
            progress += torch.tensor(pp.size()).prod()
            pp.data = cand_params

    def get_grads(self) -> torch.Tensor:
        """
        Returns all the gradients concatenated in a single tensor.
        :return: gradients tensor
        """
        grads = []
        for pp in list(self.parameters()):
            grads.append(pp.grad.view(-1))
        return torch.cat(grads)

    def get_grads_list(self):
        """
        Returns a list containing the gradients (a tensor for each layer).
        :return: gradients list
        """
        grads = []
        for pp in list(self.parameters()):
            grads.append(pp.grad.view(-1))
        return grads
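A small round-trip check of the flattened-parameter helpers above; the constructor sizes are arbitrary choices for the sketch, not values from the repository:

# Hypothetical smoke test: set_params(get_params()) should be lossless.
import torch

net = MNISTMLP_OC(input_size=784, embedding_dim=64, output_size=10)
flat = net.get_params()
net.set_params(flat)
assert torch.equal(net.get_params(), flat)
print(flat.numel(), 'parameters flattened into one tensor')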
[ "swjyouxiang@126.com" ]
swjyouxiang@126.com
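The get_params/set_params pair above flattens every weight into one 1-D tensor and writes such a vector back, a pattern common in constraint-based continual-learning methods. A minimal round-trip sketch, assuming the module is importable as backbone.MNISTMLP_OC and using arbitrary illustrative sizes:

import torch
from backbone.MNISTMLP_OC import MNISTMLP_OC  # import path assumed from the record above

net = MNISTMLP_OC(input_size=28 * 28, embedding_dim=64, output_size=10)
flat = net.get_params()                  # one 1-D tensor with every weight
net.set_params(torch.zeros_like(flat))   # write a whole new parameter vector back
assert torch.equal(net.get_params(), torch.zeros_like(flat))  # round-trip holds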
a89b19068362d568996f1248eb937e7a3f09c0d2
5e358c2f37e8e2b329b412bc153377d3b62e916d
/tests/test_doctest.py
9344c70d5723c0e4646249977ad200bbbc955fb3
[ "MIT" ]
permissive
miketheman/pytest-socket
3f81470c2dd12f9769a98482013130aa4d6c4ee7
bdf012cffdfd6606dfac504fa033a3ca55b374ee
refs/heads/main
2023-08-07T22:30:32.218725
2023-07-31T23:41:04
2023-07-31T23:41:04
93,013,711
233
42
MIT
2023-09-12T00:39:48
2017-06-01T03:43:01
Python
UTF-8
Python
false
false
326
py
def test_function_with_doctest(testdir):
    testdir.makepyfile(
        '''
        def my_sum(a, b):
            """Sum two values.

            >>> my_sum(1, 1)
            2
            """
            return a + b
        '''
    )
    result = testdir.runpytest("--doctest-modules")
    result.assert_outcomes(passed=1)
[ "noreply@github.com" ]
miketheman.noreply@github.com
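The test above drives pytest's --doctest-modules collection through the testdir fixture. A minimal standalone sketch of the same idea using only the stdlib doctest module (file layout and names here are illustrative, not from the repo above):

def my_sum(a, b):
    """Sum two values.

    >>> my_sum(1, 1)
    2
    """
    return a + b

if __name__ == "__main__":
    # doctest collects the >>> examples in docstrings and runs them as tests
    import doctest
    doctest.testmod(verbose=True)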
1af00bc3688f5f95b5a75de652d9bcd3650618be
36885d07b9a5fa3d805547d99457ccbce7211fea
/isprime.py
235c68d316a34c64a49fb1649791e29e3f4dd72e
[]
no_license
kks18/solutions
ee25cc16540cc531bb0f76f525c19ef6be209690
c39026acee2c80bdab7f809b385feb82b4a05dc4
refs/heads/master
2020-04-09T16:41:26.659671
2018-12-05T04:39:43
2018-12-05T04:39:43
160,459,995
0
0
null
null
null
null
UTF-8
Python
false
false
249
py
# function definitions
def IsNum(x):
    # trial division up to sqrt(x): any factor pair of x has one member <= sqrt(x)
    if x < 2:
        print("%d is not a prime number" % x)
        return
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            print("%d is not a prime number" % x)
            return
    print("%d is a prime number" % x)

x = int(input('enter a number'))
IsNum(x)
[ "kks2018y@gmail.com" ]
kks2018y@gmail.com
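For reference, a reusable boolean variant of the same primality check (is_prime is a name introduced here for illustration, not part of the repo above); skipping even divisors after 2 halves the work:

def is_prime(n):
    if n < 2:
        return False
    if n < 4:          # 2 and 3 are prime
        return True
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:  # only odd candidates up to sqrt(n)
        if n % i == 0:
            return False
        i += 2
    return True

assert [x for x in range(20) if is_prime(x)] == [2, 3, 5, 7, 11, 13, 17, 19]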
a0e99c6c50719741c81822c086a9c47b494cfd24
48fff5012b0459f7f7cd3a4374b7c56351461409
/_utils/util_evaluate.py
344ed1985408f3005cd1a2cf7a8581307e20b9ed
[]
no_license
ikesan009/CCSSAV-SpeechEnhancement
5bd16fe9b8086691f03d48c8a53a0d2b066e0508
5a98db38e70ee4802eb52cac5f8ab18d80cf5be7
refs/heads/master
2023-03-13T17:59:56.433503
2021-03-09T07:17:49
2021-03-09T07:17:49
345,915,866
0
0
null
null
null
null
UTF-8
Python
false
false
2,415
py
# coding: utf-8
import math
import pickle
import numpy as np
from pypesq import pypesq
from collections import namedtuple

import _utils.data_processor

Sample = namedtuple('Sample', [
    'speaker_id',
    'video_file_path',
    'speech_file_path',
    'noise_file_path',
    'video_samples',
    'mixed_spectrograms',
    'speech_spectrograms',
    'noise_spectrograms',
    'mixed_signal',
    'video_frame_rate'
])


def preprocess_sample(path_audio, path_video, path_noise, slice_duration_ms=200):
    video_samples, video_frame_rate = _utils.data_processor.preprocess_video_sample(path_video, slice_duration_ms)
    mixed_spectrograms, speech_spectrograms, noise_spectrograms, mixed_signal = \
        _utils.data_processor.preprocess_audio_pair(
            path_audio, path_noise, slice_duration_ms, video_samples.shape[0], video_frame_rate)

    n_slices = min(video_samples.shape[0], mixed_spectrograms.shape[0])

    sample = Sample(
        speaker_id=None,
        video_file_path=path_video,
        speech_file_path=path_audio,
        noise_file_path=path_noise,
        video_samples=video_samples[:n_slices],
        mixed_spectrograms=mixed_spectrograms[:n_slices],
        speech_spectrograms=speech_spectrograms[:n_slices],
        noise_spectrograms=noise_spectrograms[:n_slices],
        mixed_signal=mixed_signal,
        video_frame_rate=video_frame_rate
    )
    return sample


def make_normalizer(args, assets, name_model):
    with open(assets.get_normalization_cache_path(name_model, args.data_dir), 'rb') as normalization_fd:
        video_normalizer = pickle.load(normalization_fd)
    return video_normalizer


"""
def calcurate_snr(signal, noise):
    s = signal.get_data()
    n = noise.get_data()
    snr = 10 * math.log10(np.var(s) / np.var(n))
    return snr
"""


def calcurate_snr(signal, mixed_signal):
    signal = signal.get_data()
    mixed_signal = mixed_signal.get_data()
    n_len = min(signal.size, mixed_signal.size)
    signal = signal[:n_len]
    mixed_signal = mixed_signal[:n_len]
    noise = mixed_signal - signal
    snr = 10 * math.log10(np.var(signal) / np.var(noise))
    return snr


def calcurate_pesq(signal, noise):
    s = signal.get_data()
    n = noise.get_data()
    sr = signal.get_sample_rate()
    n_len = min(s.size, n.size)
    pesq = pypesq(sr, s[:n_len], n[:n_len], 'wb')
    return pesq
[ "ikesan009@gmail.com" ]
ikesan009@gmail.com
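calcurate_snr above estimates SNR as 10 * log10(var(signal) / var(noise)), recovering the noise as mixed - signal. A self-contained numpy check of that formula (the tone, sample rate, and noise level are made up for illustration; no pypesq needed):

import numpy as np

rng = np.random.default_rng(0)
signal = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000)  # 1 s of a 440 Hz tone
noise = 0.1 * rng.standard_normal(signal.size)
mixed = signal + noise

# var(signal) ~ 0.5 for a unit-amplitude sine, var(noise) ~ 0.01
snr_db = 10 * np.log10(np.var(signal) / np.var(mixed - signal))
print("SNR: %.1f dB" % snr_db)  # ~17 dB for these amplitudes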
d3398311ad24bdba3ea54ffed03ff46a20de77fc
2d634953149d5e6c793123349b1d56606a945c41
/bin/freeze_graph
342bf4965165eda9e404d8918a817600f4a511b0
[]
no_license
Bhoomikapanwar/S-P-Index-Predictor
cb705bbf7ad2b8d9713bc5ad68302001ceb8f862
c53418079226a246dac37d9fa21237bd43c9edd5
refs/heads/master
2020-04-03T22:21:02.002222
2018-07-25T16:35:43
2018-07-25T16:35:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
284
#!/home/shivam/gitRepos/stockPricePredictor/bin/python3
# -*- coding: utf-8 -*-
import re
import sys

from tensorflow.python.tools.freeze_graph import run_main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_main())
[ "shivam.mcs17.du@gmail.com" ]
shivam.mcs17.du@gmail.com
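The file above is the wrapper pip writes into a virtualenv's bin/ for a console script. A sketch of the packaging side that produces such wrappers, with illustrative project metadata (the exact freeze_graph entry point shown is an assumption, not verified against TensorFlow's own setup.py):

from setuptools import setup

setup(
    name="example-tools",          # hypothetical package name
    version="0.1",
    entry_points={
        "console_scripts": [
            # "<script name> = <module path>:<callable>" — pip generates
            # a bin/ wrapper like the one above for each entry here
            "freeze_graph = tensorflow.python.tools.freeze_graph:run_main",
        ],
    },
)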
9d786ce994891cf351b32beb491a051eb0956581
1418e843a6535d934cd3b0d4452de00eaa37b6ad
/02.Part B/19.graph-algos/graph.py
f6895cdee03a899aa034502fcafcadb9183388f0
[]
no_license
filvi/sciprog
afdd9bb629570a381c0f71d3451946708a943f83
1393ca95e8645f608826bd43cd6bfa488229853e
refs/heads/master
2023-02-02T12:30:31.847045
2020-12-16T10:17:27
2020-12-16T10:17:27
310,028,831
0
0
null
null
null
null
UTF-8
Python
false
false
17,160
py
import pprint
from queue import Queue
from collections import deque

DEBUG = True

def debug(msg):
    if DEBUG:
        print("DEBUG: ", msg.replace('\n', '\n' + (' ' * 8)))

# PrettyPrint(indent=4)
pp = pprint.PrettyPrinter(indent=4).pprint
pformat = pprint.PrettyPrinter(indent=4).pformat


class DiGraph:
    """ A simple graph data structure, represented as a dictionary of adjacency lists

        Verteces can be of any type, to keep things simple in this data model
        they coincide with their labels. Adjacency lists hold the target verteces.

        Attempts to add duplicate targets will be silently ignored.

        For shorthand construction, see separate dig() function
    """

    def __init__(self):
        # The class just holds the dictionary _edges: as keys it has the verteces, and
        # to each vertex associates a list with the verteces it is linked to.
        self._edges = {}

    def add_vertex(self, vertex):
        """ Adds vertex to the DiGraph. A vertex can be any object.

            If the vertex already exists, does nothing.
        """
        if vertex not in self._edges:
            self._edges[vertex] = []

    def verteces(self):
        """ Returns a set of the graph verteces. Verteces can be any object. """
        # Note dict keys() return a list, not a set. Bleah.
        # See http://stackoverflow.com/questions/13886129/why-does-pythons-dict-keys-return-a-list-and-not-a-set
        return set(self._edges.keys())

    def has_vertex(self, vertex):
        """ Returns true if graph contains given vertex. A vertex can be any object. """
        return vertex in self._edges

    def add_edge(self, vertex1, vertex2):
        """ Adds an edge to the graph, from vertex1 to vertex2

            If verteces don't exist, raises a ValueError
            If there is already such an edge, exits silently.
        """
        if not vertex1 in self._edges:
            raise ValueError("Couldn't find source vertex: " + str(vertex1))

        if not vertex2 in self._edges:
            raise ValueError("Couldn't find target vertex: " + str(vertex2))

        if not vertex2 in self._edges[vertex1]:
            self._edges[vertex1].append(vertex2)

    def __str__(self):
        """ Returns a string representation like the following:

            >>> print gr('a',['b','c', 'd'], 'b', ['b'], 'c', ['a'])

            a: [b,c]
            b: [b]
            c: [a]
            d: []
        """
        if (len(self._edges) == 0):
            return "\nDiGraph()"

        max_len = 0
        sorted_verteces = sorted(self._edges.keys(), key=str)

        for source in self._edges:
            max_len = max(max_len, len(str(source)))

        strings = ["\n"]

        for source in sorted_verteces:
            strings.append(str(source).ljust(max_len))
            strings.append(': ')
            strings.append(str(self._edges[source]))
            strings.append('\n')

        return ''.join(strings)

    def __repr__(self):
        return self.__str__()

    def adj(self, vertex):
        """ Returns the verteces adjacent to vertex.

            - If vertex is not in edges, raises a ValueError
            - NOTE: verteces are returned in a NEW list.
              Modifying the list will have NO effect on the graph!
        """
        if not vertex in self._edges:
            raise ValueError("Couldn't find a vertex " + str(vertex))

        return self._edges[vertex][:]

    def __eq__(self, other):
        """ !!! NOTE: although we represent the set with adjacency lists,
            for __eq__ graph dig('a', ['b','c']) is considered equal to
            a graph dig('a', ['c', 'b']) !!!
        """
        if not isinstance(other, DiGraph):
            return False

        if self.verteces() != other.verteces():
            return False

        for source in self._edges:
            if set(self._edges[source]) != set(other._edges[source]):
                return False

        return True

    def is_empty(self):
        """ A DiGraph for us is empty if it has no verteces and no edges """
        return len(self._edges) == 0

    def bfs(self, source):
        """ Example bfs that performs a simple breadth first search
            in the graph and prints visited nodes.
            Starts from provided source vertex id.

            If source is not in the graph, raises a ValueError
        """
        if not source in self.verteces():
            raise ValueError("Can't find vertex:" + str(source))

        Q = deque()
        # we start from source
        Q.append(source)
        visited = set()
        visited.add(source)

        while len(Q) > 0:
            u = Q.popleft()
            debug("Removed from queue: %s" % u)
            # Visit node u
            for v in self._edges[u]:
                debug(" Found neighbor: %s" % v)
                # Visit edge (u,v)
                if v not in visited:
                    debug(" not yet visited, enqueueing ..")
                    visited.add(v)
                    Q.append(v)
                else:
                    debug(" already visited")
            debug(" Queue is: %s " % list(Q))

    def dfs(self, source):
        """ Example of a simple iterative (stack-based) depth first search
            on the graph. Starts from source vertex id and prints steps.
            Already visited nodes are tracked in a set.

            - If the graph is empty, raises a ValueError
        """
        if self.is_empty():
            raise ValueError("Cannot perform DFS on an empty graph!")

        S = []
        S.append(source)
        visited = set()
        debug("Stack is: %s " % S)

        while not len(S) == 0:
            u = S.pop()
            debug("popping from stack: %s" % u)
            if u not in visited:
                debug(" not yet visited")
                # visit node u (pre-order)
                visited.add(u)
                for v in self.adj(u):
                    debug(" Scheduling for visit: %s" % v)
                    # visit edge (u,v)
                    S.append(v)
                debug("Stack is : %s " % S)
            else:
                debug(" already visited!")

    def has_edge(self, source, target):
        """ Returns True if there is an edge between source vertex and target vertex.
            Otherwise returns False.

            If either source, target or both verteces don't exist raises a ValueError
        """
        raise Exception('TODO IMPLEMENT ME !')

    def remove_vertex(self, vertex):
        """ Removes the provided vertex and returns it

            If the vertex is not found, raises a LookupError
        """
        raise Exception('TODO IMPLEMENT ME !')

    def transpose(self):
        """ Reverses the direction of all the edges

            NOTE: this method changes the graph in-place: it does **not**
            create a new instance and does *not* return anything!!

            NOTE: To implement it, *avoid* modifying the existing _edges
            dictionary (that would probably cause more problems than anything
            else). Instead, create a new dictionary, fill it with the required
            verteces and edges and then set _edges to point to the new dictionary.
        """
        raise Exception('TODO IMPLEMENT ME !')

    def has_self_loops(self):
        """ Returns True if the graph has any self loop (a.k.a. cap), False otherwise """
        raise Exception('TODO IMPLEMENT ME !')

    def remove_self_loops(self):
        """ Removes all of the self-loop edges (a.k.a. caps)

            NOTE: Removes just the edges, not the verteces!
        """
        raise Exception('TODO IMPLEMENT ME !')

    def undir(self):
        """ Return a *NEW* undirected version of this graph, that is, if an
            edge a->b exists in this graph, the returned graph must also
            have both edges a->b and b->a

            *DO NOT* modify the current graph, just return an entirely new one.
        """
        raise Exception('TODO IMPLEMENT ME !')

    def distances(self, source):
        """ Returns a dictionary where the keys are verteces, and each vertex v
            is associated to the *minimal* distance in number of edges required
            to go from the source vertex to vertex v. If a node is unreachable,
            the distance will be -1

            Source has distance zero from itself
            Verteces immediately connected to source have distance one.

            - if source is not a vertex, raises a LookupError
            - MUST execute in O(|V| + |E|)
            - HINT: implement this using bfs search.
        """
        raise Exception('TODO IMPLEMENT ME !')

    def equidistances(self, va, vb):
        """ RETURN a dictionary holding the nodes which are equidistant from
            input verteces va and vb. The dictionary values will be the
            distances of the nodes.

            - if va or vb are not present in the graph, raises LookupError
            - MUST execute in O(|V| + |E|)
            - HINT: To implement this, you can use the previously defined
              distances() method
        """
        raise Exception('TODO IMPLEMENT ME !')

    def cp(self, source):
        """ Performs a BFS search starting from provided node label source and
            RETURN a dictionary of nodes representing the visit tree in the
            child-to-parent format, that is, each key is a node label and as
            value has the node label from which it was discovered for the
            first time

            So if node "n2" was discovered for the first time while inspecting
            the neighbors of "n1", then in the output dictionary there will be
            the pair "n2":"n1".

            The source node will have None as parent, so if source is "n1" in
            the output dictionary there will be the pair "n1": None

            - if source is not found, raise LookupError
            - MUST execute in O(|V| + |E|)
            - NOTE: This method must *NOT* distinguish between exits and normal
              nodes, in the tests we label them n1, e1 etc just because we will
              reuse them in the next exercise
            - NOTE: You are allowed to put debug prints, but the only thing that
              matters for the evaluation and tests to pass is the returned
              dictionary
        """
        raise Exception('TODO IMPLEMENT ME !')

    def cc(self):
        """ Finds the connected components of the graph, returning a dict
            object which associates to the verteces the corresponding connected
            component number id, where 1 <= id <= |V|

            IMPORTANT: ASSUMES THE GRAPH IS UNDIRECTED!
            ON DIRECTED GRAPHS, THE RESULT IS UNPREDICTABLE!

            To develop this function, implement also a ccdfs function inside
            this method.
        """
        raise Exception('TODO IMPLEMENT ME !')

    def has_cycle(self):
        """ Return True if this directed graph has a cycle, return False otherwise.

            - To develop this function, implement also has_cycle_rec(u) inside
              this method
            - Inside has_cycle_rec, to reference variables of has_cycle you
              need to declare them as nonlocal, like

                  nonlocal clock, dt, ft

            - MUST be able to also detect self-loops
        """
        raise Exception('TODO IMPLEMENT ME !')

    def top_sort(self):
        """ RETURN a topological sort of the graph.

            To implement this code, feel free to adapt Montresor algorithm

            - implement Stack S as a list
            - implement visited as a set
            - NOTE: differently from Montresor code, for tests to pass you will
              need to return a reversed list. Why?
        """
        raise Exception('TODO IMPLEMENT ME !')


def full_graph(verteces):
    """ Returns a DiGraph which is a full graph with provided verteces list.

        In a full graph all verteces link to all other verteces
        (including themselves!).
    """
    raise Exception('TODO IMPLEMENT ME !')


def dag(verteces):
    """ Returns a DiGraph which is a DAG (Directed Acyclic Graph) made out of
        provided verteces list

        Provided list is intended to be in topological order.

        NOTE: a DAG is ACYCLIC, so caps (self-loops) are not allowed!!
    """
    raise Exception('TODO IMPLEMENT ME !')


def list_graph(n):
    """ Return a graph of n verteces displaced like a monodirectional list:

        1 -> 2 -> 3 -> ... -> n

        Each vertex is a number i, 1 <= i <= n and has only one edge connecting
        it to the following one in the sequence

        If n = 0, return the empty graph.
        If n < 0, raises a ValueError.
    """
    raise Exception('TODO IMPLEMENT ME !')


def star_graph(n):
    """ Returns a graph which is a star with n nodes

        First node is the center of the star and it is labeled with 1.
        This node is linked to all the others. For example, for n=4 you
        would have a graph like this:

             3
             ^
             |
        2 <- 1 -> 4

        If n = 0, the empty graph is returned
        If n < 0, raises a ValueError
    """
    raise Exception('TODO IMPLEMENT ME !')


def odd_line(n):
    """ Returns a DiGraph with n verteces, displaced like a line of odd numbers

        Each vertex is an odd number i, for 1 <= i < 2n. For example, for n=4
        verteces are displaced like this:

        1 -> 3 -> 5 -> 7

        For n = 0, return the empty graph
    """
    raise Exception('TODO IMPLEMENT ME !')


def even_line(n):
    """ Returns a DiGraph with n verteces, displaced like a line of even numbers

        Each vertex is an even number i, for 2 <= i <= 2n. For example, for n=4
        verteces are displaced like this:

        2 <- 4 <- 6 <- 8

        For n = 0, return the empty graph
    """
    raise Exception('TODO IMPLEMENT ME !')


def quads(n):
    """ Returns a DiGraph with 2n verteces, displaced like a strip of quads.

        Each vertex is a number i, 1 <= i <= 2n. For example, for n = 4,
        verteces are displaced like this:

        1 -> 3 -> 5 -> 7
        ^    |    ^    |
        |    ;    |    ;
        2 <- 4 <- 6 <- 8

        where

        ^
        |
        |

        represents an upward arrow, while ; represents a downward arrow
    """
    raise Exception('TODO IMPLEMENT ME !')


def pie(n):
    """ Returns a DiGraph with n+1 verteces, displaced like a polygon with a
        perimeter of n verteces progressively numbered from 1 to n.
        A central vertex numbered zero has outgoing edges to all other verteces.

        For n = 0, return the empty graph.
        For n = 1, return vertex zero connected to node 1, and node 1 has a
        self-loop.
    """
    raise Exception('TODO IMPLEMENT ME !')


def flux(depth):
    """ Returns a DiGraph with 1 + (d * 3) verteces displaced like a Y:

        - from a central node numbered 0, three branches depart
        - all edges are directed outward
        - on each branch there are 'depth' verteces.
        - if depth < 0, raises a ValueError

        For example, for depth=2 we get the following graph
        (suppose arrows point outward):

        4 5
         \ /
         1 2
          \ /
           0
           |
           3
           |
           6
    """
    raise Exception('TODO IMPLEMENT ME !')


def exits(cp):
    """ INPUT: a dictionary of nodes representing a visit tree in the
        child-to-parent format, that is, each key is a node label and as value
        has its parent as a node label. The root has associated None as parent.

        OUTPUT: a dictionary mapping node labels of exits to a list of node
        labels representing the shortest path from the root to the exit
        (root and exit included)

        - MUST execute in O(|V| + |E|)
    """
    raise Exception('TODO IMPLEMENT ME !')
[ "fil.vicari@gmail.com" ]
fil.vicari@gmail.com
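Several of the stubs above (distances, cp, cc) boil down to a BFS that records some bookkeeping per vertex. A standalone sketch of one possible distances() implementation over a plain adjacency dict, matching the docstring's contract of -1 for unreachable nodes (names here are illustrative, not the exercise's official solution):

from collections import deque

def bfs_distances(edges, source):
    if source not in edges:
        raise LookupError("Can't find vertex: %s" % source)
    dist = {v: -1 for v in edges}     # -1 marks nodes never reached
    dist[source] = 0
    Q = deque([source])
    while Q:
        u = Q.popleft()
        for v in edges[u]:
            if dist[v] == -1:         # first discovery = minimal edge count
                dist[v] = dist[u] + 1
                Q.append(v)
    return dist

print(bfs_distances({'a': ['b'], 'b': ['c'], 'c': [], 'd': []}, 'a'))
# {'a': 0, 'b': 1, 'c': 2, 'd': -1}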
6e5fbc9f665ac5fad6c6debd465470d6056c7bef
6cf01d84ec49050ffa0a5f4f10e79c89e3970818
/gufm1/__init__.py
550cd7a479d6531766edf144c76417d9a897bd2a
[ "MIT" ]
permissive
nknezek/gufm1
1a4e72df6f826dadae6b46b96239165de5556823
a0ca60aa9b1932bfc63713ce8d95d0443746aa3c
refs/heads/master
2022-02-18T20:00:48.718995
2016-03-14T22:31:57
2016-03-14T22:31:57
52,418,205
0
1
MIT
2022-02-07T02:00:44
2016-02-24T05:56:12
Python
UTF-8
Python
false
false
20
py
from .gufm1 import *
[ "nknezek@mcc-411-6.geo.berkeley.edu" ]
nknezek@mcc-411-6.geo.berkeley.edu