Dataset schema (one record per source file):

| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
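For orientation, the schema above maps naturally onto a small record type. The sketch below is illustrative only: the `Record` dataclass is my own naming, not part of any dataset tooling, and the field types are inferred from the dtype column (nullable/⌀ columns become `Optional`).

```python
# Hypothetical record type mirroring the schema table above (illustrative only).
from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional

@dataclass
class Record:
    blob_id: str                      # 40-char hash
    directory_id: str                 # 40-char hash
    path: str                         # 2-616 chars
    content_id: str                   # 40-char hash
    detected_licenses: List[str]      # 0-69 entries
    license_type: str                 # 2 classes, e.g. "no_license" / "permissive"
    repo_name: str
    snapshot_id: str
    revision_id: str
    branch_name: str
    visit_date: datetime
    revision_date: datetime
    committer_date: datetime
    github_id: Optional[int]          # nullable (⌀)
    star_events_count: int
    fork_events_count: int
    gha_license_id: Optional[str]
    gha_event_created_at: Optional[datetime]
    gha_created_at: Optional[datetime]
    gha_language: Optional[str]
    src_encoding: str                 # e.g. "UTF-8"
    language: str                     # single class: "Python"
    is_vendor: bool
    is_generated: bool
    length_bytes: int
    extension: str                    # e.g. "py"
    content: str                      # the file's source text
    authors: List[str]
    author_id: str
```
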
Record 1
- blob_id: 9581235950edd78c5dcad76402bc11c604e1611f
- directory_id: 447db789b432b2f665f0ab1a0ee75abc02151b1a
- path: /01_python_fundamentals/01_03_yeehaw.py
- content_id: ea8eabaffaae8ffc0c726039fbfd31807f2ae612
- detected_licenses: []
- license_type: no_license
- repo_name: jesusdmartinez/python-labs
- snapshot_id: cfcf69964409a3284208d8fb52c637ffd6bda535
- revision_id: 4d314985480b8db482d49e445a671c535fbcb88c
- branch_name: refs/heads/master
- visit_date: 2020-07-09T23:38:40.935626
- revision_date: 2019-09-20T00:18:42
- committer_date: 2019-09-20T00:18:42
- github_id: 204,110,492
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 210
- extension: py
- content:

'''
Write the necessary code to display the following message to the console:
I'm a programmer now.
Yeehaw!
Coding here I come!
'''
print("I'm a programmer now.")
print("Yeehaw!")
print("Coding here I come!")

- authors: ["jesusd.martinez@Jesuss-MBP.home"]
- author_id: jesusd.martinez@Jesuss-MBP.home

Record 2
- blob_id: cf894f9e2b9e9e3cfa72915ce476c80f7b86f700
- directory_id: aa6260a66bdedee64882663f831de869eb5ef193
- path: /Repository/Points.py
- content_id: 1c1a09bfd7540675eb6480251fa50c247b675ea2
- detected_licenses: []
- license_type: no_license
- repo_name: BrunoICardoso/GEO4ALL
- snapshot_id: dc1076ea0241440b52120483a2d505a6cae99099
- revision_id: 889cd305ca97f64b75a482ccfcea82c856a4dc33
- branch_name: refs/heads/master
- visit_date: 2022-11-10T03:12:28.334437
- revision_date: 2019-07-07T19:05:57
- committer_date: 2019-07-07T19:05:57
- github_id: 195,507,889
- star_events_count: 0
- fork_events_count: 2
- gha_license_id: null
- gha_event_created_at: 2022-10-23T20:28:09
- gha_created_at: 2019-07-06T06:50:42
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 1,306
- extension: py
- content:

from DAO.DataBase import DataBase
from datetime import datetime

# This repository for the Points table holds every command that operates directly on the table.
class Points(DataBase):
    def __init__(self):
        DataBase.__init__(self)

    def CheckExistingPoint(self, latitude, longitude) -> None or int:
        # Check whether the record exists in the database; if it does, return its ID.
        IDPoints = self.Query("SELECT IDPoints FROM Points WHERE Latitude = ? AND Logintude = ?", (latitude, longitude))
        if not IDPoints:
            return None
        return IDPoints[0]

    def Save(self, Latitude, Logintude, Distance, Bearing, IDFile, id=False) -> int:
        # id => when True, return the ID of the new record; otherwise just save it.
        if id:
            IDPoints = self.Insert(
                "INSERT INTO Points (Latitude,Logintude,Distance,Bearing,IDFile) OUTPUT inserted.IDPoints VALUES(?,?,?,?,?)",
                (Latitude, Logintude, Distance, Bearing, IDFile), getID=True)
            return IDPoints
        else:
            self.Insert("INSERT INTO Points (Latitude,Logintude,Distance,Bearing,IDFile) VALUES(?,?,?,?,?)",
                        (Latitude, Logintude, Distance, Bearing, IDFile))

- authors: ["bruno.inacio88@gmail.com"]
- author_id: bruno.inacio88@gmail.com

Record 3
- blob_id: 0a74bf17d9386a48de39f608db074336cda8a1b9
- directory_id: 5354ce5fd5836047f019f614d2490085d44b06f0
- path: /Project2/reinforcement/analysis.py
- content_id: 638e3e909065a2fdc7cca98cc6c9ae95177955cf
- detected_licenses: []
- license_type: no_license
- repo_name: jshartar/cs3600
- snapshot_id: 8f8be7bb7c2034e67f61d6b98ff64037ca933758
- revision_id: 45bf4b30bd3232e6bc63ec9bdac527b360aab81b
- branch_name: refs/heads/master
- visit_date: 2020-03-28T21:47:45.026848
- revision_date: 2018-09-17T20:20:26
- committer_date: 2018-09-17T20:20:26
- github_id: 149,182,235
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 2,385
- extension: py
- content:

# analysis.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).

######################
# ANALYSIS QUESTIONS #
######################

# Set the given parameters to obtain the specified policies through
# value iteration.

def question2():
    answerDiscount = 0.9
    # changed from 0.2 to 0.0
    answerNoise = 0.0
    return answerDiscount, answerNoise

def question3a():
    answerDiscount = 0.3
    answerNoise = 0.0
    answerLivingReward = 0.0
    return answerDiscount, answerNoise, answerLivingReward
    # If not possible, return 'NOT POSSIBLE'

def question3b():
    answerDiscount = 0.3
    answerNoise = 0.1
    answerLivingReward = 0.1
    return answerDiscount, answerNoise, answerLivingReward
    # If not possible, return 'NOT POSSIBLE'

def question3c():
    answerDiscount = 0.9
    answerNoise = 0.1
    answerLivingReward = -0.3
    return answerDiscount, answerNoise, answerLivingReward
    # If not possible, return 'NOT POSSIBLE'

def question3d():
    answerDiscount = 0.9
    answerNoise = 0.3
    answerLivingReward = 0.2
    return answerDiscount, answerNoise, answerLivingReward
    # If not possible, return 'NOT POSSIBLE'

def question3e():
    answerDiscount = 0.5
    answerNoise = 0.5
    answerLivingReward = 5
    return answerDiscount, answerNoise, answerLivingReward
    # If not possible, return 'NOT POSSIBLE'

def question6():
    answerEpsilon = None
    answerLearningRate = None
    # return answerEpsilon, answerLearningRate
    # If not possible, return 'NOT POSSIBLE'
    return 'NOT POSSIBLE'

if __name__ == '__main__':
    print 'Answers to analysis questions:'
    import analysis
    for q in [q for q in dir(analysis) if q.startswith('question')]:
        response = getattr(analysis, q)()
        print '  Question %s:\t%s' % (q, str(response))

- authors: ["jordan.shartar@gmail.com"]
- author_id: jordan.shartar@gmail.com

Record 4
- blob_id: 5deac943862d22111f394e515c2f8ae54d238c37
- directory_id: 2657d7cdc745e38cfddec1c04fce9e367937b393
- path: /pricing_1a.py
- content_id: f4c5b78e0df97d7235b4a7cf099f8f656141a5a6
- detected_licenses: []
- license_type: no_license
- repo_name: amasl2048/pricing
- snapshot_id: ab0f1d4c49dec3ed3ad646b08e1bbc9ea2bc5545
- revision_id: ff5ede99f98c49f18e87e23cd65115b3f7b665c4
- branch_name: refs/heads/master
- visit_date: 2021-01-17T09:16:20.649084
- revision_date: 2017-08-11T14:22:17
- committer_date: 2017-08-11T14:22:17
- github_id: 28,592,031
- star_events_count: 1
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 1,749
- extension: py
- content:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Select, for a given list of part numbers (part.num.), the matching rows
from the master price file.
Argument 1 - text file with the wanted names (part.num.) in a single column
Argument 2 - source csv file with msrp ref trans prices
Argument 3 - name of the output csv file
2014 Dec
'''
import csv
import sys
from numpy import loadtxt, size

desccol = 2  # description column
catalog = 3
msrpcol = 4  # msrp price column
grpcol = 7   # price-group column

a = loadtxt(sys.argv[1], dtype=str)
n = size(a)
ifile = open(sys.argv[2], 'rb')
reader = csv.reader(ifile, delimiter=';', quotechar='"')
ofile = open(sys.argv[3], 'wb')
writer = csv.writer(ofile, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)

rownum = 0
for row in reader:
    # Save header row.
    if rownum == 0:
        header = row
        writer.writerow(header)
    else:
        colnum = 0
        findit = False
        for col in row:
            if colnum == 0:  # look for the wanted part.num. in the first column
                if n == 1:
                    partnum = a.item()
                    if col == partnum.strip():
                        writer.writerow(row)
                else:
                    for partnum in a:
                        if col == partnum.strip():
                            writer.writerow(row)
                            # findit = True
            colnum += 1
    rownum += 1

ifile.close()
ofile.close()

- authors: ["amasl2048@gmail.com"]
- author_id: amasl2048@gmail.com

Record 5
- blob_id: c1b5a0723dfe2275003edde27ad695f06d4f896f
- directory_id: 61a43dbd5ee615bf5e5f8f52e739efdfe5b57443
- path: /open_cv/cv10.py
- content_id: 8c39449a4eba0296754c7f38f0025a019ee21aae
- detected_licenses: []
- license_type: no_license
- repo_name: tianqibucuohao/MyPython
- snapshot_id: 6edef8fa2635862914af26f96b3fb630a0198403
- revision_id: 12b9689882dc197128d1a27b68c9a2840a57d0fc
- branch_name: refs/heads/master
- visit_date: 2020-04-07T15:04:09.838646
- revision_date: 2020-01-09T09:03:48
- committer_date: 2020-01-09T09:03:48
- github_id: 158,471,961
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 680
- extension: py
- content:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while(1):
    # Take each frame (cap.read() returns a (success, frame) tuple)
    _, frame = cap.read()
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV
    lower_blue = np.array([110, 50, 50])
    upper_blue = np.array([130, 255, 255])
    # Threshold the HSV image to get only blue colors
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()

- authors: ["lichzhenglz@gmail.com"]
- author_id: lichzhenglz@gmail.com

Record 6
- blob_id: 71129ddfbf63fd4d1bcd4996ef697cfe339d0644
- directory_id: 81ebceaea5d26dd1c20a596430d274eb31c7234c
- path: /hw3_code/scripts/concat_feats.py
- content_id: 80addc0a01cb4c43096b6834530c63e41629779d
- detected_licenses: []
- license_type: no_license
- repo_name: richcode6/Multimedia-Event-Detection
- snapshot_id: d22f3aca9b7e770e91315978cb8b5f3baee218c2
- revision_id: c9f9b9c2117785e13d7b482ad460d78822ecf275
- branch_name: refs/heads/master
- visit_date: 2020-04-21T19:32:59.644781
- revision_date: 2019-03-21T04:51:34
- committer_date: 2019-03-21T04:51:34
- github_id: null
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 1,403
- extension: py
- content:

#!/bin/python
import numpy as np
import os
import pickle
from sklearn.cluster.k_means_ import KMeans
import sys

if __name__ == '__main__':
    print(sys.argv)
    if len(sys.argv) < 2:
        print("Usage: {0} feat_combination_type".format(sys.argv[0]))
        print("feat_dict_n -- dictionary of video id to feature vector stored as pickle file")
        print("output_feat_dict -- name of pickle file to store concatenated features (feat_1; feat_2;...;feat_n)")
        print("Note - Minimum 2 feature dictionaries have to be provided !!!")
        exit(1)

    feat_combo = sys.argv[1]
    output_file = "features/{}.pkl".format(feat_combo)
    input_files = list()
    for feat in feat_combo.split("."):
        input_files.append("features/{}.pkl".format(feat))
    feats = len(input_files)
    M = [pickle.load(open(input_files[i], 'rb')) for i in range(len(input_files))]
    # Dimensionality of each feature: length of any one value in each dictionary
    # (index the first key rather than the i-th, which could run past small dicts).
    dim = [len(M[i][list(M[i])[0]]) for i in range(feats)]
    total = sum(dim)
    print(total)
    keys = set().union(*M)
    X = {}
    for key in keys:
        # Zero-fill any feature missing for this video id, then concatenate.
        a = [np.zeros(dim[i]) for i in range(feats)]
        for j in range(feats):
            if key in M[j]:
                a[j] = M[j][key]
        X[key] = np.concatenate(a)
    with open(output_file, 'wb') as w:
        pickle.dump(X, w)
    print("Features concatenated successfully! -> {}".format(output_file))

- authors: ["rrnigam@compute-0-30.local"]
- author_id: rrnigam@compute-0-30.local

Record 7
- blob_id: 49f674bbf8deb4d9b188a03d297bbea60bbade52
- directory_id: 06159808f6e2976aef5a48cffa998d84ad0a7653
- path: /Algorithmic Toolbox/week3_greedy_algorithms/1_money_change/change.py
- content_id: 63faa89d2d4eab2c731b1ec447c7fbc4cc6ca34d
- detected_licenses: []
- license_type: no_license
- repo_name: sreeragrnandan/Data-Structures-and-Algorithms-Specialisation
- snapshot_id: 60f5c7845ec738bfc116947c81c4476211176dd6
- revision_id: f47188942a1a12994730a09af1b094106dcc4b95
- branch_name: refs/heads/master
- visit_date: 2023-01-30T00:51:44.282135
- revision_date: 2020-12-14T11:14:06
- committer_date: 2020-12-14T11:14:06
- github_id: 288,779,959
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 372
- extension: py
- content:

# Uses python3
import sys

def get_change(m):
    # write your code here
    coin = [4, 3, 1]
    i = 0
    n = 0
    while m != 0:
        if coin[i] <= m:
            m -= coin[i]
            n += 1
        else:
            i += 1
    return n

# if __name__ == '__main__':
#     m = int(sys.stdin.read())
#     print(get_change(m))
m = int(input())
print(get_change(m))

- authors: ["sreeragraghunandan@gmail.com"]
- author_id: sreeragraghunandan@gmail.com

Record 8
- blob_id: f8cbc044b1234674be65d031fd31641997b778f9
- directory_id: 78dd211bf471c5b472ad3bf6b753c0b0d9210551
- path: /hardening/tests/test_sudo.py
- content_id: aaefef69af66bfbf88f21380f99440b4fd980f45
- detected_licenses: []
- license_type: no_license
- repo_name: Junowa/hardening-roles
- snapshot_id: be227685fd302bc9e583db3707009ecbcf1d47b2
- revision_id: 14f546c50ecfbf23312136c8bbe58e58d4d9504d
- branch_name: refs/heads/master
- visit_date: 2020-03-19T00:37:27.140874
- revision_date: 2018-05-30T20:28:55
- committer_date: 2018-05-30T20:28:55
- github_id: 135,493,941
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 798
- extension: py
- content:

import time

""" hardening sudo module """

def test_log_sudo_actions(host):
    """ run the privileged command 'sudo hostname' and test if a log exists with 'sudo journalctl -f' """
    with host.sudo():
        action = host.run("hostname")
        time.sleep(3)
        action_log = host.run("journalctl --since \"1 minute ago\" -t sudo | grep /bin/hostname")
        assert action_log.stdout

def test_log_auth_failure(host):
    """ run the privileged command 'sudo hostname' with a wrong password and test if a failure log exists with 'sudo journalctl -f' """
    auth = host.run("echo \"wrong_password\" | sudo -S hostname")
    with host.sudo():
        time.sleep(3)
        authfailure_log = host.run("journalctl --since \"10 seconds ago\" | grep \"incorrect password attempt\"")
        assert authfailure_log.stdout

- authors: ["julien.nowalczyk@thalesgroup.com"]
- author_id: julien.nowalczyk@thalesgroup.com

Record 9
- blob_id: 4d15eb1750b1d7b7d4d335a60c966aa2110ea6b7
- directory_id: 89f19fbb1f59c14b3f48f779dd80fe4d223fde4a
- path: /tools/caffe/python/caffe/proto/caffe_pb2.py
- content_id: 8eb4838ed85977bd632bd8709f5297d28569c618
- detected_licenses: ["LicenseRef-scancode-generic-cla", "BSD-2-Clause", "LicenseRef-scancode-public-domain", "BSD-3-Clause", "MIT"]
- license_type: permissive
- repo_name: lixiaoxing0426/PixelNet_vessel
- snapshot_id: 7ff72e387ec9bf11d0d7fed69448128c5d053984
- revision_id: 992c6365a4121e75310c4ca3cc1b8cb73c5fc6ef
- branch_name: refs/heads/master
- visit_date: 2022-04-10T00:49:21.080399
- revision_date: 2020-03-14T06:17:28
- committer_date: 2020-03-14T06:17:28
- github_id: 246,781,028
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: true
- length_bytes: 277,745
- extension: py
- content:

# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: caffe.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='caffe.proto',
package='caffe',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\x9c\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x11\n\trms_decay\x18& \x01(\x02\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xd3\x15\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 
\x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\nnorm_param\x18\x95\x01 \x01(\x0b\x32\x19.caffe.NormalizeParameter\x12\x33\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x19.caffe.ParameterParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x39\n\x13rand_cat_conv_param\x18\x93\x01 \x01(\x0b\x32\x1b.caffe.RandCatConvParameter\x12\x30\n\x0erand_cat_param\x18\x94\x01 \x01(\x0b\x32\x17.caffe.RandCatParameter\x12\x32\n\x0frand_comp_param\x18\x96\x01 \x01(\x0b\x32\x18.caffe.RandCompParameter\x12\x33\n\x0frecurrent_param\x18\x92\x01 \x01(\x0b\x32\x19.caffe.RecurrentParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 
\x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xc2\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\"B\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 
\x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 
\x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"\xab\x01\n\x12NormalizeParameter\x12\x1c\n\x0e\x61\x63ross_spatial\x18\x01 \x01(\x08:\x04true\x12,\n\x0cscale_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1c\n\x0e\x63hannel_shared\x18\x03 \x01(\x08:\x04true\x12\x17\n\tfix_scale\x18\x04 \x01(\x08:\x04true\x12\x12\n\x03\x65ps\x18\x05 \x01(\x02:\x05\x31\x65-10\"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"J\n\x10RandCatParameter\x12\x1c\n\x0erand_selection\x18\x01 \x01(\x08:\x04true\x12\x18\n\nnum_output\x18\x02 \x01(\r:\x04\x31\x30\x30\x30\"}\n\x14RandCatConvParameter\x12\x1c\n\x0erand_selection\x18\x01 \x01(\x08:\x04true\x12\x18\n\nnum_output\x18\x02 \x01(\r:\x04\x31\x30\x30\x30\x12\x16\n\x0epooling_factor\x18\x03 \x03(\x05\x12\x15\n\npad_factor\x18\x04 \x01(\r:\x01\x30\"A\n\x11RandCompParameter\x12\x18\n\x10\x63ompression_rate\x18\x01 \x03(\x05\x12\x12\n\x03pad\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xc0\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 \x01(\r:\x01\x30\x12-\n\rweight_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 
\x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xaa\x15\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12-\n\nnorm_param\x18- \x01(\x0b\x32\x19.caffe.NormalizeParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12\x38\n\x13rand_cat_conv_param\x18, \x01(\x0b\x32\x1b.caffe.RandCatConvParameter\x12/\n\x0erand_cat_param\x18+ \x01(\x0b\x32\x17.caffe.RandCatParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\x88\x05\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\r\n\tNORMALIZE\x10*\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x11\n\rRAND_CAT_CONV\x10)\x12\x0c\n\x08RAND_CAT\x10(\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01')
)
_PHASE = _descriptor.EnumDescriptor(
name='Phase',
full_name='caffe.Phase',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TRAIN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEST', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=16202,
serialized_end=16230,
)
_sym_db.RegisterEnumDescriptor(_PHASE)
Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE)
TRAIN = 0
TEST = 1
_FILLERPARAMETER_VARIANCENORM = _descriptor.EnumDescriptor(
name='VarianceNorm',
full_name='caffe.FillerParameter.VarianceNorm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FAN_IN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAN_OUT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVERAGE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=658,
serialized_end=710,
)
_sym_db.RegisterEnumDescriptor(_FILLERPARAMETER_VARIANCENORM)
_SOLVERPARAMETER_SNAPSHOTFORMAT = _descriptor.EnumDescriptor(
name='SnapshotFormat',
full_name='caffe.SolverParameter.SnapshotFormat',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='HDF5', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BINARYPROTO', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2132,
serialized_end=2175,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SNAPSHOTFORMAT)
_SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor(
name='SolverMode',
full_name='caffe.SolverParameter.SolverMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='CPU', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GPU', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2177,
serialized_end=2207,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERMODE)
_SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor(
name='SolverType',
full_name='caffe.SolverParameter.SolverType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SGD', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NESTEROV', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADAGRAD', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RMSPROP', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADADELTA', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADAM', index=5, number=5,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2209,
serialized_end=2294,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERTYPE)
_PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor(
name='DimCheckMode',
full_name='caffe.ParamSpec.DimCheckMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRICT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSIVE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2725,
serialized_end=2767,
)
_sym_db.RegisterEnumDescriptor(_PARAMSPEC_DIMCHECKMODE)
_LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor(
name='NormalizationMode',
full_name='caffe.LossParameter.NormalizationMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FULL', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VALID', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BATCH_SIZE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NONE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=5857,
serialized_end=5923,
)
_sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE)
_CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.ConvolutionParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE)
_DATAPARAMETER_DB = _descriptor.EnumDescriptor(
name='DB',
full_name='caffe.DataParameter.DB',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='LEVELDB', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LMDB', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=7249,
serialized_end=7276,
)
_sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB)
_ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor(
name='EltwiseOp',
full_name='caffe.EltwiseParameter.EltwiseOp',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PROD', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUM', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAX', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=7616,
serialized_end=7655,
)
_sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP)
_HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor(
name='Norm',
full_name='caffe.HingeLossParameter.Norm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='L1', index=0, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L2', index=1, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=8190,
serialized_end=8212,
)
_sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM)
_LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor(
name='NormRegion',
full_name='caffe.LRNParameter.NormRegion',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACROSS_CHANNELS', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WITHIN_CHANNEL', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=9079,
serialized_end=9132,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION)
_LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.LRNParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE)
_POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.PoolingParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=9930,
serialized_end=9976,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD)
_POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.PoolingParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE)
_REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor(
name='ReductionOp',
full_name='caffe.ReductionParameter.ReductionOp',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SUM', index=0, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASUM', index=1, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUMSQ', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEAN', index=3, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=10786,
serialized_end=10839,
)
_sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP)
_RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.ReLUParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE)
_SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SigmoidParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE)
_SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SoftmaxParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE)
_TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.TanHParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE)
_SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.SPPParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=9930,
serialized_end=9976,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD)
_SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SPPParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=6888,
serialized_end=6931,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE)
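# caffe.V1LayerParameter.LayerType: the legacy (V1) layer-type enum. The
# `number` values are non-contiguous because they reflect the order in which
# layer types were added to caffe.proto over time, while `index` is simply
# the declaration order within the enum.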
_V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor(
name='LayerType',
full_name='caffe.V1LayerParameter.LayerType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ABSVAL', index=1, number=35,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCURACY', index=2, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ARGMAX', index=3, number=30,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BNLL', index=4, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONCAT', index=5, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONTRASTIVE_LOSS', index=6, number=37,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONVOLUTION', index=7, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA', index=8, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DECONVOLUTION', index=9, number=39,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DROPOUT', index=10, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUMMY_DATA', index=11, number=32,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EUCLIDEAN_LOSS', index=12, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ELTWISE', index=13, number=25,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXP', index=14, number=38,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLATTEN', index=15, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HDF5_DATA', index=16, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HDF5_OUTPUT', index=17, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HINGE_LOSS', index=18, number=28,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IM2COL', index=19, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_DATA', index=20, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INFOGAIN_LOSS', index=21, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INNER_PRODUCT', index=22, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LRN', index=23, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEMORY_DATA', index=24, number=29,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MULTINOMIAL_LOGISTIC_LOSS', index=25, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MVN', index=26, number=34,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NORMALIZE', index=27, number=42,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POOLING', index=28, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POWER', index=29, number=26,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RAND_CAT_CONV', index=30, number=41,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RAND_CAT', index=31, number=40,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RELU', index=32, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=33, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID_CROSS_ENTROPY_LOSS', index=34, number=27,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SILENCE', index=35, number=36,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTMAX', index=36, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTMAX_LOSS', index=37, number=21,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SPLIT', index=38, number=22,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SLICE', index=39, number=33,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TANH', index=40, number=23,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WINDOW_DATA', index=41, number=24,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='THRESHOLD', index=42, number=31,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=14395,
serialized_end=15043,
)
_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE)
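# caffe.V1LayerParameter.DimCheckMode: per the upstream caffe.proto comments,
# STRICT requires blob shapes to match exactly, while PERMISSIVE only
# requires the total element count to match.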
_V1LAYERPARAMETER_DIMCHECKMODE = _descriptor.EnumDescriptor(
name='DimCheckMode',
full_name='caffe.V1LayerParameter.DimCheckMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRICT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSIVE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2725,
serialized_end=2767,
)
_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_DIMCHECKMODE)
_V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.V0LayerParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=9930,
serialized_end=9976,
)
_sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD)
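# End of the standalone EnumDescriptor definitions. The Descriptor objects
# below describe the protobuf message types themselves; serialized_start and
# serialized_end are byte offsets into DESCRIPTOR.serialized_pb.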
_BLOBSHAPE = _descriptor.Descriptor(
name='BlobShape',
full_name='caffe.BlobShape',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dim', full_name='caffe.BlobShape.dim', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=50,
)
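# caffe.BlobShape: a single repeated int64 `dim` field, packed on the wire
# (the serialized_options bytes '\020\001' encode FieldOptions.packed=true).
# Illustrative use, assuming this module is imported as caffe_pb2:
#     shape = caffe_pb2.BlobShape(dim=[1, 3, 224, 224])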
_BLOBPROTO = _descriptor.Descriptor(
name='BlobProto',
full_name='caffe.BlobProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.BlobProto.shape', index=0,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='caffe.BlobProto.data', index=1,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='diff', full_name='caffe.BlobProto.diff', index=2,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='double_data', full_name='caffe.BlobProto.double_data', index=3,
number=8, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='double_diff', full_name='caffe.BlobProto.double_diff', index=4,
number=9, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num', full_name='caffe.BlobProto.num', index=5,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.BlobProto.channels', index=6,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.BlobProto.height', index=7,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.BlobProto.width', index=8,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=257,
)
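# caffe.BlobProto stores blob contents: float `data`/`diff` and double
# `double_data`/`double_diff` (all packed repeated fields), plus the legacy
# 4-D num/channels/height/width fields that predate the `shape` message.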
_BLOBPROTOVECTOR = _descriptor.Descriptor(
name='BlobProtoVector',
full_name='caffe.BlobProtoVector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.BlobProtoVector.blobs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=259,
serialized_end=309,
)
_DATUM = _descriptor.Descriptor(
name='Datum',
full_name='caffe.Datum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.Datum.channels', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.Datum.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.Datum.width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='caffe.Datum.data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label', full_name='caffe.Datum.label', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='float_data', full_name='caffe.Datum.float_data', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encoded', full_name='caffe.Datum.encoded', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=312,
serialized_end=441,
)
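# caffe.Datum packages one training example: channels/height/width, the raw
# `data` bytes (or repeated `float_data`), an integer `label`, and an
# `encoded` flag indicating that the data bytes hold an encoded image.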
_FILLERPARAMETER = _descriptor.Descriptor(
name='FillerParameter',
full_name='caffe.FillerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='caffe.FillerParameter.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("constant").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='caffe.FillerParameter.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min', full_name='caffe.FillerParameter.min', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max', full_name='caffe.FillerParameter.max', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean', full_name='caffe.FillerParameter.mean', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='std', full_name='caffe.FillerParameter.std', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sparse', full_name='caffe.FillerParameter.sparse', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='variance_norm', full_name='caffe.FillerParameter.variance_norm', index=7,
number=8, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_FILLERPARAMETER_VARIANCENORM,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=444,
serialized_end=710,
)
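# caffe.FillerParameter configures weight initialization: `type` defaults to
# "constant". Roughly, min/max apply to uniform fillers, mean/std to
# gaussian fillers, and `sparse` / `variance_norm` to sparse and
# xavier/msra-style fillers respectively.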
_NETPARAMETER = _descriptor.Descriptor(
name='NetParameter',
full_name='caffe.NetParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.NetParameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input', full_name='caffe.NetParameter.input', index=1,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_shape', full_name='caffe.NetParameter.input_shape', index=2,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_dim', full_name='caffe.NetParameter.input_dim', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force_backward', full_name='caffe.NetParameter.force_backward', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='caffe.NetParameter.state', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug_info', full_name='caffe.NetParameter.debug_info', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='layer', full_name='caffe.NetParameter.layer', index=7,
number=100, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='layers', full_name='caffe.NetParameter.layers', index=8,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=713,
serialized_end=983,
)
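# caffe.NetParameter: note the parallel `layer` (field 100, LayerParameter)
# and `layers` (field 2, V1LayerParameter) fields; upstream caffe.proto marks
# `layers` as deprecated in favor of `layer`.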
_SOLVERPARAMETER = _descriptor.Descriptor(
name='SolverParameter',
full_name='caffe.SolverParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='net', full_name='caffe.SolverParameter.net', index=0,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='net_param', full_name='caffe.SolverParameter.net_param', index=1,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_net', full_name='caffe.SolverParameter.train_net', index=2,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_net', full_name='caffe.SolverParameter.test_net', index=3,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_net_param', full_name='caffe.SolverParameter.train_net_param', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_net_param', full_name='caffe.SolverParameter.test_net_param', index=5,
number=22, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_state', full_name='caffe.SolverParameter.train_state', index=6,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_state', full_name='caffe.SolverParameter.test_state', index=7,
number=27, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_iter', full_name='caffe.SolverParameter.test_iter', index=8,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_interval', full_name='caffe.SolverParameter.test_interval', index=9,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_compute_loss', full_name='caffe.SolverParameter.test_compute_loss', index=10,
number=19, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_initialization', full_name='caffe.SolverParameter.test_initialization', index=11,
number=32, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='base_lr', full_name='caffe.SolverParameter.base_lr', index=12,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display', full_name='caffe.SolverParameter.display', index=13,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_loss', full_name='caffe.SolverParameter.average_loss', index=14,
number=33, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_iter', full_name='caffe.SolverParameter.max_iter', index=15,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iter_size', full_name='caffe.SolverParameter.iter_size', index=16,
number=36, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_policy', full_name='caffe.SolverParameter.lr_policy', index=17,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gamma', full_name='caffe.SolverParameter.gamma', index=18,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='power', full_name='caffe.SolverParameter.power', index=19,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='caffe.SolverParameter.momentum', index=20,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='caffe.SolverParameter.weight_decay', index=21,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='regularization_type', full_name='caffe.SolverParameter.regularization_type', index=22,
number=29, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("L2").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stepsize', full_name='caffe.SolverParameter.stepsize', index=23,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stepvalue', full_name='caffe.SolverParameter.stepvalue', index=24,
number=34, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clip_gradients', full_name='caffe.SolverParameter.clip_gradients', index=25,
number=35, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='snapshot', full_name='caffe.SolverParameter.snapshot', index=26,
number=14, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='snapshot_prefix', full_name='caffe.SolverParameter.snapshot_prefix', index=27,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='snapshot_diff', full_name='caffe.SolverParameter.snapshot_diff', index=28,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='snapshot_format', full_name='caffe.SolverParameter.snapshot_format', index=29,
number=37, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='solver_mode', full_name='caffe.SolverParameter.solver_mode', index=30,
number=17, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_id', full_name='caffe.SolverParameter.device_id', index=31,
number=18, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_seed', full_name='caffe.SolverParameter.random_seed', index=32,
number=20, type=3, cpp_type=2, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.SolverParameter.type', index=33,
number=40, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("SGD").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='delta', full_name='caffe.SolverParameter.delta', index=34,
number=31, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-08),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum2', full_name='caffe.SolverParameter.momentum2', index=35,
number=39, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rms_decay', full_name='caffe.SolverParameter.rms_decay', index=36,
number=38, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug_info', full_name='caffe.SolverParameter.debug_info', index=37,
number=23, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='snapshot_after_train', full_name='caffe.SolverParameter.snapshot_after_train', index=38,
number=28, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='solver_type', full_name='caffe.SolverParameter.solver_type', index=39,
number=30, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOLVERPARAMETER_SNAPSHOTFORMAT,
_SOLVERPARAMETER_SOLVERMODE,
_SOLVERPARAMETER_SOLVERTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=986,
serialized_end=2294,
)
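# caffe.SolverParameter collects the training hyperparameters (base_lr,
# lr_policy, momentum, weight_decay, snapshotting, solver type, ...).
# Illustrative use, assuming this module is imported as caffe_pb2:
#     solver = caffe_pb2.SolverParameter(base_lr=0.01, lr_policy='step',
#                                        gamma=0.1, stepsize=10000)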
_SOLVERSTATE = _descriptor.Descriptor(
name='SolverState',
full_name='caffe.SolverState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='iter', full_name='caffe.SolverState.iter', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learned_net', full_name='caffe.SolverState.learned_net', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='history', full_name='caffe.SolverState.history', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='current_step', full_name='caffe.SolverState.current_step', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2296,
serialized_end=2404,
)
_NETSTATE = _descriptor.Descriptor(
name='NetState',
full_name='caffe.NetState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phase', full_name='caffe.NetState.phase', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='caffe.NetState.level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stage', full_name='caffe.NetState.stage', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2406,
serialized_end=2484,
)
_NETSTATERULE = _descriptor.Descriptor(
name='NetStateRule',
full_name='caffe.NetStateRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phase', full_name='caffe.NetStateRule.phase', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_level', full_name='caffe.NetStateRule.min_level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_level', full_name='caffe.NetStateRule.max_level', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stage', full_name='caffe.NetStateRule.stage', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='not_stage', full_name='caffe.NetStateRule.not_stage', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2486,
serialized_end=2601,
)
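# caffe.NetState / caffe.NetStateRule implement the include/exclude
# machinery: a layer's `include`/`exclude` NetStateRule entries are matched
# against the net's NetState (phase, level, stage) to decide whether the
# layer is instantiated in that net.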
_PARAMSPEC = _descriptor.Descriptor(
name='ParamSpec',
full_name='caffe.ParamSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.ParamSpec.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='share_mode', full_name='caffe.ParamSpec.share_mode', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_mult', full_name='caffe.ParamSpec.lr_mult', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay_mult', full_name='caffe.ParamSpec.decay_mult', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_PARAMSPEC_DIMCHECKMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2604,
serialized_end=2767,
)
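# caffe.LayerParameter is the V2 layer definition: common fields (name, type,
# bottom/top blob names, phase, loss_weight, param specs, blobs) followed by
# one optional sub-message per layer type (field numbers 100 and up), of
# which only the one matching `type` is normally populated.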
_LAYERPARAMETER = _descriptor.Descriptor(
name='LayerParameter',
full_name='caffe.LayerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.LayerParameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.LayerParameter.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bottom', full_name='caffe.LayerParameter.bottom', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='top', full_name='caffe.LayerParameter.top', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='phase', full_name='caffe.LayerParameter.phase', index=4,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_weight', full_name='caffe.LayerParameter.loss_weight', index=5,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='param', full_name='caffe.LayerParameter.param', index=6,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.LayerParameter.blobs', index=7,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='propagate_down', full_name='caffe.LayerParameter.propagate_down', index=8,
number=11, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include', full_name='caffe.LayerParameter.include', index=9,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exclude', full_name='caffe.LayerParameter.exclude', index=10,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transform_param', full_name='caffe.LayerParameter.transform_param', index=11,
number=100, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_param', full_name='caffe.LayerParameter.loss_param', index=12,
number=101, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accuracy_param', full_name='caffe.LayerParameter.accuracy_param', index=13,
number=102, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='argmax_param', full_name='caffe.LayerParameter.argmax_param', index=14,
number=103, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_norm_param', full_name='caffe.LayerParameter.batch_norm_param', index=15,
number=139, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_param', full_name='caffe.LayerParameter.bias_param', index=16,
number=141, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='concat_param', full_name='caffe.LayerParameter.concat_param', index=17,
number=104, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contrastive_loss_param', full_name='caffe.LayerParameter.contrastive_loss_param', index=18,
number=105, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='convolution_param', full_name='caffe.LayerParameter.convolution_param', index=19,
number=106, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_param', full_name='caffe.LayerParameter.crop_param', index=20,
number=144, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_param', full_name='caffe.LayerParameter.data_param', index=21,
number=107, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_param', full_name='caffe.LayerParameter.dropout_param', index=22,
number=108, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dummy_data_param', full_name='caffe.LayerParameter.dummy_data_param', index=23,
number=109, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eltwise_param', full_name='caffe.LayerParameter.eltwise_param', index=24,
number=110, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='elu_param', full_name='caffe.LayerParameter.elu_param', index=25,
number=140, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='embed_param', full_name='caffe.LayerParameter.embed_param', index=26,
number=137, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exp_param', full_name='caffe.LayerParameter.exp_param', index=27,
number=111, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flatten_param', full_name='caffe.LayerParameter.flatten_param', index=28,
number=135, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hdf5_data_param', full_name='caffe.LayerParameter.hdf5_data_param', index=29,
number=112, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hdf5_output_param', full_name='caffe.LayerParameter.hdf5_output_param', index=30,
number=113, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hinge_loss_param', full_name='caffe.LayerParameter.hinge_loss_param', index=31,
number=114, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_data_param', full_name='caffe.LayerParameter.image_data_param', index=32,
number=115, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='infogain_loss_param', full_name='caffe.LayerParameter.infogain_loss_param', index=33,
number=116, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inner_product_param', full_name='caffe.LayerParameter.inner_product_param', index=34,
number=117, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_param', full_name='caffe.LayerParameter.input_param', index=35,
number=143, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='log_param', full_name='caffe.LayerParameter.log_param', index=36,
number=134, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lrn_param', full_name='caffe.LayerParameter.lrn_param', index=37,
number=118, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory_data_param', full_name='caffe.LayerParameter.memory_data_param', index=38,
number=119, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mvn_param', full_name='caffe.LayerParameter.mvn_param', index=39,
number=120, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='norm_param', full_name='caffe.LayerParameter.norm_param', index=40,
number=149, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameter_param', full_name='caffe.LayerParameter.parameter_param', index=41,
number=145, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pooling_param', full_name='caffe.LayerParameter.pooling_param', index=42,
number=121, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='power_param', full_name='caffe.LayerParameter.power_param', index=43,
number=122, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prelu_param', full_name='caffe.LayerParameter.prelu_param', index=44,
number=131, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='python_param', full_name='caffe.LayerParameter.python_param', index=45,
number=130, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_cat_conv_param', full_name='caffe.LayerParameter.rand_cat_conv_param', index=46,
number=147, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_cat_param', full_name='caffe.LayerParameter.rand_cat_param', index=47,
number=148, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_comp_param', full_name='caffe.LayerParameter.rand_comp_param', index=48,
number=150, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recurrent_param', full_name='caffe.LayerParameter.recurrent_param', index=49,
number=146, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reduction_param', full_name='caffe.LayerParameter.reduction_param', index=50,
number=136, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relu_param', full_name='caffe.LayerParameter.relu_param', index=51,
number=123, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reshape_param', full_name='caffe.LayerParameter.reshape_param', index=52,
number=133, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale_param', full_name='caffe.LayerParameter.scale_param', index=53,
number=142, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sigmoid_param', full_name='caffe.LayerParameter.sigmoid_param', index=54,
number=124, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='softmax_param', full_name='caffe.LayerParameter.softmax_param', index=55,
number=125, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spp_param', full_name='caffe.LayerParameter.spp_param', index=56,
number=132, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='slice_param', full_name='caffe.LayerParameter.slice_param', index=57,
number=126, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tanh_param', full_name='caffe.LayerParameter.tanh_param', index=58,
number=127, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='threshold_param', full_name='caffe.LayerParameter.threshold_param', index=59,
number=128, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tile_param', full_name='caffe.LayerParameter.tile_param', index=60,
number=138, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='window_data_param', full_name='caffe.LayerParameter.window_data_param', index=61,
number=129, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2770,
serialized_end=5541,
)
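
# Note on these generated descriptors: 'number' is the proto wire tag,
# 'label' encodes optional(1) / required(2) / repeated(3), and the
# 'type'/'cpp_type' pairs are protobuf field-type codes (e.g. 2/6 = float,
# 8/7 = bool, 13/3 = uint32, 9/9 = string, 5/1 = int32, 11/10 = message,
# 14/8 = enum). serialized_start/serialized_end are byte offsets into the
# serialized FileDescriptorProto.

# caffe.TransformationParameter: input preprocessing shared by data layers --
# scale factor, random mirroring, crop_size, and mean subtraction via a
# mean_file or repeated per-channel mean_value.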
_TRANSFORMATIONPARAMETER = _descriptor.Descriptor(
name='TransformationParameter',
full_name='caffe.TransformationParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.TransformationParameter.scale', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.TransformationParameter.mirror', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.TransformationParameter.crop_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.TransformationParameter.mean_file', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_value', full_name='caffe.TransformationParameter.mean_value', index=4,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force_color', full_name='caffe.TransformationParameter.force_color', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force_gray', full_name='caffe.TransformationParameter.force_gray', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5544,
serialized_end=5726,
)
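
# caffe.LossParameter: options shared by loss layers -- ignore_label, a
# normalization-mode enum (NormalizationMode, default 1), and the older
# boolean 'normalize' flag.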
_LOSSPARAMETER = _descriptor.Descriptor(
name='LossParameter',
full_name='caffe.LossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ignore_label', full_name='caffe.LossParameter.ignore_label', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalization', full_name='caffe.LossParameter.normalization', index=1,
number=3, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalize', full_name='caffe.LossParameter.normalize', index=2,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOSSPARAMETER_NORMALIZATIONMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5729,
serialized_end=5923,
)
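
# caffe.AccuracyParameter: top_k (default 1), axis (default 1), ignore_label.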
_ACCURACYPARAMETER = _descriptor.Descriptor(
name='AccuracyParameter',
full_name='caffe.AccuracyParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='top_k', full_name='caffe.AccuracyParameter.top_k', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.AccuracyParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ignore_label', full_name='caffe.AccuracyParameter.ignore_label', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5925,
serialized_end=6001,
)
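
# caffe.ArgMaxParameter: out_max_val, top_k (default 1), axis.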
_ARGMAXPARAMETER = _descriptor.Descriptor(
name='ArgMaxParameter',
full_name='caffe.ArgMaxParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='out_max_val', full_name='caffe.ArgMaxParameter.out_max_val', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='top_k', full_name='caffe.ArgMaxParameter.top_k', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ArgMaxParameter.axis', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6003,
serialized_end=6080,
)
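
# caffe.ConcatParameter: axis (default 1) plus the legacy concat_dim field.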
_CONCATPARAMETER = _descriptor.Descriptor(
name='ConcatParameter',
full_name='caffe.ConcatParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ConcatParameter.axis', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='concat_dim', full_name='caffe.ConcatParameter.concat_dim', index=1,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6082,
serialized_end=6139,
)
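
# caffe.BatchNormParameter: use_global_stats, moving_average_fraction
# (default 0.999), eps (default 1e-5).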
_BATCHNORMPARAMETER = _descriptor.Descriptor(
name='BatchNormParameter',
full_name='caffe.BatchNormParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='use_global_stats', full_name='caffe.BatchNormParameter.use_global_stats', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='moving_average_fraction', full_name='caffe.BatchNormParameter.moving_average_fraction', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eps', full_name='caffe.BatchNormParameter.eps', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-05),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6141,
serialized_end=6247,
)
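
# caffe.BiasParameter: axis, num_axes, and a filler for the learned bias blob.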
_BIASPARAMETER = _descriptor.Descriptor(
name='BiasParameter',
full_name='caffe.BiasParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.BiasParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_axes', full_name='caffe.BiasParameter.num_axes', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filler', full_name='caffe.BiasParameter.filler', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6249,
serialized_end=6342,
)
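
# caffe.ContrastiveLossParameter: margin (default 1) and legacy_version.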
_CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor(
name='ContrastiveLossParameter',
full_name='caffe.ContrastiveLossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='margin', full_name='caffe.ContrastiveLossParameter.margin', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='legacy_version', full_name='caffe.ContrastiveLossParameter.legacy_version', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6344,
serialized_end=6420,
)
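
# caffe.ConvolutionParameter: num_output, bias_term, repeated pad /
# kernel_size / stride / dilation (with 2-D *_h / *_w variants), group,
# weight/bias fillers, engine enum, axis, force_nd_im2col.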
_CONVOLUTIONPARAMETER = _descriptor.Descriptor(
name='ConvolutionParameter',
full_name='caffe.ConvolutionParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.ConvolutionParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.ConvolutionParameter.bias_term', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.ConvolutionParameter.pad', index=2,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='caffe.ConvolutionParameter.kernel_size', index=3,
number=4, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride', full_name='caffe.ConvolutionParameter.stride', index=4,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dilation', full_name='caffe.ConvolutionParameter.dilation', index=5,
number=18, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad_h', full_name='caffe.ConvolutionParameter.pad_h', index=6,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad_w', full_name='caffe.ConvolutionParameter.pad_w', index=7,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_h', full_name='caffe.ConvolutionParameter.kernel_h', index=8,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_w', full_name='caffe.ConvolutionParameter.kernel_w', index=9,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride_h', full_name='caffe.ConvolutionParameter.stride_h', index=10,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride_w', full_name='caffe.ConvolutionParameter.stride_w', index=11,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group', full_name='caffe.ConvolutionParameter.group', index=12,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.ConvolutionParameter.weight_filler', index=13,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.ConvolutionParameter.bias_filler', index=14,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.ConvolutionParameter.engine', index=15,
number=15, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ConvolutionParameter.axis', index=16,
number=16, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force_nd_im2col', full_name='caffe.ConvolutionParameter.force_nd_im2col', index=17,
number=17, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_CONVOLUTIONPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6423,
serialized_end=6931,
)
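
# caffe.CropParameter: axis (default 2) and repeated per-axis offset.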
_CROPPARAMETER = _descriptor.Descriptor(
name='CropParameter',
full_name='caffe.CropParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.CropParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='caffe.CropParameter.offset', index=1,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6933,
serialized_end=6981,
)
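
# caffe.DataParameter: database-backed input -- source, batch_size, rand_skip,
# backend (DB enum), prefetch (default 4), force_encoded_color, plus older
# in-layer transform fields (scale, mean_file, crop_size, mirror).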
_DATAPARAMETER = _descriptor.Descriptor(
name='DataParameter',
full_name='caffe.DataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.DataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.DataParameter.batch_size', index=1,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_skip', full_name='caffe.DataParameter.rand_skip', index=2,
number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='backend', full_name='caffe.DataParameter.backend', index=3,
number=8, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.DataParameter.scale', index=4,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.DataParameter.mean_file', index=5,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.DataParameter.crop_size', index=6,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.DataParameter.mirror', index=7,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force_encoded_color', full_name='caffe.DataParameter.force_encoded_color', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prefetch', full_name='caffe.DataParameter.prefetch', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_DATAPARAMETER_DB,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6984,
serialized_end=7276,
)
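
# caffe.DropoutParameter: dropout_ratio (default 0.5).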
_DROPOUTPARAMETER = _descriptor.Descriptor(
name='DropoutParameter',
full_name='caffe.DropoutParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dropout_ratio', full_name='caffe.DropoutParameter.dropout_ratio', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7278,
serialized_end=7324,
)
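
# caffe.DummyDataParameter: repeated data_filler and shape, plus the legacy
# num / channels / height / width dimension fields.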
_DUMMYDATAPARAMETER = _descriptor.Descriptor(
name='DummyDataParameter',
full_name='caffe.DummyDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_filler', full_name='caffe.DummyDataParameter.data_filler', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.DummyDataParameter.shape', index=1,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num', full_name='caffe.DummyDataParameter.num', index=2,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.DummyDataParameter.channels', index=3,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.DummyDataParameter.height', index=4,
number=4, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.DummyDataParameter.width', index=5,
number=5, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7327,
serialized_end=7487,
)
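
# caffe.EltwiseParameter: operation (EltwiseOp enum), per-bottom coeff,
# stable_prod_grad.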
_ELTWISEPARAMETER = _descriptor.Descriptor(
name='EltwiseParameter',
full_name='caffe.EltwiseParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='caffe.EltwiseParameter.operation', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coeff', full_name='caffe.EltwiseParameter.coeff', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stable_prod_grad', full_name='caffe.EltwiseParameter.stable_prod_grad', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_ELTWISEPARAMETER_ELTWISEOP,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7490,
serialized_end=7655,
)
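
# caffe.ELUParameter: alpha (default 1).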
_ELUPARAMETER = _descriptor.Descriptor(
name='ELUParameter',
full_name='caffe.ELUParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='alpha', full_name='caffe.ELUParameter.alpha', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7657,
serialized_end=7689,
)
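
# caffe.EmbedParameter: num_output, input_dim, bias_term, weight/bias fillers.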
_EMBEDPARAMETER = _descriptor.Descriptor(
name='EmbedParameter',
full_name='caffe.EmbedParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.EmbedParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_dim', full_name='caffe.EmbedParameter.input_dim', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.EmbedParameter.bias_term', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.EmbedParameter.weight_filler', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.EmbedParameter.bias_filler', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7692,
serialized_end=7864,
)
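
# caffe.ExpParameter: base, scale, shift; in Caffe's ExpLayer these define
# y = base^(shift + scale * x), with base = -1 meaning base e.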
_EXPPARAMETER = _descriptor.Descriptor(
name='ExpParameter',
full_name='caffe.ExpParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base', full_name='caffe.ExpParameter.base', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.ExpParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shift', full_name='caffe.ExpParameter.shift', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7866,
serialized_end=7934,
)
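
# caffe.FlattenParameter: axis (default 1) and end_axis (default -1).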
_FLATTENPARAMETER = _descriptor.Descriptor(
name='FlattenParameter',
full_name='caffe.FlattenParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.FlattenParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_axis', full_name='caffe.FlattenParameter.end_axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7936,
serialized_end=7993,
)
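
# caffe.HDF5DataParameter: source, batch_size, shuffle.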
_HDF5DATAPARAMETER = _descriptor.Descriptor(
name='HDF5DataParameter',
full_name='caffe.HDF5DataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.HDF5DataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.HDF5DataParameter.batch_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shuffle', full_name='caffe.HDF5DataParameter.shuffle', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7995,
serialized_end=8074,
)
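
# caffe.HDF5OutputParameter: file_name of the HDF5 file to write.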
_HDF5OUTPUTPARAMETER = _descriptor.Descriptor(
name='HDF5OutputParameter',
full_name='caffe.HDF5OutputParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_name', full_name='caffe.HDF5OutputParameter.file_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8076,
serialized_end=8116,
)
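
# caffe.HingeLossParameter: norm (the Norm enum, L1/L2; default 1).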
_HINGELOSSPARAMETER = _descriptor.Descriptor(
name='HingeLossParameter',
full_name='caffe.HingeLossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='norm', full_name='caffe.HingeLossParameter.norm', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_HINGELOSSPARAMETER_NORM,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8118,
serialized_end=8212,
)
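
# caffe.ImageDataParameter: image-list input -- source, batch_size, rand_skip,
# shuffle, new_height / new_width resizing, is_color, root_folder, plus older
# in-layer transform fields (scale, mean_file, crop_size, mirror).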
_IMAGEDATAPARAMETER = _descriptor.Descriptor(
name='ImageDataParameter',
full_name='caffe.ImageDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.ImageDataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.ImageDataParameter.batch_size', index=1,
number=4, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_skip', full_name='caffe.ImageDataParameter.rand_skip', index=2,
number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shuffle', full_name='caffe.ImageDataParameter.shuffle', index=3,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_height', full_name='caffe.ImageDataParameter.new_height', index=4,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_width', full_name='caffe.ImageDataParameter.new_width', index=5,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_color', full_name='caffe.ImageDataParameter.is_color', index=6,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.ImageDataParameter.scale', index=7,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.ImageDataParameter.mean_file', index=8,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.ImageDataParameter.crop_size', index=9,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.ImageDataParameter.mirror', index=10,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root_folder', full_name='caffe.ImageDataParameter.root_folder', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8215,
serialized_end=8494,
)
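
# caffe.InfogainLossParameter: source, the path of the infogain matrix file.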
_INFOGAINLOSSPARAMETER = _descriptor.Descriptor(
name='InfogainLossParameter',
full_name='caffe.InfogainLossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.InfogainLossParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8496,
serialized_end=8535,
)
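
# caffe.InnerProductParameter: num_output, bias_term, weight/bias fillers,
# axis (default 1), transpose.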
_INNERPRODUCTPARAMETER = _descriptor.Descriptor(
name='InnerProductParameter',
full_name='caffe.InnerProductParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.InnerProductParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.InnerProductParameter.bias_term', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.InnerProductParameter.weight_filler', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.InnerProductParameter.bias_filler', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.InnerProductParameter.axis', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transpose', full_name='caffe.InnerProductParameter.transpose', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8538,
serialized_end=8741,
)
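
# caffe.InputParameter: repeated shape for the tops of an Input layer.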
_INPUTPARAMETER = _descriptor.Descriptor(
name='InputParameter',
full_name='caffe.InputParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.InputParameter.shape', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8743,
serialized_end=8792,
)
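
# caffe.LogParameter: base, scale, shift (same convention as ExpParameter).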
_LOGPARAMETER = _descriptor.Descriptor(
name='LogParameter',
full_name='caffe.LogParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base', full_name='caffe.LogParameter.base', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.LogParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shift', full_name='caffe.LogParameter.shift', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8794,
serialized_end=8862,
)
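
# caffe.LRNParameter: local_size (default 5), alpha, beta (default 0.75),
# norm_region enum, k, engine enum.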
_LRNPARAMETER = _descriptor.Descriptor(
name='LRNParameter',
full_name='caffe.LRNParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='local_size', full_name='caffe.LRNParameter.local_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alpha', full_name='caffe.LRNParameter.alpha', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta', full_name='caffe.LRNParameter.beta', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.75),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='norm_region', full_name='caffe.LRNParameter.norm_region', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='k', full_name='caffe.LRNParameter.k', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.LRNParameter.engine', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LRNPARAMETER_NORMREGION,
_LRNPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8865,
serialized_end=9177,
)
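
# caffe.MemoryDataParameter: batch_size, channels, height, width.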
_MEMORYDATAPARAMETER = _descriptor.Descriptor(
name='MemoryDataParameter',
full_name='caffe.MemoryDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.MemoryDataParameter.batch_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.MemoryDataParameter.channels', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.MemoryDataParameter.height', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.MemoryDataParameter.width', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9179,
serialized_end=9269,
)
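
# caffe.MVNParameter: normalize_variance, across_channels, eps (default 1e-9).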
_MVNPARAMETER = _descriptor.Descriptor(
name='MVNParameter',
full_name='caffe.MVNParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='normalize_variance', full_name='caffe.MVNParameter.normalize_variance', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='across_channels', full_name='caffe.MVNParameter.across_channels', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eps', full_name='caffe.MVNParameter.eps', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-09),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9271,
serialized_end=9371,
)
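
# caffe.NormalizeParameter: across_spatial, scale_filler, channel_shared,
# fix_scale, eps (default 1e-10).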
_NORMALIZEPARAMETER = _descriptor.Descriptor(
name='NormalizeParameter',
full_name='caffe.NormalizeParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='across_spatial', full_name='caffe.NormalizeParameter.across_spatial', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale_filler', full_name='caffe.NormalizeParameter.scale_filler', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channel_shared', full_name='caffe.NormalizeParameter.channel_shared', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fix_scale', full_name='caffe.NormalizeParameter.fix_scale', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eps', full_name='caffe.NormalizeParameter.eps', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-10),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9374,
serialized_end=9545,
)
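
# caffe.ParameterParameter: shape of the parameter blob that a Parameter
# layer exposes as its top.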
_PARAMETERPARAMETER = _descriptor.Descriptor(
name='ParameterParameter',
full_name='caffe.ParameterParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.ParameterParameter.shape', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9547,
serialized_end=9600,
)
_POOLINGPARAMETER = _descriptor.Descriptor(
name='PoolingParameter',
full_name='caffe.PoolingParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pool', full_name='caffe.PoolingParameter.pool', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.PoolingParameter.pad', index=1,
number=4, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad_h', full_name='caffe.PoolingParameter.pad_h', index=2,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad_w', full_name='caffe.PoolingParameter.pad_w', index=3,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='caffe.PoolingParameter.kernel_size', index=4,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_h', full_name='caffe.PoolingParameter.kernel_h', index=5,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_w', full_name='caffe.PoolingParameter.kernel_w', index=6,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride', full_name='caffe.PoolingParameter.stride', index=7,
number=3, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride_h', full_name='caffe.PoolingParameter.stride_h', index=8,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride_w', full_name='caffe.PoolingParameter.stride_w', index=9,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.PoolingParameter.engine', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='global_pooling', full_name='caffe.PoolingParameter.global_pooling', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_POOLINGPARAMETER_POOLMETHOD,
_POOLINGPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9603,
serialized_end=10021,
)
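# Illustrative round-trip through the PoolingParameter message described
# above (a sketch, assuming this generated module is imported as `caffe_pb2`):
#
#   pool = caffe_pb2.PoolingParameter(kernel_size=3, stride=2, pad=0)
#   pool.pool = caffe_pb2.PoolingParameter.MAX   # PoolMethod enum value
#   wire = pool.SerializeToString()              # proto2 wire encoding
#   same = caffe_pb2.PoolingParameter.FromString(wire)
#   assert same.kernel_size == 3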
_POWERPARAMETER = _descriptor.Descriptor(
name='PowerParameter',
full_name='caffe.PowerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='power', full_name='caffe.PowerParameter.power', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.PowerParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shift', full_name='caffe.PowerParameter.shift', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10023,
serialized_end=10093,
)
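# PowerParameter configures Caffe's power layer, which computes
# y = (shift + scale * x) ** power elementwise. A sketch via text format
# (assuming the module is imported as `caffe_pb2`):
#
#   from google.protobuf import text_format
#   p = text_format.Merge('power: 2 scale: 0.5 shift: 1',
#                         caffe_pb2.PowerParameter())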
_PYTHONPARAMETER = _descriptor.Descriptor(
name='PythonParameter',
full_name='caffe.PythonParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='module', full_name='caffe.PythonParameter.module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='layer', full_name='caffe.PythonParameter.layer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='param_str', full_name='caffe.PythonParameter.param_str', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='share_in_parallel', full_name='caffe.PythonParameter.share_in_parallel', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10095,
serialized_end=10198,
)
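# PythonParameter binds a Python-implemented layer: `module` names the module
# to import, `layer` the class inside it, and `param_str` is an opaque string
# handed to the layer's setup. A prototxt-style sketch (module and class
# names here are hypothetical):
#
#   python_param {
#     module: "my_layers"
#     layer: "MyDataLayer"
#     param_str: "{'batch_size': 32}"
#   }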
_RANDCATPARAMETER = _descriptor.Descriptor(
name='RandCatParameter',
full_name='caffe.RandCatParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rand_selection', full_name='caffe.RandCatParameter.rand_selection', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.RandCatParameter.num_output', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10200,
serialized_end=10274,
)
_RANDCATCONVPARAMETER = _descriptor.Descriptor(
name='RandCatConvParameter',
full_name='caffe.RandCatConvParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rand_selection', full_name='caffe.RandCatConvParameter.rand_selection', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.RandCatConvParameter.num_output', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pooling_factor', full_name='caffe.RandCatConvParameter.pooling_factor', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad_factor', full_name='caffe.RandCatConvParameter.pad_factor', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10276,
serialized_end=10401,
)
_RANDCOMPPARAMETER = _descriptor.Descriptor(
name='RandCompParameter',
full_name='caffe.RandCompParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='compression_rate', full_name='caffe.RandCompParameter.compression_rate', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.RandCompParameter.pad', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10403,
serialized_end=10468,
)
_RECURRENTPARAMETER = _descriptor.Descriptor(
name='RecurrentParameter',
full_name='caffe.RecurrentParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.RecurrentParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.RecurrentParameter.weight_filler', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.RecurrentParameter.bias_filler', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug_info', full_name='caffe.RecurrentParameter.debug_info', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expose_hidden', full_name='caffe.RecurrentParameter.expose_hidden', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10471,
serialized_end=10663,
)
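# RecurrentParameter configures Caffe's recurrent layers (RNN/LSTM);
# `expose_hidden` adds the initial and final recurrent states as extra
# bottom/top blobs. A sketch (the import name is an assumption):
#
#   rec = caffe_pb2.RecurrentParameter(num_output=256, expose_hidden=True)
#   rec.weight_filler.type = 'uniform'   # weight_filler is a FillerParameter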
_REDUCTIONPARAMETER = _descriptor.Descriptor(
name='ReductionParameter',
full_name='caffe.ReductionParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='caffe.ReductionParameter.operation', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ReductionParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coeff', full_name='caffe.ReductionParameter.coeff', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_REDUCTIONPARAMETER_REDUCTIONOP,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10666,
serialized_end=10839,
)
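# ReductionParameter reduces all axes from `axis` onward using `operation`
# (a ReductionOp enum value; 1, the default here, is SUM in stock caffe.proto)
# and multiplies the result by `coeff`. A sketch:
#
#   red = caffe_pb2.ReductionParameter(axis=1, coeff=0.5)
#   red.operation = caffe_pb2.ReductionParameter.MEAN   # assumes stock enum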
_RELUPARAMETER = _descriptor.Descriptor(
name='ReLUParameter',
full_name='caffe.ReLUParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='negative_slope', full_name='caffe.ReLUParameter.negative_slope', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.ReLUParameter.engine', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_RELUPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10842,
serialized_end=10983,
)
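# A nonzero `negative_slope` turns the ReLU into a leaky ReLU:
# y = max(0, x) + negative_slope * min(0, x). A one-line sketch:
#
#   relu = caffe_pb2.ReLUParameter(negative_slope=0.1)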
_RESHAPEPARAMETER = _descriptor.Descriptor(
name='ReshapeParameter',
full_name='caffe.ReshapeParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.ReshapeParameter.shape', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ReshapeParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_axes', full_name='caffe.ReshapeParameter.num_axes', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10985,
serialized_end=11075,
)
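# Under Caffe's reshape semantics a `shape` dimension of 0 copies the
# corresponding bottom dimension and -1 is inferred from the remaining
# sizes. A sketch that flattens all trailing axes:
#
#   r = caffe_pb2.ReshapeParameter()
#   r.shape.dim.extend([0, -1])   # shape is a BlobShape (repeated int64 dim)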
_SCALEPARAMETER = _descriptor.Descriptor(
name='ScaleParameter',
full_name='caffe.ScaleParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ScaleParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_axes', full_name='caffe.ScaleParameter.num_axes', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filler', full_name='caffe.ScaleParameter.filler', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.ScaleParameter.bias_term', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.ScaleParameter.bias_filler', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11078,
serialized_end=11243,
)
_SIGMOIDPARAMETER = _descriptor.Descriptor(
name='SigmoidParameter',
full_name='caffe.SigmoidParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.SigmoidParameter.engine', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SIGMOIDPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11245,
serialized_end=11365,
)
_SLICEPARAMETER = _descriptor.Descriptor(
name='SliceParameter',
full_name='caffe.SliceParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.SliceParameter.axis', index=0,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='slice_point', full_name='caffe.SliceParameter.slice_point', index=1,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='slice_dim', full_name='caffe.SliceParameter.slice_dim', index=2,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11367,
serialized_end=11443,
)
_SOFTMAXPARAMETER = _descriptor.Descriptor(
name='SoftmaxParameter',
full_name='caffe.SoftmaxParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.SoftmaxParameter.engine', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.SoftmaxParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOFTMAXPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11446,
serialized_end=11583,
)
_TANHPARAMETER = _descriptor.Descriptor(
name='TanHParameter',
full_name='caffe.TanHParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.TanHParameter.engine', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TANHPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11585,
serialized_end=11699,
)
_TILEPARAMETER = _descriptor.Descriptor(
name='TileParameter',
full_name='caffe.TileParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.TileParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tiles', full_name='caffe.TileParameter.tiles', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11701,
serialized_end=11748,
)
_THRESHOLDPARAMETER = _descriptor.Descriptor(
name='ThresholdParameter',
full_name='caffe.ThresholdParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='threshold', full_name='caffe.ThresholdParameter.threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11750,
serialized_end=11792,
)
_WINDOWDATAPARAMETER = _descriptor.Descriptor(
name='WindowDataParameter',
full_name='caffe.WindowDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.WindowDataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.WindowDataParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.WindowDataParameter.mean_file', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.WindowDataParameter.batch_size', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.WindowDataParameter.crop_size', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.WindowDataParameter.mirror', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fg_threshold', full_name='caffe.WindowDataParameter.fg_threshold', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bg_threshold', full_name='caffe.WindowDataParameter.bg_threshold', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fg_fraction', full_name='caffe.WindowDataParameter.fg_fraction', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.25),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='context_pad', full_name='caffe.WindowDataParameter.context_pad', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_mode', full_name='caffe.WindowDataParameter.crop_mode', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("warp").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cache_images', full_name='caffe.WindowDataParameter.cache_images', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root_folder', full_name='caffe.WindowDataParameter.root_folder', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11795,
serialized_end=12116,
)
_SPPPARAMETER = _descriptor.Descriptor(
name='SPPParameter',
full_name='caffe.SPPParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pyramid_height', full_name='caffe.SPPParameter.pyramid_height', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pool', full_name='caffe.SPPParameter.pool', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.SPPParameter.engine', index=2,
number=6, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SPPPARAMETER_POOLMETHOD,
_SPPPARAMETER_ENGINE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=12119,
serialized_end=12354,
)
_V1LAYERPARAMETER = _descriptor.Descriptor(
name='V1LayerParameter',
full_name='caffe.V1LayerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bottom', full_name='caffe.V1LayerParameter.bottom', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='top', full_name='caffe.V1LayerParameter.top', index=1,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='caffe.V1LayerParameter.name', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include', full_name='caffe.V1LayerParameter.include', index=3,
number=32, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exclude', full_name='caffe.V1LayerParameter.exclude', index=4,
number=33, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.V1LayerParameter.type', index=5,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.V1LayerParameter.blobs', index=6,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='param', full_name='caffe.V1LayerParameter.param', index=7,
number=1001, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blob_share_mode', full_name='caffe.V1LayerParameter.blob_share_mode', index=8,
number=1002, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blobs_lr', full_name='caffe.V1LayerParameter.blobs_lr', index=9,
number=7, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='caffe.V1LayerParameter.weight_decay', index=10,
number=8, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_weight', full_name='caffe.V1LayerParameter.loss_weight', index=11,
number=35, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accuracy_param', full_name='caffe.V1LayerParameter.accuracy_param', index=12,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='argmax_param', full_name='caffe.V1LayerParameter.argmax_param', index=13,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='concat_param', full_name='caffe.V1LayerParameter.concat_param', index=14,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contrastive_loss_param', full_name='caffe.V1LayerParameter.contrastive_loss_param', index=15,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='convolution_param', full_name='caffe.V1LayerParameter.convolution_param', index=16,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_param', full_name='caffe.V1LayerParameter.data_param', index=17,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_param', full_name='caffe.V1LayerParameter.dropout_param', index=18,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dummy_data_param', full_name='caffe.V1LayerParameter.dummy_data_param', index=19,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eltwise_param', full_name='caffe.V1LayerParameter.eltwise_param', index=20,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exp_param', full_name='caffe.V1LayerParameter.exp_param', index=21,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hdf5_data_param', full_name='caffe.V1LayerParameter.hdf5_data_param', index=22,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hdf5_output_param', full_name='caffe.V1LayerParameter.hdf5_output_param', index=23,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hinge_loss_param', full_name='caffe.V1LayerParameter.hinge_loss_param', index=24,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_data_param', full_name='caffe.V1LayerParameter.image_data_param', index=25,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='infogain_loss_param', full_name='caffe.V1LayerParameter.infogain_loss_param', index=26,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inner_product_param', full_name='caffe.V1LayerParameter.inner_product_param', index=27,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lrn_param', full_name='caffe.V1LayerParameter.lrn_param', index=28,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory_data_param', full_name='caffe.V1LayerParameter.memory_data_param', index=29,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mvn_param', full_name='caffe.V1LayerParameter.mvn_param', index=30,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='norm_param', full_name='caffe.V1LayerParameter.norm_param', index=31,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pooling_param', full_name='caffe.V1LayerParameter.pooling_param', index=32,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='power_param', full_name='caffe.V1LayerParameter.power_param', index=33,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_cat_conv_param', full_name='caffe.V1LayerParameter.rand_cat_conv_param', index=34,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_cat_param', full_name='caffe.V1LayerParameter.rand_cat_param', index=35,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relu_param', full_name='caffe.V1LayerParameter.relu_param', index=36,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sigmoid_param', full_name='caffe.V1LayerParameter.sigmoid_param', index=37,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='softmax_param', full_name='caffe.V1LayerParameter.softmax_param', index=38,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='slice_param', full_name='caffe.V1LayerParameter.slice_param', index=39,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tanh_param', full_name='caffe.V1LayerParameter.tanh_param', index=40,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='threshold_param', full_name='caffe.V1LayerParameter.threshold_param', index=41,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='window_data_param', full_name='caffe.V1LayerParameter.window_data_param', index=42,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transform_param', full_name='caffe.V1LayerParameter.transform_param', index=43,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_param', full_name='caffe.V1LayerParameter.loss_param', index=44,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='layer', full_name='caffe.V1LayerParameter.layer', index=45,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_V1LAYERPARAMETER_LAYERTYPE,
_V1LAYERPARAMETER_DIMCHECKMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=12357,
serialized_end=15087,
)
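# V1LayerParameter is the deprecated pre-LayerParameter schema, retained so
# old snapshots stay readable; nets are normally migrated forward (e.g. with
# Caffe's upgrade_net_proto_text tool) rather than authored in this form.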
_V0LAYERPARAMETER = _descriptor.Descriptor(
name='V0LayerParameter',
full_name='caffe.V0LayerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.V0LayerParameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.V0LayerParameter.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.V0LayerParameter.num_output', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='biasterm', full_name='caffe.V0LayerParameter.biasterm', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.V0LayerParameter.weight_filler', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.V0LayerParameter.bias_filler', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.V0LayerParameter.pad', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernelsize', full_name='caffe.V0LayerParameter.kernelsize', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group', full_name='caffe.V0LayerParameter.group', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride', full_name='caffe.V0LayerParameter.stride', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pool', full_name='caffe.V0LayerParameter.pool', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_ratio', full_name='caffe.V0LayerParameter.dropout_ratio', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_size', full_name='caffe.V0LayerParameter.local_size', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alpha', full_name='caffe.V0LayerParameter.alpha', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta', full_name='caffe.V0LayerParameter.beta', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.75),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='k', full_name='caffe.V0LayerParameter.k', index=15,
number=22, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='caffe.V0LayerParameter.source', index=16,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.V0LayerParameter.scale', index=17,
number=17, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='meanfile', full_name='caffe.V0LayerParameter.meanfile', index=18,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchsize', full_name='caffe.V0LayerParameter.batchsize', index=19,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cropsize', full_name='caffe.V0LayerParameter.cropsize', index=20,
number=20, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.V0LayerParameter.mirror', index=21,
number=21, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.V0LayerParameter.blobs', index=22,
number=50, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blobs_lr', full_name='caffe.V0LayerParameter.blobs_lr', index=23,
number=51, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='caffe.V0LayerParameter.weight_decay', index=24,
number=52, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rand_skip', full_name='caffe.V0LayerParameter.rand_skip', index=25,
number=53, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='det_fg_threshold', full_name='caffe.V0LayerParameter.det_fg_threshold', index=26,
number=54, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='det_bg_threshold', full_name='caffe.V0LayerParameter.det_bg_threshold', index=27,
number=55, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='det_fg_fraction', full_name='caffe.V0LayerParameter.det_fg_fraction', index=28,
number=56, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.25),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='det_context_pad', full_name='caffe.V0LayerParameter.det_context_pad', index=29,
number=58, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='det_crop_mode', full_name='caffe.V0LayerParameter.det_crop_mode', index=30,
number=59, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("warp").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_num', full_name='caffe.V0LayerParameter.new_num', index=31,
number=60, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_channels', full_name='caffe.V0LayerParameter.new_channels', index=32,
number=61, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_height', full_name='caffe.V0LayerParameter.new_height', index=33,
number=62, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_width', full_name='caffe.V0LayerParameter.new_width', index=34,
number=63, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shuffle_images', full_name='caffe.V0LayerParameter.shuffle_images', index=35,
number=64, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='concat_dim', full_name='caffe.V0LayerParameter.concat_dim', index=36,
number=65, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hdf5_output_param', full_name='caffe.V0LayerParameter.hdf5_output_param', index=37,
number=1001, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_V0LAYERPARAMETER_POOLMETHOD,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=15090,
serialized_end=16111,
)
_PRELUPARAMETER = _descriptor.Descriptor(
name='PReLUParameter',
full_name='caffe.PReLUParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='filler', full_name='caffe.PReLUParameter.filler', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channel_shared', full_name='caffe.PReLUParameter.channel_shared', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=16113,
serialized_end=16200,
)
_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE
_BLOBPROTOVECTOR.fields_by_name['blobs'].message_type = _BLOBPROTO
_FILLERPARAMETER.fields_by_name['variance_norm'].enum_type = _FILLERPARAMETER_VARIANCENORM
_FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER
_NETPARAMETER.fields_by_name['input_shape'].message_type = _BLOBSHAPE
_NETPARAMETER.fields_by_name['state'].message_type = _NETSTATE
_NETPARAMETER.fields_by_name['layer'].message_type = _LAYERPARAMETER
_NETPARAMETER.fields_by_name['layers'].message_type = _V1LAYERPARAMETER
_SOLVERPARAMETER.fields_by_name['net_param'].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name['train_net_param'].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name['test_net_param'].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name['train_state'].message_type = _NETSTATE
_SOLVERPARAMETER.fields_by_name['test_state'].message_type = _NETSTATE
_SOLVERPARAMETER.fields_by_name['snapshot_format'].enum_type = _SOLVERPARAMETER_SNAPSHOTFORMAT
_SOLVERPARAMETER.fields_by_name['solver_mode'].enum_type = _SOLVERPARAMETER_SOLVERMODE
_SOLVERPARAMETER.fields_by_name['solver_type'].enum_type = _SOLVERPARAMETER_SOLVERTYPE
_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER
_SOLVERSTATE.fields_by_name['history'].message_type = _BLOBPROTO
_NETSTATE.fields_by_name['phase'].enum_type = _PHASE
_NETSTATERULE.fields_by_name['phase'].enum_type = _PHASE
_PARAMSPEC.fields_by_name['share_mode'].enum_type = _PARAMSPEC_DIMCHECKMODE
_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC
_LAYERPARAMETER.fields_by_name['phase'].enum_type = _PHASE
_LAYERPARAMETER.fields_by_name['param'].message_type = _PARAMSPEC
_LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER
_LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER
_LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER
_LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER
_LAYERPARAMETER.fields_by_name['batch_norm_param'].message_type = _BATCHNORMPARAMETER
_LAYERPARAMETER.fields_by_name['bias_param'].message_type = _BIASPARAMETER
_LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER
_LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER
_LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER
_LAYERPARAMETER.fields_by_name['crop_param'].message_type = _CROPPARAMETER
_LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER
_LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER
_LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER
_LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER
_LAYERPARAMETER.fields_by_name['elu_param'].message_type = _ELUPARAMETER
_LAYERPARAMETER.fields_by_name['embed_param'].message_type = _EMBEDPARAMETER
_LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER
_LAYERPARAMETER.fields_by_name['flatten_param'].message_type = _FLATTENPARAMETER
_LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER
_LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER
_LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER
_LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER
_LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER
_LAYERPARAMETER.fields_by_name['input_param'].message_type = _INPUTPARAMETER
_LAYERPARAMETER.fields_by_name['log_param'].message_type = _LOGPARAMETER
_LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER
_LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER
_LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER
_LAYERPARAMETER.fields_by_name['norm_param'].message_type = _NORMALIZEPARAMETER
_LAYERPARAMETER.fields_by_name['parameter_param'].message_type = _PARAMETERPARAMETER
_LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER
_LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER
_LAYERPARAMETER.fields_by_name['prelu_param'].message_type = _PRELUPARAMETER
_LAYERPARAMETER.fields_by_name['python_param'].message_type = _PYTHONPARAMETER
_LAYERPARAMETER.fields_by_name['rand_cat_conv_param'].message_type = _RANDCATCONVPARAMETER
_LAYERPARAMETER.fields_by_name['rand_cat_param'].message_type = _RANDCATPARAMETER
_LAYERPARAMETER.fields_by_name['rand_comp_param'].message_type = _RANDCOMPPARAMETER
_LAYERPARAMETER.fields_by_name['recurrent_param'].message_type = _RECURRENTPARAMETER
_LAYERPARAMETER.fields_by_name['reduction_param'].message_type = _REDUCTIONPARAMETER
_LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER
_LAYERPARAMETER.fields_by_name['reshape_param'].message_type = _RESHAPEPARAMETER
_LAYERPARAMETER.fields_by_name['scale_param'].message_type = _SCALEPARAMETER
_LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER
_LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER
_LAYERPARAMETER.fields_by_name['spp_param'].message_type = _SPPPARAMETER
_LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER
_LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER
_LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER
_LAYERPARAMETER.fields_by_name['tile_param'].message_type = _TILEPARAMETER
_LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER
_LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE
_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER
_BIASPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['engine'].enum_type = _CONVOLUTIONPARAMETER_ENGINE
_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER
_DATAPARAMETER.fields_by_name['backend'].enum_type = _DATAPARAMETER_DB
_DATAPARAMETER_DB.containing_type = _DATAPARAMETER
_DUMMYDATAPARAMETER.fields_by_name['data_filler'].message_type = _FILLERPARAMETER
_DUMMYDATAPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_ELTWISEPARAMETER.fields_by_name['operation'].enum_type = _ELTWISEPARAMETER_ELTWISEOP
_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER
_EMBEDPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_EMBEDPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_HINGELOSSPARAMETER.fields_by_name['norm'].enum_type = _HINGELOSSPARAMETER_NORM
_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_INPUTPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_LRNPARAMETER.fields_by_name['norm_region'].enum_type = _LRNPARAMETER_NORMREGION
_LRNPARAMETER.fields_by_name['engine'].enum_type = _LRNPARAMETER_ENGINE
_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER
_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER
_NORMALIZEPARAMETER.fields_by_name['scale_filler'].message_type = _FILLERPARAMETER
_PARAMETERPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_POOLINGPARAMETER.fields_by_name['pool'].enum_type = _POOLINGPARAMETER_POOLMETHOD
_POOLINGPARAMETER.fields_by_name['engine'].enum_type = _POOLINGPARAMETER_ENGINE
_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER
_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER
_RECURRENTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_RECURRENTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_REDUCTIONPARAMETER.fields_by_name['operation'].enum_type = _REDUCTIONPARAMETER_REDUCTIONOP
_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER
_RELUPARAMETER.fields_by_name['engine'].enum_type = _RELUPARAMETER_ENGINE
_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER
_RESHAPEPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_SCALEPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
_SCALEPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_SIGMOIDPARAMETER.fields_by_name['engine'].enum_type = _SIGMOIDPARAMETER_ENGINE
_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER
_SOFTMAXPARAMETER.fields_by_name['engine'].enum_type = _SOFTMAXPARAMETER_ENGINE
_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER
_TANHPARAMETER.fields_by_name['engine'].enum_type = _TANHPARAMETER_ENGINE
_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER
_SPPPARAMETER.fields_by_name['pool'].enum_type = _SPPPARAMETER_POOLMETHOD
_SPPPARAMETER.fields_by_name['engine'].enum_type = _SPPPARAMETER_ENGINE
_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER
_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER
_V1LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name['type'].enum_type = _V1LAYERPARAMETER_LAYERTYPE
_V1LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_V1LAYERPARAMETER.fields_by_name['blob_share_mode'].enum_type = _V1LAYERPARAMETER_DIMCHECKMODE
_V1LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER
_V1LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER
_V1LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER
_V1LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER
_V1LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER
_V1LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER
_V1LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_V1LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER
_V1LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER
_V1LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER
_V1LAYERPARAMETER.fields_by_name['norm_param'].message_type = _NORMALIZEPARAMETER
_V1LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER
_V1LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER
_V1LAYERPARAMETER.fields_by_name['rand_cat_conv_param'].message_type = _RANDCATCONVPARAMETER
_V1LAYERPARAMETER.fields_by_name['rand_cat_param'].message_type = _RANDCATPARAMETER
_V1LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER
_V1LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER
_V1LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER
_V1LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER
_V1LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER
_V1LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER
_V1LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['layer'].message_type = _V0LAYERPARAMETER
_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER
_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER
_V0LAYERPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name['pool'].enum_type = _V0LAYERPARAMETER_POOLMETHOD
_V0LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_V0LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER
_PRELUPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
DESCRIPTOR.message_types_by_name['BlobShape'] = _BLOBSHAPE
DESCRIPTOR.message_types_by_name['BlobProto'] = _BLOBPROTO
DESCRIPTOR.message_types_by_name['BlobProtoVector'] = _BLOBPROTOVECTOR
DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
DESCRIPTOR.message_types_by_name['FillerParameter'] = _FILLERPARAMETER
DESCRIPTOR.message_types_by_name['NetParameter'] = _NETPARAMETER
DESCRIPTOR.message_types_by_name['SolverParameter'] = _SOLVERPARAMETER
DESCRIPTOR.message_types_by_name['SolverState'] = _SOLVERSTATE
DESCRIPTOR.message_types_by_name['NetState'] = _NETSTATE
DESCRIPTOR.message_types_by_name['NetStateRule'] = _NETSTATERULE
DESCRIPTOR.message_types_by_name['ParamSpec'] = _PARAMSPEC
DESCRIPTOR.message_types_by_name['LayerParameter'] = _LAYERPARAMETER
DESCRIPTOR.message_types_by_name['TransformationParameter'] = _TRANSFORMATIONPARAMETER
DESCRIPTOR.message_types_by_name['LossParameter'] = _LOSSPARAMETER
DESCRIPTOR.message_types_by_name['AccuracyParameter'] = _ACCURACYPARAMETER
DESCRIPTOR.message_types_by_name['ArgMaxParameter'] = _ARGMAXPARAMETER
DESCRIPTOR.message_types_by_name['ConcatParameter'] = _CONCATPARAMETER
DESCRIPTOR.message_types_by_name['BatchNormParameter'] = _BATCHNORMPARAMETER
DESCRIPTOR.message_types_by_name['BiasParameter'] = _BIASPARAMETER
DESCRIPTOR.message_types_by_name['ContrastiveLossParameter'] = _CONTRASTIVELOSSPARAMETER
DESCRIPTOR.message_types_by_name['ConvolutionParameter'] = _CONVOLUTIONPARAMETER
DESCRIPTOR.message_types_by_name['CropParameter'] = _CROPPARAMETER
DESCRIPTOR.message_types_by_name['DataParameter'] = _DATAPARAMETER
DESCRIPTOR.message_types_by_name['DropoutParameter'] = _DROPOUTPARAMETER
DESCRIPTOR.message_types_by_name['DummyDataParameter'] = _DUMMYDATAPARAMETER
DESCRIPTOR.message_types_by_name['EltwiseParameter'] = _ELTWISEPARAMETER
DESCRIPTOR.message_types_by_name['ELUParameter'] = _ELUPARAMETER
DESCRIPTOR.message_types_by_name['EmbedParameter'] = _EMBEDPARAMETER
DESCRIPTOR.message_types_by_name['ExpParameter'] = _EXPPARAMETER
DESCRIPTOR.message_types_by_name['FlattenParameter'] = _FLATTENPARAMETER
DESCRIPTOR.message_types_by_name['HDF5DataParameter'] = _HDF5DATAPARAMETER
DESCRIPTOR.message_types_by_name['HDF5OutputParameter'] = _HDF5OUTPUTPARAMETER
DESCRIPTOR.message_types_by_name['HingeLossParameter'] = _HINGELOSSPARAMETER
DESCRIPTOR.message_types_by_name['ImageDataParameter'] = _IMAGEDATAPARAMETER
DESCRIPTOR.message_types_by_name['InfogainLossParameter'] = _INFOGAINLOSSPARAMETER
DESCRIPTOR.message_types_by_name['InnerProductParameter'] = _INNERPRODUCTPARAMETER
DESCRIPTOR.message_types_by_name['InputParameter'] = _INPUTPARAMETER
DESCRIPTOR.message_types_by_name['LogParameter'] = _LOGPARAMETER
DESCRIPTOR.message_types_by_name['LRNParameter'] = _LRNPARAMETER
DESCRIPTOR.message_types_by_name['MemoryDataParameter'] = _MEMORYDATAPARAMETER
DESCRIPTOR.message_types_by_name['MVNParameter'] = _MVNPARAMETER
DESCRIPTOR.message_types_by_name['NormalizeParameter'] = _NORMALIZEPARAMETER
DESCRIPTOR.message_types_by_name['ParameterParameter'] = _PARAMETERPARAMETER
DESCRIPTOR.message_types_by_name['PoolingParameter'] = _POOLINGPARAMETER
DESCRIPTOR.message_types_by_name['PowerParameter'] = _POWERPARAMETER
DESCRIPTOR.message_types_by_name['PythonParameter'] = _PYTHONPARAMETER
DESCRIPTOR.message_types_by_name['RandCatParameter'] = _RANDCATPARAMETER
DESCRIPTOR.message_types_by_name['RandCatConvParameter'] = _RANDCATCONVPARAMETER
DESCRIPTOR.message_types_by_name['RandCompParameter'] = _RANDCOMPPARAMETER
DESCRIPTOR.message_types_by_name['RecurrentParameter'] = _RECURRENTPARAMETER
DESCRIPTOR.message_types_by_name['ReductionParameter'] = _REDUCTIONPARAMETER
DESCRIPTOR.message_types_by_name['ReLUParameter'] = _RELUPARAMETER
DESCRIPTOR.message_types_by_name['ReshapeParameter'] = _RESHAPEPARAMETER
DESCRIPTOR.message_types_by_name['ScaleParameter'] = _SCALEPARAMETER
DESCRIPTOR.message_types_by_name['SigmoidParameter'] = _SIGMOIDPARAMETER
DESCRIPTOR.message_types_by_name['SliceParameter'] = _SLICEPARAMETER
DESCRIPTOR.message_types_by_name['SoftmaxParameter'] = _SOFTMAXPARAMETER
DESCRIPTOR.message_types_by_name['TanHParameter'] = _TANHPARAMETER
DESCRIPTOR.message_types_by_name['TileParameter'] = _TILEPARAMETER
DESCRIPTOR.message_types_by_name['ThresholdParameter'] = _THRESHOLDPARAMETER
DESCRIPTOR.message_types_by_name['WindowDataParameter'] = _WINDOWDATAPARAMETER
DESCRIPTOR.message_types_by_name['SPPParameter'] = _SPPPARAMETER
DESCRIPTOR.message_types_by_name['V1LayerParameter'] = _V1LAYERPARAMETER
DESCRIPTOR.message_types_by_name['V0LayerParameter'] = _V0LAYERPARAMETER
DESCRIPTOR.message_types_by_name['PReLUParameter'] = _PRELUPARAMETER
DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict(
DESCRIPTOR = _BLOBSHAPE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BlobShape)
))
_sym_db.RegisterMessage(BlobShape)
BlobProto = _reflection.GeneratedProtocolMessageType('BlobProto', (_message.Message,), dict(
DESCRIPTOR = _BLOBPROTO,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BlobProto)
))
_sym_db.RegisterMessage(BlobProto)
BlobProtoVector = _reflection.GeneratedProtocolMessageType('BlobProtoVector', (_message.Message,), dict(
DESCRIPTOR = _BLOBPROTOVECTOR,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BlobProtoVector)
))
_sym_db.RegisterMessage(BlobProtoVector)
Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict(
DESCRIPTOR = _DATUM,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.Datum)
))
_sym_db.RegisterMessage(Datum)
FillerParameter = _reflection.GeneratedProtocolMessageType('FillerParameter', (_message.Message,), dict(
DESCRIPTOR = _FILLERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.FillerParameter)
))
_sym_db.RegisterMessage(FillerParameter)
NetParameter = _reflection.GeneratedProtocolMessageType('NetParameter', (_message.Message,), dict(
DESCRIPTOR = _NETPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.NetParameter)
))
_sym_db.RegisterMessage(NetParameter)
SolverParameter = _reflection.GeneratedProtocolMessageType('SolverParameter', (_message.Message,), dict(
DESCRIPTOR = _SOLVERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SolverParameter)
))
_sym_db.RegisterMessage(SolverParameter)
SolverState = _reflection.GeneratedProtocolMessageType('SolverState', (_message.Message,), dict(
DESCRIPTOR = _SOLVERSTATE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SolverState)
))
_sym_db.RegisterMessage(SolverState)
NetState = _reflection.GeneratedProtocolMessageType('NetState', (_message.Message,), dict(
DESCRIPTOR = _NETSTATE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.NetState)
))
_sym_db.RegisterMessage(NetState)
NetStateRule = _reflection.GeneratedProtocolMessageType('NetStateRule', (_message.Message,), dict(
DESCRIPTOR = _NETSTATERULE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.NetStateRule)
))
_sym_db.RegisterMessage(NetStateRule)
ParamSpec = _reflection.GeneratedProtocolMessageType('ParamSpec', (_message.Message,), dict(
DESCRIPTOR = _PARAMSPEC,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ParamSpec)
))
_sym_db.RegisterMessage(ParamSpec)
LayerParameter = _reflection.GeneratedProtocolMessageType('LayerParameter', (_message.Message,), dict(
DESCRIPTOR = _LAYERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LayerParameter)
))
_sym_db.RegisterMessage(LayerParameter)
TransformationParameter = _reflection.GeneratedProtocolMessageType('TransformationParameter', (_message.Message,), dict(
DESCRIPTOR = _TRANSFORMATIONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.TransformationParameter)
))
_sym_db.RegisterMessage(TransformationParameter)
LossParameter = _reflection.GeneratedProtocolMessageType('LossParameter', (_message.Message,), dict(
DESCRIPTOR = _LOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LossParameter)
))
_sym_db.RegisterMessage(LossParameter)
AccuracyParameter = _reflection.GeneratedProtocolMessageType('AccuracyParameter', (_message.Message,), dict(
DESCRIPTOR = _ACCURACYPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.AccuracyParameter)
))
_sym_db.RegisterMessage(AccuracyParameter)
ArgMaxParameter = _reflection.GeneratedProtocolMessageType('ArgMaxParameter', (_message.Message,), dict(
DESCRIPTOR = _ARGMAXPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ArgMaxParameter)
))
_sym_db.RegisterMessage(ArgMaxParameter)
ConcatParameter = _reflection.GeneratedProtocolMessageType('ConcatParameter', (_message.Message,), dict(
DESCRIPTOR = _CONCATPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ConcatParameter)
))
_sym_db.RegisterMessage(ConcatParameter)
BatchNormParameter = _reflection.GeneratedProtocolMessageType('BatchNormParameter', (_message.Message,), dict(
DESCRIPTOR = _BATCHNORMPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BatchNormParameter)
))
_sym_db.RegisterMessage(BatchNormParameter)
BiasParameter = _reflection.GeneratedProtocolMessageType('BiasParameter', (_message.Message,), dict(
DESCRIPTOR = _BIASPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BiasParameter)
))
_sym_db.RegisterMessage(BiasParameter)
ContrastiveLossParameter = _reflection.GeneratedProtocolMessageType('ContrastiveLossParameter', (_message.Message,), dict(
DESCRIPTOR = _CONTRASTIVELOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ContrastiveLossParameter)
))
_sym_db.RegisterMessage(ContrastiveLossParameter)
ConvolutionParameter = _reflection.GeneratedProtocolMessageType('ConvolutionParameter', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ConvolutionParameter)
))
_sym_db.RegisterMessage(ConvolutionParameter)
CropParameter = _reflection.GeneratedProtocolMessageType('CropParameter', (_message.Message,), dict(
DESCRIPTOR = _CROPPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.CropParameter)
))
_sym_db.RegisterMessage(CropParameter)
DataParameter = _reflection.GeneratedProtocolMessageType('DataParameter', (_message.Message,), dict(
DESCRIPTOR = _DATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.DataParameter)
))
_sym_db.RegisterMessage(DataParameter)
DropoutParameter = _reflection.GeneratedProtocolMessageType('DropoutParameter', (_message.Message,), dict(
DESCRIPTOR = _DROPOUTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.DropoutParameter)
))
_sym_db.RegisterMessage(DropoutParameter)
DummyDataParameter = _reflection.GeneratedProtocolMessageType('DummyDataParameter', (_message.Message,), dict(
DESCRIPTOR = _DUMMYDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.DummyDataParameter)
))
_sym_db.RegisterMessage(DummyDataParameter)
EltwiseParameter = _reflection.GeneratedProtocolMessageType('EltwiseParameter', (_message.Message,), dict(
DESCRIPTOR = _ELTWISEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.EltwiseParameter)
))
_sym_db.RegisterMessage(EltwiseParameter)
ELUParameter = _reflection.GeneratedProtocolMessageType('ELUParameter', (_message.Message,), dict(
DESCRIPTOR = _ELUPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ELUParameter)
))
_sym_db.RegisterMessage(ELUParameter)
EmbedParameter = _reflection.GeneratedProtocolMessageType('EmbedParameter', (_message.Message,), dict(
DESCRIPTOR = _EMBEDPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.EmbedParameter)
))
_sym_db.RegisterMessage(EmbedParameter)
ExpParameter = _reflection.GeneratedProtocolMessageType('ExpParameter', (_message.Message,), dict(
DESCRIPTOR = _EXPPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ExpParameter)
))
_sym_db.RegisterMessage(ExpParameter)
FlattenParameter = _reflection.GeneratedProtocolMessageType('FlattenParameter', (_message.Message,), dict(
DESCRIPTOR = _FLATTENPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.FlattenParameter)
))
_sym_db.RegisterMessage(FlattenParameter)
HDF5DataParameter = _reflection.GeneratedProtocolMessageType('HDF5DataParameter', (_message.Message,), dict(
DESCRIPTOR = _HDF5DATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.HDF5DataParameter)
))
_sym_db.RegisterMessage(HDF5DataParameter)
HDF5OutputParameter = _reflection.GeneratedProtocolMessageType('HDF5OutputParameter', (_message.Message,), dict(
DESCRIPTOR = _HDF5OUTPUTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.HDF5OutputParameter)
))
_sym_db.RegisterMessage(HDF5OutputParameter)
HingeLossParameter = _reflection.GeneratedProtocolMessageType('HingeLossParameter', (_message.Message,), dict(
DESCRIPTOR = _HINGELOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.HingeLossParameter)
))
_sym_db.RegisterMessage(HingeLossParameter)
ImageDataParameter = _reflection.GeneratedProtocolMessageType('ImageDataParameter', (_message.Message,), dict(
DESCRIPTOR = _IMAGEDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ImageDataParameter)
))
_sym_db.RegisterMessage(ImageDataParameter)
InfogainLossParameter = _reflection.GeneratedProtocolMessageType('InfogainLossParameter', (_message.Message,), dict(
DESCRIPTOR = _INFOGAINLOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.InfogainLossParameter)
))
_sym_db.RegisterMessage(InfogainLossParameter)
InnerProductParameter = _reflection.GeneratedProtocolMessageType('InnerProductParameter', (_message.Message,), dict(
DESCRIPTOR = _INNERPRODUCTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.InnerProductParameter)
))
_sym_db.RegisterMessage(InnerProductParameter)
InputParameter = _reflection.GeneratedProtocolMessageType('InputParameter', (_message.Message,), dict(
DESCRIPTOR = _INPUTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.InputParameter)
))
_sym_db.RegisterMessage(InputParameter)
LogParameter = _reflection.GeneratedProtocolMessageType('LogParameter', (_message.Message,), dict(
DESCRIPTOR = _LOGPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LogParameter)
))
_sym_db.RegisterMessage(LogParameter)
LRNParameter = _reflection.GeneratedProtocolMessageType('LRNParameter', (_message.Message,), dict(
DESCRIPTOR = _LRNPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LRNParameter)
))
_sym_db.RegisterMessage(LRNParameter)
MemoryDataParameter = _reflection.GeneratedProtocolMessageType('MemoryDataParameter', (_message.Message,), dict(
DESCRIPTOR = _MEMORYDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.MemoryDataParameter)
))
_sym_db.RegisterMessage(MemoryDataParameter)
MVNParameter = _reflection.GeneratedProtocolMessageType('MVNParameter', (_message.Message,), dict(
DESCRIPTOR = _MVNPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.MVNParameter)
))
_sym_db.RegisterMessage(MVNParameter)
NormalizeParameter = _reflection.GeneratedProtocolMessageType('NormalizeParameter', (_message.Message,), dict(
DESCRIPTOR = _NORMALIZEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.NormalizeParameter)
))
_sym_db.RegisterMessage(NormalizeParameter)
ParameterParameter = _reflection.GeneratedProtocolMessageType('ParameterParameter', (_message.Message,), dict(
DESCRIPTOR = _PARAMETERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ParameterParameter)
))
_sym_db.RegisterMessage(ParameterParameter)
PoolingParameter = _reflection.GeneratedProtocolMessageType('PoolingParameter', (_message.Message,), dict(
DESCRIPTOR = _POOLINGPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PoolingParameter)
))
_sym_db.RegisterMessage(PoolingParameter)
PowerParameter = _reflection.GeneratedProtocolMessageType('PowerParameter', (_message.Message,), dict(
DESCRIPTOR = _POWERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PowerParameter)
))
_sym_db.RegisterMessage(PowerParameter)
PythonParameter = _reflection.GeneratedProtocolMessageType('PythonParameter', (_message.Message,), dict(
DESCRIPTOR = _PYTHONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PythonParameter)
))
_sym_db.RegisterMessage(PythonParameter)
RandCatParameter = _reflection.GeneratedProtocolMessageType('RandCatParameter', (_message.Message,), dict(
DESCRIPTOR = _RANDCATPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.RandCatParameter)
))
_sym_db.RegisterMessage(RandCatParameter)
RandCatConvParameter = _reflection.GeneratedProtocolMessageType('RandCatConvParameter', (_message.Message,), dict(
DESCRIPTOR = _RANDCATCONVPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.RandCatConvParameter)
))
_sym_db.RegisterMessage(RandCatConvParameter)
RandCompParameter = _reflection.GeneratedProtocolMessageType('RandCompParameter', (_message.Message,), dict(
DESCRIPTOR = _RANDCOMPPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.RandCompParameter)
))
_sym_db.RegisterMessage(RandCompParameter)
RecurrentParameter = _reflection.GeneratedProtocolMessageType('RecurrentParameter', (_message.Message,), dict(
DESCRIPTOR = _RECURRENTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.RecurrentParameter)
))
_sym_db.RegisterMessage(RecurrentParameter)
ReductionParameter = _reflection.GeneratedProtocolMessageType('ReductionParameter', (_message.Message,), dict(
DESCRIPTOR = _REDUCTIONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ReductionParameter)
))
_sym_db.RegisterMessage(ReductionParameter)
ReLUParameter = _reflection.GeneratedProtocolMessageType('ReLUParameter', (_message.Message,), dict(
DESCRIPTOR = _RELUPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ReLUParameter)
))
_sym_db.RegisterMessage(ReLUParameter)
ReshapeParameter = _reflection.GeneratedProtocolMessageType('ReshapeParameter', (_message.Message,), dict(
DESCRIPTOR = _RESHAPEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ReshapeParameter)
))
_sym_db.RegisterMessage(ReshapeParameter)
ScaleParameter = _reflection.GeneratedProtocolMessageType('ScaleParameter', (_message.Message,), dict(
DESCRIPTOR = _SCALEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ScaleParameter)
))
_sym_db.RegisterMessage(ScaleParameter)
SigmoidParameter = _reflection.GeneratedProtocolMessageType('SigmoidParameter', (_message.Message,), dict(
DESCRIPTOR = _SIGMOIDPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SigmoidParameter)
))
_sym_db.RegisterMessage(SigmoidParameter)
SliceParameter = _reflection.GeneratedProtocolMessageType('SliceParameter', (_message.Message,), dict(
DESCRIPTOR = _SLICEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SliceParameter)
))
_sym_db.RegisterMessage(SliceParameter)
SoftmaxParameter = _reflection.GeneratedProtocolMessageType('SoftmaxParameter', (_message.Message,), dict(
DESCRIPTOR = _SOFTMAXPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SoftmaxParameter)
))
_sym_db.RegisterMessage(SoftmaxParameter)
TanHParameter = _reflection.GeneratedProtocolMessageType('TanHParameter', (_message.Message,), dict(
DESCRIPTOR = _TANHPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.TanHParameter)
))
_sym_db.RegisterMessage(TanHParameter)
TileParameter = _reflection.GeneratedProtocolMessageType('TileParameter', (_message.Message,), dict(
DESCRIPTOR = _TILEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.TileParameter)
))
_sym_db.RegisterMessage(TileParameter)
ThresholdParameter = _reflection.GeneratedProtocolMessageType('ThresholdParameter', (_message.Message,), dict(
DESCRIPTOR = _THRESHOLDPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ThresholdParameter)
))
_sym_db.RegisterMessage(ThresholdParameter)
WindowDataParameter = _reflection.GeneratedProtocolMessageType('WindowDataParameter', (_message.Message,), dict(
DESCRIPTOR = _WINDOWDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.WindowDataParameter)
))
_sym_db.RegisterMessage(WindowDataParameter)
SPPParameter = _reflection.GeneratedProtocolMessageType('SPPParameter', (_message.Message,), dict(
DESCRIPTOR = _SPPPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SPPParameter)
))
_sym_db.RegisterMessage(SPPParameter)
V1LayerParameter = _reflection.GeneratedProtocolMessageType('V1LayerParameter', (_message.Message,), dict(
DESCRIPTOR = _V1LAYERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.V1LayerParameter)
))
_sym_db.RegisterMessage(V1LayerParameter)
V0LayerParameter = _reflection.GeneratedProtocolMessageType('V0LayerParameter', (_message.Message,), dict(
DESCRIPTOR = _V0LAYERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.V0LayerParameter)
))
_sym_db.RegisterMessage(V0LayerParameter)
PReLUParameter = _reflection.GeneratedProtocolMessageType('PReLUParameter', (_message.Message,), dict(
DESCRIPTOR = _PRELUPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PReLUParameter)
))
_sym_db.RegisterMessage(PReLUParameter)
_BLOBSHAPE.fields_by_name['dim']._options = None
_BLOBPROTO.fields_by_name['data']._options = None
_BLOBPROTO.fields_by_name['diff']._options = None
_BLOBPROTO.fields_by_name['double_data']._options = None
_BLOBPROTO.fields_by_name['double_diff']._options = None
# @@protoc_insertion_point(module_scope)
|
[
"1317695730@qq.com"
] |
1317695730@qq.com
|
e6df2385a6326287632d959c1ff2c0c2e9cd841a
|
919452e2272a7b3a0a8483c69bcd39d9e4c2c736
|
/excepti.py
|
5f190dce616fbb301f05ae73a045e21bb0ef7ac1
|
[] |
no_license
|
preetam-patel/flask_programs
|
0c1fd82a30e896edbd5411b3e1a3f154b5974066
|
15a5220a9a009ce3f4d6662ea3452686dfe94ad3
|
refs/heads/master
| 2023-04-20T13:11:53.598997
| 2021-05-24T11:18:27
| 2021-05-24T11:18:27
| 351,060,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# Custom exception hierarchy for input validation.
class Error(Exception):
    """Base class for the exceptions in this module."""
class ValueTooSmall(Error):
    """Raised when the input value is smaller than allowed."""
class ValueTooLarge(Error):
    """Raised when the input value is larger than allowed."""
class ZeroNotDivisible(Error):
    """Raised when an operation would divide by zero."""
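# Hypothetical usage sketch (not part of the original file): a caller that
# validates a guess against a target number might raise and handle these as
#
#     try:
#         if guess < target:
#             raise ValueTooSmall
#         elif guess > target:
#             raise ValueTooLarge
#     except ValueTooSmall:
#         print("This value is too small, try again!")
#     except ValueTooLarge:
#         print("This value is too large, try again!")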
|
[
"preetampatel22699@gmail.com"
] |
preetampatel22699@gmail.com
|
cdee1160e9d5c6c4b3d2a2c4144253246dc2ff23
|
b896be9c0e136ff88a823333f20be150470f9e8d
|
/Week 6/DatabaseApp.wsgi
|
29854fbf69d4a1ebb5a37eea9475946e86028fc2
|
[] |
no_license
|
JoonasKulmala/Linux-Server-course
|
180e94e5ebb6d09c58fa3c94c3ef6598300a6d20
|
5fcb626def68f59ab66971b18c55bfd6c9be940f
|
refs/heads/main
| 2023-03-18T01:03:14.516917
| 2021-03-16T07:06:38
| 2021-03-16T07:06:38
| 332,947,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
wsgi
|
# Plain WSGI stub for checking the server setup before Flask is wired in.
def application(env, startResponse):
    startResponse("200 OK", [("Content-type", "text/plain")])
    return [b"See you at TeroKarvinen.com\n"]
# Hand requests to the Flask app; the import rebinds `application` for the server.
from DatabaseApp import app as application
|
[
"joonas.kulmala@protonmail.com"
] |
joonas.kulmala@protonmail.com
|
a4aaf49f16be1e0de36211c0a275da59240fabeb
|
6096e044b5ca95591de83695a4325d046597b713
|
/Final_code/read_lidar_lite.py
|
26ec4fab1b57ed7a2ca039f067d1c9a7353f02a5
|
[] |
no_license
|
flossielau/ARoD
|
b9fd9f876533836935384ceb9e7cace675671400
|
0afc2f519a668a2c5ec3333a1acdad8d2449f22d
|
refs/heads/main
| 2023-07-16T03:41:59.319177
| 2021-09-04T07:27:29
| 2021-09-04T07:27:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
import serial
# The Arduino streams LIDAR-Lite readings over USB serial, one distance per line.
arduino = serial.Serial('/dev/ttyACM0', 115200, timeout=1)
arduino.flush()
def lidar_lite():
    while True:
        # Read one line and strip the trailing newline.
        distance = arduino.readline().decode('utf-8').rstrip()
        # Overwrite the shared file so readers always see the latest value.
        with open("/home/pi/Desktop/lidar_lite_info.txt", "w") as output:
            output.write(distance)
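# Not in the original file: a conventional entry point so the polling loop
# actually runs when this script is executed directly.
if __name__ == '__main__':
    lidar_lite()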
|
[
"noreply@github.com"
] |
flossielau.noreply@github.com
|
0a53ab68989d286f013da079bf2fa922a9c6acde
|
8dd000d05a29cece1460fd48c4f6b12c56281ca1
|
/ugly/default_settings.py
|
e3dea2571f19db51dc2da821e5e99b015d7ad1a8
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
imclab/ugly
|
3e2717982b6e999c99d3e884d7f4d48d08fc7609
|
bc09834849184552619ee926d7563ed37630accb
|
refs/heads/master
| 2021-01-15T19:35:38.835572
| 2014-01-08T20:24:56
| 2014-01-08T20:24:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Flask stuff.
DEBUG = False
TESTING = False
SECRET_KEY = "development key"
# App stuff.
ADMIN_EMAIL = "Ugly Reader <ugly@dfm.io>"
BASE_MAILBOX = "[Ugly Reader]"
AES_KEY = b"test AES key... change this in production"
MAX_FEEDS = 100
# Database stuff.
SQLALCHEMY_DATABASE_URI = "postgresql://localhost/ugly"
# Google OAuth stuff.
GOOGLE_OAUTH2_CLIENT_ID = None
GOOGLE_OAUTH2_CLIENT_SECRET = None
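# These are development defaults; a deployment would typically override them,
# e.g. with Flask's app.config.from_pyfile() pointing at a production config.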
|
[
"danfm@nyu.edu"
] |
danfm@nyu.edu
|
8e4d97f37da8f8810b76dc4891344d88eb65c183
|
292f0c2a5985fb1bcdcfc9ac3a4acbfc9b528643
|
/MLB the Show 2019 V2/classesv2.py
|
dc9d2b20ccd5a70b7a6dbc0d98489c3c19695675
|
[] |
no_license
|
AtBatMac/19_The_Show
|
626c917b653d9840df757a650df42b26fc8a3017
|
d7b332f06b1adce363880581178a75fbef7e1cfa
|
refs/heads/master
| 2020-07-10T08:13:42.084410
| 2019-08-24T21:52:58
| 2019-08-24T21:52:58
| 204,215,166
| 0
| 0
| null | 2019-08-24T21:48:15
| 2019-08-24T21:34:19
| null |
UTF-8
|
Python
| false
| false
| 12,108
|
py
|
import random
import pandas as pd
import datetime as dt
class GlobalSwitch:
    # Flip to True to step through the simulation interactively.
    DEBUG = False
class Utilities:
    @staticmethod
    def Pause():
        if GlobalSwitch.DEBUG:
            input('Press any key to continue...')
class Base:
def __init__(self, name):
self.Name = name
self.PlayerHere = None
class Stat:
    def __init__(self, name, value_range):
        self.Name = name
        self.Value = value_range
class Pitcher:
def __init__(self, name, myTeam,
bid, avgouts, SingleR,
DoubleR, TripleR, HomeRunR,
WalkR, SOR, FlyGrR, DPR, source):
# general stats
self.Name = name
self.MyTeam = myTeam
self.BID = bid
self.Avgouts = avgouts
# base stats
self.SingleR = SingleR
self.DoubleR = DoubleR
self.TripleR = TripleR
self.HomeRunR = HomeRunR
self.WalkR = WalkR
self.SOR = SOR
self.FlyGrR = FlyGrR
self.DPR = DPR
self.Source = source
class Team:
def __init__(self, name):
self.Name = name
self.Score = 0
self.Players = None
self.AccumulatedOuts = 0
self.OutCount = 0
self.CurrentPlayer = None
self.CurrentIndex = 0
self.Pitcher = None
self.OpposingTeam = None
self.Pitchers = None
    def GetPlayer(self):
        # Wrap around to the top of the batting order.
        if self.CurrentIndex >= len(self.Players):
            self.CurrentIndex = 0
        return self.Players[self.CurrentIndex]
    def HalfInning(self, bases):
        # Send batters up until the side records three outs.
        teamIsIn = True
        while teamIsIn:
            OpposingPitcherTeam = self.OpposingTeam
            Activeplayer = self.GetPlayer()
            Activeplayer.swing(bases, OpposingPitcherTeam)
            self.CurrentIndex += 1
            Utilities.Pause()
            if self.OutCount >= 3:
                ResetBases(bases)
                teamIsIn = False
        return
# def PrintScore(self):
# print('DEBUG: Stats for ' + self.Name +
# ' Score: ' + str(self.Score) +
# ' Outs: ' + str(self.OutCount) +
# ' Total Outs: ' + str(self.AccumulatedOuts))
class Player:
def __init__(self, name, myTeam,
bid, SingleR,
DoubleR, TripleR, HomeRunR,
WalkR, SOR, FlyGrR, DPR, source):
# general stats
self.Name = name
self.MyTeam = myTeam
self.BID = bid
# base stats
self.SingleR = SingleR
self.DoubleR = DoubleR
self.TripleR = TripleR
self.HomeRunR = HomeRunR
self.WalkR = WalkR
self.SOR = SOR
self.FlyGrR = FlyGrR
self.DPR = DPR
self.Source = source
    def swing(self, allBases, Opposingteam):
        # Bring in the relief pitcher once the starter has recorded his
        # average number of outs, then resolve the at-bat.
        if self.MyTeam.AccumulatedOuts >= Opposingteam.Pitcher.Avgouts:
            Opposingteam.Pitcher = Opposingteam.Pitchers[1]
        swingResult = self.getSwingResult(Opposingteam.Pitcher)
        if 0 < swingResult < 4:
            # Batter reaches base: advance runners, then take the bag.
            self.AdjustBases(swingResult, allBases)
            allBases[swingResult].PlayerHere = self
        elif swingResult == 4:
            # Home run: clear the bases and score the batter.
            self.AdjustBases(swingResult, allBases)
            self.MyTeam.Score += 1
        elif swingResult == 0:
            self.MyTeam.OutCount += 1
            self.MyTeam.AccumulatedOuts += 1
def getSwingResult(self, OpposingPitcher):
#print(OpposingPitcher.Name + ' pitched to ' + self.Name + '\n')
result = -1
p = OpposingPitcher
        # Roll a number from 1-1000; where it lands decides the at-bat outcome.
        ball = random.randint(1, 1000)
        # Treat a missing triples rate as zero rather than crashing on None.
        if p.TripleR is None:
            p.TripleR = 0
        if self.TripleR is None:
            self.TripleR = 0
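        # Blend batter and pitcher: each outcome's weight is the rounded
        # average of the two per-1000 rates.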
singleC = int(round((p.SingleR + self.SingleR) / 2))
doubleC = int(round((p.DoubleR + self.DoubleR) / 2))
tripleC = int(round((p.TripleR + self.TripleR) / 2))
homerunC = int(round((p.HomeRunR + self.HomeRunR) / 2))
walkC = int(round((p.WalkR + self.WalkR) / 2))
soC = int(round((p.SOR + self.SOR) / 2))
FlyGrC = int(round((p.FlyGrR + self.FlyGrR) / 2))
DPC = int(round((p.DPR + self.DPR) / 2))
total_range = 1001
        if tripleC is None:
            tripleC = 0
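        # Build cumulative boundaries walking down from 1001; each outcome
        # owns the slice between its boundary and the previous one.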
single = int(round(total_range - singleC))
double = int(round(single - doubleC))
triple = int(round(double - tripleC))
homerun = int(round(triple - homerunC))
walk = int(round(homerun - walkC))
strikeout = int(round(walk - soC))
field_out = int(round(strikeout - FlyGrC))
double_play = int(round(field_out - DPC))
        if double is None:
            double = 0
# Converts range values to ranges per at bat result
DPR = range(0, field_out)
FlyGrR = range(field_out, strikeout)
SOR = range(strikeout, walk)
walkR = range(walk, homerun)
homerunR = range(homerun, triple)
tripleR = range(triple, double)
doubleR = range(double, single)
singleR = range(single, total_range)
# assigns result of ab depending on where ball is in range
if ball in DPR:
result = 0
elif ball in FlyGrR:
result = 0
elif ball in SOR:
result = 0
elif ball in walkR:
result = 1
elif ball in homerunR:
result = 4
elif ball in tripleR:
result = 3
elif ball in doubleR:
result = 2
elif ball in singleR:
result = 1
else:
print('error')
print(ball)
print(self.Name)
return result
def AdjustBases(self, swingResult, allBases):
count = 4
for base in reversed(allBases):
if (base.PlayerHere != None and count == 4 and swingResult >= 1):
count -= 1
# player is on Base 4 and swingResult is at least 1
# remove player from bases list and add 1 to teams score
base.PlayerHere = None
self.MyTeam.Score += 1
elif (base.PlayerHere != None and count == 3 and swingResult >= 2):
count -= 1
# player is on Base 3 and swingResult is at least 2
# remove player from bases list and add 1 to teams score
base.PlayerHere = None
self.MyTeam.Score += 1
elif (base.PlayerHere != None and count == 2 and swingResult >= 3):
count -= 1
# player is on Base 2 and swingResult is at least 3
# remove player from bases list and add 1 to teams score
base.PlayerHere = None
self.MyTeam.Score += 1
elif (base.PlayerHere != None and count == 1 and swingResult >= 4):
count -= 1
# player is on Base 1 and swingResult is at least 4
# remove player from bases list and add 1 to teams score
base.PlayerHere = None
self.MyTeam.Score += 1
elif (base.PlayerHere != None):
count -= 1
tempPlayer = base.PlayerHere
allBases[count + swingResult].PlayerHere = tempPlayer
base.PlayerHere = None
else:
count -= 1
class Game:
def __init__(self, team1, team2):
self.Team1 = team1
self.Team2 = team2
self.CurrentInning = 0
self.Team1wins = 0
self.Team2wins = 0
def PlayBall(self, bases):
for i in range(1, 10):
self.CurrentInning = i
self.Team1.HalfInning(bases)
self.Team1.OutCount = 0
self.Team2.HalfInning(bases)
self.Team2.OutCount = 0
#self.PrintCurrentResults()
# self.Team1.OpposingTeam = (Team2)
# self.Team2.OpposingTeam = (Team1)
Utilities.Pause()
def PlayRepeat(self, iterations, bases):
for i in range(0, iterations):
self.PlayBall(bases)
if self.Team1.Score > self.Team2.Score:
self.Team1wins += 1
elif self.Team2.Score > self.Team1.Score:
self.Team2wins += 1
else:
pass # for draw scenario
self.CurrentInning = 0
self.Team1.OutCount = 0
self.Team2.OutCount = 0
self.Team1.Score = 0
self.Team2.Score = 0
self.Team1.AccumulatedOuts = 0
self.Team2.AccumulatedOuts = 0
self.Team1.Pitcher = self.Team1.Pitchers[0]
self.Team2.Pitcher = self.Team2.Pitchers[0]
#print(self.Team1wins)
#print(self.Team2wins)
#print(self.Team1.Name)
#print(self.Team2.Name)
t1wins = self.Team1wins
t2wins = self.Team2wins
todays_date = dt.datetime.today().strftime("%d/%m/%Y")
totalgamewins = t1wins + t2wins
t1_win_percent = round(t1wins / totalgamewins, 2)
t2_win_percent = round(t2wins / totalgamewins, 2)
df = pd.read_csv('game_result_new.csv')
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.to_csv('game_result_old.csv')
game_series = [todays_date, self.Team1.Name, self.Team1.Pitcher.Name, t1_win_percent, self.Team2.Name,
self.Team2.Pitcher.Name, t2_win_percent]
print(game_series)
df1 = pd.DataFrame([game_series])
df1.columns = ['Date', 'away_team', 'away_pitcher', 'away_win_percent', 'home_team', 'home_pitcher',
'home_win_percent']
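        # DataFrame.append was removed in pandas 2.0; on newer pandas use
        # pd.concat([df, df1], sort=False) instead.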
df2 = df.append(df1, sort=False)
df2.to_csv('game_result_new.csv')
#def PrintCurrentResults(self):
#print('Inning ' + str(self.CurrentInning) + ': '
#+ self.Team1.Name + " " + str(self.Team1.Score)
#+ self.Team2.Name + " " + str(self.Team2.Score))
# general utility function
def ResetBases(bases):
for base in bases:
base.PlayerHere = None
|
[
"noreply@github.com"
] |
AtBatMac.noreply@github.com
|
fcd60992320670938ad1008e770d883589962908
|
8a35a12c48bfaaf7c7d36bb3dd13bb1a3e3189a2
|
/home/urls.py
|
5de6e595883ab1ea0f268eeb047f42dda1169531
|
[] |
no_license
|
Aurelian-Nkonde/Django-login-system-authentication-
|
e91eebf8e160aa29fb4544cd5008fb966dd06454
|
9a1178e7fc3ab54c1530bc928ab8c2cda1f17d29
|
refs/heads/master
| 2022-09-01T18:46:45.855765
| 2020-05-09T00:05:23
| 2020-05-09T00:05:23
| 262,452,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from django.urls import path
from .views import Home
urlpatterns = [
path('', Home.as_view(), name="home")
]
|
[
"aurelianrobot@gmail.com"
] |
aurelianrobot@gmail.com
|
7c8e6b5117040ebbb9607cd4947cdba6e45e554d
|
25b5f5a6c9c95ef9a1264eef12384003f037cb58
|
/solve_captchas.py
|
862ca107b30f7b716ad90a58011ea7509c054c67
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
RobTwoThree/Monocle-Level30
|
7dbe76ead87e489faff6ef68d267fbc3610d953e
|
94ed6e121ad41b437083bf207954c34ff8f312f9
|
refs/heads/master
| 2022-11-22T06:17:51.203216
| 2017-12-07T22:11:52
| 2017-12-07T22:11:52
| 113,503,725
| 1
| 1
|
MIT
| 2020-07-22T03:56:03
| 2017-12-07T22:12:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,691
|
py
|
#!/usr/bin/env python3
from asyncio import get_event_loop, sleep
from multiprocessing.managers import BaseManager
from time import time
from queue import Empty, Full
from aiopogo import PGoApi, close_sessions, activate_hash_server, exceptions as ex
from aiopogo.auth_ptc import AuthPtc
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from monocle import altitudes, sanitized as conf
from monocle.utils import get_device_info, get_address, randomize_point
from monocle.bounds import center
async def solve_captcha(url, api, driver, timestamp):
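    # Open the challenge in a real browser, wait (up to a day) for a human to
    # solve it, then submit the resulting token back through the API.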
driver.get(url)
WebDriverWait(driver, 86400).until(EC.text_to_be_present_in_element_value((By.NAME, "g-recaptcha-response"), ""))
driver.switch_to.frame(driver.find_element_by_xpath("//*/iframe[@title='recaptcha challenge']"))
token = driver.find_element_by_id("recaptcha-token").get_attribute("value")
request = api.create_request()
request.verify_challenge(token=token)
request.get_hatched_eggs()
request.get_inventory(last_timestamp_ms=timestamp)
request.check_awarded_badges()
request.get_buddy_walked()
request.check_challenge()
for attempt in range(-1, conf.MAX_RETRIES):
try:
responses = await request.call()
return responses['VERIFY_CHALLENGE'].success
except (ex.HashServerException, ex.MalformedResponseException, ex.ServerBusyOrOfflineException) as e:
if attempt == conf.MAX_RETRIES - 1:
raise
else:
print('{}, trying again soon.'.format(e))
await sleep(4)
except (KeyError, TypeError):
return False
async def main():
try:
class AccountManager(BaseManager): pass
AccountManager.register('captcha_queue')
AccountManager.register('extra_queue')
AccountManager.register('lv30_captcha_queue')
AccountManager.register('lv30_account_queue')
manager = AccountManager(address=get_address(), authkey=conf.AUTHKEY)
manager.connect()
captcha_queue = manager.captcha_queue()
extra_queue = manager.extra_queue()
lv30_captcha_queue = manager.lv30_captcha_queue()
lv30_account_queue = manager.lv30_account_queue()
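        # Route accounts by level: level-30+ accounts live on separate queues.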
def put_account_queue(account):
if account.get('level', 0) < 30:
extra_queue.put(account)
else:
lv30_account_queue.put(account)
def put_captcha_queue(account):
            if account.get('level', 0) < 30:
captcha_queue.put(account)
else:
lv30_captcha_queue.put(account)
if conf.GO_HASH:
hashkey = conf.GO_HASH_KEY
else:
hashkey = conf.HASH_KEY
activate_hash_server(hashkey,
go_hash=conf.GO_HASH,
hash_endpoint=conf.HASH_ENDPOINT,
gohash_endpoint=conf.GOHASH_ENDPOINT)
driver = webdriver.Chrome()
driver.set_window_size(803, 807)
while not captcha_queue.empty() or not lv30_captcha_queue.empty():
try:
account = captcha_queue.get()
except Empty:
try:
account = lv30_captcha_queue.get()
except Empty:
break
username = account.get('username')
location = account.get('location')
if location and location != (0,0,0):
lat = location[0]
lon = location[1]
else:
lat, lon = randomize_point(center, 0.0001)
try:
alt = altitudes.get((lat, lon))
except KeyError:
alt = await altitudes.fetch((lat, lon))
try:
device_info = get_device_info(account)
api = PGoApi(device_info=device_info)
api.set_position(lat, lon, alt)
authenticated = False
try:
if account['provider'] == 'ptc':
api.auth_provider = AuthPtc()
api.auth_provider._access_token = account['auth']
api.auth_provider._access_token_expiry = account['expiry']
if api.auth_provider.check_access_token():
api.auth_provider.authenticated = True
authenticated = True
except KeyError:
pass
if not authenticated:
await api.set_authentication(username=username,
password=account['password'],
provider=account.get('provider', 'ptc'))
request = api.create_request()
await request.call()
await sleep(.6)
request.download_remote_config_version(platform=1, app_version=8300)
request.check_challenge()
request.get_hatched_eggs()
request.get_inventory(last_timestamp_ms=account.get('inventory_timestamp', 0))
request.check_awarded_badges()
request.download_settings()
responses = await request.call()
account['time'] = time()
challenge_url = responses['CHECK_CHALLENGE'].challenge_url
timestamp = responses['GET_INVENTORY'].inventory_delta.new_timestamp_ms
account['location'] = lat, lon
account['inventory_timestamp'] = timestamp
if challenge_url == ' ':
account['captcha'] = False
print('No CAPTCHA was pending on {}.'.format(username))
put_account_queue(account)
else:
print('Trying to solve {}.'.format(username))
if await solve_captcha(challenge_url, api, driver, timestamp):
account['time'] = time()
account['captcha'] = False
print('Solved CAPTCHA for {}, putting back in rotation.'.format(username))
put_account_queue(account)
else:
account['time'] = time()
print('Failed to solve for {}'.format(username))
put_captcha_queue(account)
except KeyboardInterrupt:
put_captcha_queue(account)
break
except KeyError:
print('Unexpected or empty response for {}, putting back on queue.'.format(username))
put_captcha_queue(account)
try:
                    print(responses)
except Exception:
pass
await sleep(3)
except (ex.AuthException, ex.AuthTokenExpiredException) as e:
print('Authentication error on {}: {}'.format(username, e))
put_captcha_queue(account)
await sleep(3)
except ex.AiopogoError as e:
print('aiopogo error on {}: {}'.format(username, e))
put_captcha_queue(account)
await sleep(3)
except Exception:
put_captcha_queue(account)
raise
finally:
try:
driver.close()
close_sessions()
except Exception:
pass
if __name__ == '__main__':
loop = get_event_loop()
loop.run_until_complete(main())
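# A minimal sketch of the account dict this script pulls from the manager
# queues -- the field names are exactly the ones read above, the values are
# hypothetical:
#   {'username': 'user1', 'password': 'pass1', 'provider': 'ptc',
#    'level': 12, 'location': (-33.87, 151.21, 10.0)}
# 'auth' and 'expiry' are optional cached PTC tokens; 'time', 'captcha' and
# 'inventory_timestamp' are filled in by this script as it runs.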
|
[
"rsayson31@gmail.com"
] |
rsayson31@gmail.com
|
7eb6478f752ca6bb7866a7e1f80dbd03688794f0
|
e01389a37561a0eff13eaab40e12acea6935fe32
|
/app/main.py
|
46702cb928707337e19f711c90360a3715c2f2d9
|
[] |
no_license
|
fabianbrash/py-fastapi-01
|
7f777ea29f72e7b6803c87bf750fabc21153e2d7
|
3c6f0cf10de8505d408b759aa216f13640d9e6f4
|
refs/heads/main
| 2023-03-14T22:19:23.589554
| 2021-03-25T17:00:45
| 2021-03-25T17:00:45
| 351,512,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
from fastapi import FastAPI
from typing import Optional
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
origins = [
"http://localhost",
"https://localhost",
"http://localhost:3000",
"https://localhost:3000"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["GET"],
allow_headers=["*"],
)
@app.get("/")
async def read_root():
return {"message": "Hello World"}
@app.get("/items/{item_id}")
async def read_item(item_id: int, q: Optional[str] = None):
    return {"item_id": item_id, "q": q}
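if __name__ == "__main__":
    # A quick smoke test with FastAPI's TestClient -- a sketch, not part of the
    # original app; it only runs when this module is executed directly.
    from fastapi.testclient import TestClient

    client = TestClient(app)
    assert client.get("/").json() == {"message": "Hello World"}
    assert client.get("/items/5", params={"q": "abc"}).json() == {"item_id": 5, "q": "abc"}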
|
[
"fabianbrash@gmail.com"
] |
fabianbrash@gmail.com
|
1856c7c864ac34d62c6c9bc7de93fbbd76a236f0
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2009.1/x11/terminal/xterm/actions.py
|
0bc996b345bff3ffad1468eeeffc1e93bc0c3d83
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
shelltools.export("CC", get.CC())
autotools.configure(" \
--disable-full-tgetent \
--with-app-defaults=/usr/share/X11/app-defaults \
--disable-desktop \
--with-utempter \
--with-tty-group=tty \
--enable-256-color \
--enable-exec-xterm \
--enable-freetype \
--enable-luit \
--enable-wide-chars \
--enable-warnings \
")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.removeDir("/usr/share/pixmaps")
pisitools.dodoc("README.i18n", "xterm.log.html", "ctlseqs.txt", "16colors.txt")
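# As in other PiSi actions.py scripts, pisi calls these entry points in order
# during packaging: setup() -> build() -> install(); the file is not run directly.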
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
60a2a263c9e08bdab0158b8361014feb52d55f62
|
816226d25e4a70fd29c7761f87c11a2d748e5a3f
|
/L7_Stacks and Queues/7.2_Fish.py
|
4090247ff189f1b667f611c34350766a3e234730
|
[] |
no_license
|
yuelan313/Codility-Lessons-By-Python3
|
6e1595af58c3af2c9cb638aefbfffb4c2f916714
|
ccd0aae6af0fd9d68f080c468cad207f34c36b12
|
refs/heads/master
| 2023-03-16T13:29:22.270786
| 2019-03-25T05:56:25
| 2019-03-25T05:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
# -*- coding:utf-8 -*-
# &Author AnFany
# Lesson 7: Stacks and Queues
# P 7.2 Fish
def solution(A, B):
"""
    When two fish meet, the bigger fish eats the smaller one.
    :param A: A gives the sizes of the fish
    :param B: B gives the directions the fish swim in (0 = upstream, 1 = downstream)
    :return: the number of fish left alive
"""
alive_fish = 0
    fish_down = []  # stack of the fish swimming downstream
for index, value in enumerate(B):
if value == 0:
if len(fish_down) == 0:
alive_fish += 1
else:
                # resolve the encounters: the upstream fish eats smaller downstream fish
try:
while fish_down[-1] < A[index]:
fish_down.pop(-1)
except IndexError:
alive_fish += 1
else:
fish_down.append(A[index])
return alive_fish + len(fish_down)
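if __name__ == '__main__':
    # The classic Codility sample: sizes [4, 3, 2, 1, 5] with directions
    # [0, 1, 0, 0, 0] leave two fish alive, so this prints 2.
    print(solution([4, 3, 2, 1, 5], [0, 1, 0, 0, 0]))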
|
[
"noreply@github.com"
] |
yuelan313.noreply@github.com
|
57fec9d9d6b9b4b108b47c61da6a3c3f9a922f9f
|
42cd876257807de1c851543cabcad9101b6d8170
|
/cogs/events/TwitterLike.py
|
8482388dc93006012444e1177e8585518edda9a2
|
[
"MIT"
] |
permissive
|
RuCybernetic/CyberTyanBot
|
3e6b7a340b3b2e7ba01a780a97ad42117a38b677
|
b2eee274d20833073cceb9bff31c713b2116ffd2
|
refs/heads/master
| 2022-04-06T04:10:50.474026
| 2020-02-02T07:50:53
| 2020-02-02T07:50:53
| 236,075,801
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
from discord.ext import commands
import discord
class TwitCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
if message.content.startswith('https://twitter.com/'):
await message.add_reaction('<:like:656406179471294465>')
await message.add_reaction('<:dislike:656406199490576384>')
def setup(bot):
bot.add_cog(TwitCog(bot))
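# A sketch of how this cog is typically loaded from the bot's entry point
# (the dotted path mirrors this file's location, cogs/events/TwitterLike.py):
#   bot.load_extension('cogs.events.TwitterLike')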
|
[
"noreply@github.com"
] |
RuCybernetic.noreply@github.com
|
f700a0aaf43c7cb416ac0aacb1ffd9172706fe7c
|
822f463760c085503bb5bb77b467fea7c050bd7a
|
/main.py
|
58358fc68ec2af633f388f484b2a38db9e29ac05
|
[] |
no_license
|
andreYoo/AutoCrwaler_fixed
|
c3969aa2805d0adcfabf2026fec4b692d8339714
|
9317c69ef7132afa7fd023b8ab2c2660971384d5
|
refs/heads/master
| 2021-01-02T15:37:28.648044
| 2020-02-11T05:45:11
| 2020-02-11T05:45:11
| 239,685,403
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,392
|
py
|
"""
Copyright 2018 YoongiKim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import requests
import shutil
from multiprocessing import Pool
import argparse
from collect_links import CollectLinks
import imghdr
class Sites:
GOOGLE = 1
NAVER = 2
GOOGLE_FULL = 3
NAVER_FULL = 4
@staticmethod
def get_text(code):
if code == Sites.GOOGLE:
return 'google'
elif code == Sites.NAVER:
return 'naver'
elif code == Sites.GOOGLE_FULL:
return 'google'
elif code == Sites.NAVER_FULL:
return 'naver'
@staticmethod
def get_face_url(code):
        if code == Sites.GOOGLE or code == Sites.GOOGLE_FULL:
            return "&tbs=itp:face"
        if code == Sites.NAVER or code == Sites.NAVER_FULL:
return "&face=1"
class AutoCrawler:
def __init__(self, skip_already_exist=True, n_threads=4, do_google=True, do_naver=True, download_path='download',
full_resolution=False, face=False):
"""
:param skip_already_exist: Skips keyword already downloaded before. This is needed when re-downloading.
:param n_threads: Number of threads to download.
:param do_google: Download from google.com (boolean)
:param do_naver: Download from naver.com (boolean)
:param download_path: Download folder path
:param full_resolution: Download full resolution image instead of thumbnails (slow)
:param face: Face search mode
"""
self.skip = skip_already_exist
self.n_threads = n_threads
self.do_google = do_google
self.do_naver = do_naver
self.download_path = download_path
self.full_resolution = full_resolution
self.face = face
os.makedirs('./{}'.format(self.download_path), exist_ok=True)
@staticmethod
def all_dirs(path):
paths = []
for dir in os.listdir(path):
if os.path.isdir(path + '/' + dir):
paths.append(path + '/' + dir)
return paths
@staticmethod
def all_files(path):
paths = []
for root, dirs, files in os.walk(path):
for file in files:
                if os.path.isfile(os.path.join(root, file)):
                    paths.append(os.path.join(root, file))
return paths
@staticmethod
def get_extension_from_link(link, default='jpg'):
splits = str(link).split('.')
if len(splits) == 0:
return default
ext = splits[-1].lower()
if ext == 'jpg' or ext == 'jpeg':
return 'jpg'
elif ext == 'gif':
return 'gif'
elif ext == 'png':
return 'png'
else:
return default
@staticmethod
def validate_image(path):
ext = imghdr.what(path)
if ext == 'jpeg':
ext = 'jpg'
return ext # returns None if not valid
@staticmethod
def make_dir(dirname):
current_path = os.getcwd()
path = os.path.join(current_path, dirname)
if not os.path.exists(path):
os.makedirs(path)
@staticmethod
def get_keywords(keywords_file='keywords.txt'):
# read search keywords from file
with open(keywords_file, 'r', encoding='utf-8-sig') as f:
text = f.read()
lines = text.split('\n')
lines = filter(lambda x: x != '' and x is not None, lines)
keywords = sorted(set(lines))
print('{} keywords found: {}'.format(len(keywords), keywords))
# re-save sorted keywords
with open(keywords_file, 'w+', encoding='utf-8') as f:
for keyword in keywords:
f.write('{}\n'.format(keyword))
return keywords
@staticmethod
def save_object_to_file(object, file_path):
try:
with open('{}'.format(file_path), 'wb') as file:
shutil.copyfileobj(object.raw, file)
except Exception as e:
print('Save failed - {}'.format(e))
def download_images(self, keyword, links, site_name):
self.make_dir('{}/{}'.format(self.download_path, keyword))
total = len(links)
print(total)
for index, link in enumerate(links):
try:
print('Downloading {} from {}: {} / {}'.format(keyword, site_name, index + 1, total))
response = requests.get(link, stream=True)
ext = self.get_extension_from_link(link)
no_ext_path = '{}/{}/{}_{}'.format(self.download_path, keyword, site_name, str(index).zfill(4))
path = no_ext_path + '.' + ext
self.save_object_to_file(response, path)
del response
ext2 = self.validate_image(path)
if ext2 is None:
print('Unreadable file - {}'.format(link))
os.remove(path)
else:
if ext != ext2:
path2 = no_ext_path + '.' + ext2
os.rename(path, path2)
print('Renamed extension {} -> {}'.format(ext, ext2))
except Exception as e:
print('Download failed - ', e)
continue
def download_from_site(self, keyword, site_code):
site_name = Sites.get_text(site_code)
add_url = Sites.get_face_url(site_code) if self.face else ""
try:
collect = CollectLinks() # initialize chrome driver
except Exception as e:
print('Error occurred while initializing chromedriver - {}'.format(e))
return
try:
print('Collecting links... {} from {}'.format(keyword, site_name))
if site_code == Sites.GOOGLE:
links = collect.google(keyword, add_url)
elif site_code == Sites.NAVER:
links = collect.naver(keyword, add_url)
elif site_code == Sites.GOOGLE_FULL:
links = collect.google_full(keyword, add_url)
elif site_code == Sites.NAVER_FULL:
links = collect.naver_full(keyword, add_url)
else:
print('Invalid Site Code')
links = []
print('Downloading images from collected links... {} from {}'.format(keyword, site_name))
print(links)
self.download_images(keyword, links, site_name)
print('Done {} : {}'.format(site_name, keyword))
except Exception as e:
print('Exception {}:{} - {}'.format(site_name, keyword, e))
def download(self, args):
self.download_from_site(keyword=args[0], site_code=args[1])
def do_crawling(self):
keywords = self.get_keywords()
tasks = []
for keyword in keywords:
dir_name = '{}/{}'.format(self.download_path, keyword)
'''
if os.path.exists(os.path.join(os.getcwd(), dir_name)) and self.skip:
print('Skipping already existing directory {}'.format(dir_name))
continue
'''
if self.do_google:
if self.full_resolution:
tasks.append([keyword, Sites.GOOGLE_FULL])
else:
tasks.append([keyword, Sites.GOOGLE])
if self.do_naver:
if self.full_resolution:
tasks.append([keyword, Sites.NAVER_FULL])
else:
tasks.append([keyword, Sites.NAVER])
pool = Pool(self.n_threads)
pool.map_async(self.download, tasks)
pool.close()
pool.join()
print('Task ended. Pool join.')
self.imbalance_check()
print('End Program')
def imbalance_check(self):
print('Data imbalance checking...')
dict_num_files = {}
for dir in self.all_dirs(self.download_path):
n_files = len(self.all_files(dir))
dict_num_files[dir] = n_files
avg = 0
for dir, n_files in dict_num_files.items():
avg += n_files / len(dict_num_files)
print('dir: {}, file_count: {}'.format(dir, n_files))
dict_too_small = {}
for dir, n_files in dict_num_files.items():
if n_files < avg * 0.5:
dict_too_small[dir] = n_files
if len(dict_too_small) >= 1:
print('Data imbalance detected.')
print('Below keywords have smaller than 50% of average file count.')
print('I recommend you to remove these directories and re-download for that keyword.')
print('_________________________________')
print('Too small file count directories:')
for dir, n_files in dict_too_small.items():
print('dir: {}, file_count: {}'.format(dir, n_files))
print("Remove directories above? (y/n)")
answer = input()
if answer == 'y':
# removing directories too small files
print("Removing too small file count directories...")
for dir, n_files in dict_too_small.items():
shutil.rmtree(dir)
print('Removed {}'.format(dir))
print('Now re-run this program to re-download removed files. (with skip_already_exist=True)')
else:
print('Data imbalance not detected.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--skip', type=str, default='false',
help='Skips keyword already downloaded before. This is needed when re-downloading.')
parser.add_argument('--threads', type=int, default=3, help='Number of threads to download.')
parser.add_argument('--google', type=str, default='true', help='Download from google.com (boolean)')
parser.add_argument('--naver', type=str, default='true', help='Download from naver.com (boolean)')
parser.add_argument('--full', type=str, default='true', help='Download full resolution image instead of thumbnails (slow)')
parser.add_argument('--face', type=str, default='false', help='Face search mode')
args = parser.parse_args()
_skip = False if str(args.skip).lower() == 'false' else True
_threads = args.threads
_google = False if str(args.google).lower() == 'false' else True
_naver = False if str(args.naver).lower() == 'false' else True
_full = False if str(args.full).lower() == 'false' else True
_face = False if str(args.face).lower() == 'false' else True
print('Options - skip:{}, threads:{}, google:{}, naver:{}, full_resolution:{}, face:{}'.format(_skip, _threads, _google, _naver, _full, _face))
crawler = AutoCrawler(skip_already_exist=_skip, n_threads=_threads, do_google=_google, do_naver=_naver, full_resolution=_full, face=_face)
crawler.do_crawling()
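# Example invocation (a sketch built from the flags defined above; keywords are
# read one per line from keywords.txt and images land in ./download/<keyword>/):
#   python3 main.py --threads 4 --google true --naver false --full false --face false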
|
[
"jm.andrew.yu@gmail.com"
] |
jm.andrew.yu@gmail.com
|
4464ee352e6953270b9b98f495b875594be09a66
|
5a81a8e9710298e4639b0f3fd9e22d9bb9d35560
|
/Servo_Driver/__init__.py
|
6393a3de99093c117c66de3fe708a246ea4a51e0
|
[] |
no_license
|
Limbotics/linux_dev
|
debf5b01eb1b5041b83987fb0fd1d0aac395a8ab
|
2879c9a361bf0f05cb90e76d736a479196e98130
|
refs/heads/main
| 2023-08-05T07:22:47.543617
| 2021-09-03T03:50:41
| 2021-09-03T03:50:41
| 366,222,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
# __init__.py
#from .servo import *
|
[
"jeredtbell@gmail.com"
] |
jeredtbell@gmail.com
|
187f7fe3a660bd7d1b2b24c4caec7670cda6b6d0
|
c4adf55f32a885ca73af9cc37a5276cd86f637ac
|
/gcj2010QRA.py
|
de09181308bacfae3c0d6115d3a2e7dfcf45837f
|
[] |
no_license
|
jasy/gcj
|
6f2c6a3df1535737ce0bea0937ba56de3a5500c9
|
0fa244f6d28f3486abd4cdc8e44fd4efc841dbca
|
refs/heads/master
| 2021-01-19T02:01:44.797406
| 2020-04-12T05:54:23
| 2020-04-12T05:54:23
| 3,659,840
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
#!/usr/bin/env python
import sys
def solve():
N,K = map(int,sys.stdin.readline().split())
b = (1<<N)-1
return "ON" if (K&b)==b else "OFF"
for x in range(int(sys.stdin.readline())):
print("Case #"+str(x+1)+": "+str(solve()))
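# Worked example: with N = 3 snappers, b = (1 << 3) - 1 = 0b111, so the light is
# "ON" only when the lowest three bits of K are all set: K = 7 gives "ON", while
# K = 6 (0b110) gives "OFF" because the first snapper is off.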
|
[
"st@step.skr.jp"
] |
st@step.skr.jp
|
19a5eb94d0a3c8ccb52b085d6825e08f5a8062ca
|
51f2492a5c207e3664de8f6b2d54bb93e313ca63
|
/atcoder/soundhound2018-summer-qual/c.py
|
93091c2550ea9792540a7ddf7fe97eb7d9c2060f
|
[
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abeaumont/competitive-programming
|
23c5aabd587d7bb15a61efd3428838cb934233dd
|
a24c9b89941a59d344b51dc1010de66522b1a0dd
|
refs/heads/master
| 2023-09-01T09:50:58.267361
| 2023-07-31T18:00:10
| 2023-07-31T18:00:10
| 117,589,708
| 618
| 262
|
WTFPL
| 2023-07-12T17:36:20
| 2018-01-15T20:00:56
|
C++
|
UTF-8
|
Python
| false
| false
| 296
|
py
|
#!/usr/bin/env python3
# https://soundhound2018-summer-qual.contest.atcoder.jp/tasks/soundhound2018_summer_qual_c
n, m, d = map(int, input().split())
if d == 0: print('{:.10f}'.format((m - 1) / n))
else:
t = n * (n - 1) // 2
print('{:.10f}'.format((m - 1) * (n - 1) * (n - d) / (t * n)))
|
[
"alfredo.beaumont@gmail.com"
] |
alfredo.beaumont@gmail.com
|
950d053477144b65b625fcd8933469df6ad5558c
|
80a0f0352ce0938fd34f697d7af65ad6305ff796
|
/chapter2 variables and datatypes/05pr01add.py
|
16767d190cf0a392dac0f783d3c225e0528a5196
|
[] |
no_license
|
ramyashah27/chapter-2-and-3-strings-and-variables
|
76975d00dfe7b5d9a577b74caef96b27c00b26ef
|
e1b772672d6a575c7a18719a1b08939debe4c7f1
|
refs/heads/main
| 2023-02-10T19:23:37.863244
| 2021-01-04T16:05:01
| 2021-01-04T16:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
# a=34
# b=45
# print('the sum of two integers is', a+b)
# c=4000
# d=5
# print("the division of 2 numbers is", c/d)
a=45
b=50
print(a > b)  # prints False: 45 is not greater than 50
|
[
"noreply@github.com"
] |
ramyashah27.noreply@github.com
|
8a2eb862ad50edda68a729d3dc9f11fc97df64e8
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/examples/docs_snippets/docs_snippets_tests/concepts_tests/resources_tests/test_resources.py
|
5e07b899452a7f25971b2a9d834e8dd7bb8a8a0f
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
from dagster import build_init_resource_context, build_op_context
from docs_snippets.concepts.resources.resources import (
cereal_fetcher,
connect,
db_connection,
db_resource,
do_database_stuff_dev,
do_database_stuff_job,
do_database_stuff_prod,
op_requires_resources,
test_cm_resource,
test_my_resource,
test_my_resource_with_context,
use_db_connection,
uses_db_connection,
)
def test_cereal_fetcher():
assert cereal_fetcher(None)
def test_database_resource():
class BasicDatabase:
def execute_query(self, query):
pass
op_requires_resources(build_op_context(resources={"database": BasicDatabase()}))
def test_resource_testing_examples():
test_my_resource()
test_my_resource_with_context()
test_cm_resource()
def test_resource_deps_job():
result = connect.execute_in_process()
assert result.success
def test_resource_config_example():
dbconn = db_resource(build_init_resource_context(config={"connection": "foo"}))
assert dbconn.connection == "foo"
def test_jobs():
assert do_database_stuff_job.execute_in_process().success
assert do_database_stuff_dev.execute_in_process().success
assert do_database_stuff_prod.execute_in_process().success
def test_cm_resource_example():
with db_connection() as db_conn:
assert db_conn
def test_cm_resource_op():
with build_op_context(resources={"db_connection": db_connection}) as context:
use_db_connection(context)
def test_build_resources_example():
uses_db_connection()
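# These tests are ordinarily collected by pytest, e.g. (path as in this repo):
#   pytest docs_snippets_tests/concepts_tests/resources_tests/test_resources.py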
|
[
"noreply@github.com"
] |
dagster-io.noreply@github.com
|
4fc6e71150033e7e6982368eb672194ded2582d3
|
64cdb9e8fdcde8a71a16ce17cd822441d9533936
|
/_programmers/불량사용자.py
|
ef6e00fbff213c4501137c5169fd2ee1345645d2
|
[] |
no_license
|
heecheol1508/algorithm-problem
|
fa42769f0f2f2300e4e463c5731e0246d7b7643c
|
6849b355e15f8a538c9a071b0783d1789316d29d
|
refs/heads/main
| 2023-07-20T23:46:07.037975
| 2021-08-31T12:47:33
| 2021-08-31T12:47:33
| 302,830,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
import re
def solution(user_id, banned_id):
user_number = {}
for i in range(len(user_id)):
user_number[user_id[i]] = str(i)
banned_list = ['' for _ in range(len(banned_id))]
for i in range(len(banned_id)):
ban = banned_id[i].replace('*', '.')
pat = re.compile(ban)
for j in range(len(user_id)):
if pat.match(user_id[j]) and len(ban) == len(user_id[j]):
banned_list[i] += str(j)
banned_list.sort(key=lambda x: len(x))
result = set()
def recursion(arr, k):
if k == len(banned_list):
temp = sorted(arr)
result.add(''.join(temp))
return
else:
for n in banned_list[k]:
if n not in arr:
recursion(arr+[n], k+1)
recursion([], 0)
answer = len(result)
return answer
print(solution(["frodo", "fradi", "crodo", "abc123", "frodoc"], ["fr*d*", "abc1**"]))
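# The Programmers sample above yields two distinct sets of banned users, so this prints 2.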
|
[
"52685275+heecheol1508@users.noreply.github.com"
] |
52685275+heecheol1508@users.noreply.github.com
|
922425380433ba1c7dffa8e22f154dd349c86484
|
0ffc74454f804f8b317dea202961c2bef5193085
|
/contrib/seeds/makeseeds.py
|
a6c40bae8266b05a8ad7834632aa3d92f774a0a2
|
[
"MIT"
] |
permissive
|
cryptobot123/learnium
|
1bc1b2e69215dab59ed9fee63a8bd9da2ed06121
|
2ce4867c5b80acd4fe1be9cd29b9cddfab9dea0a
|
refs/heads/main
| 2023-01-03T19:50:44.217685
| 2020-10-30T21:08:32
| 2020-10-30T21:08:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,518
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/LearniumCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if sline[1] == '0':
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts that appear with more than one port per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
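# Typical usage (a sketch): pipe the DNS seeder's dump through stdin, e.g.
#   python3 makeseeds.py < seeds_main.txt > nodes_main.txt
# (file names are illustrative). dnspython must be installed for the ASN
# lookups performed in filterbyasn().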
|
[
"72593731+learniumcoin@users.noreply.github.com"
] |
72593731+learniumcoin@users.noreply.github.com
|
d788fe2d0c7ace0bb7b8435095741e0a107647bd
|
8d8a354bf5fff4f7b0eca70c23b63dd494a90f74
|
/dev/services/wms/ows/wms_cfg.py
|
46eb0aa716b4fc1b0a016ab30d4e2ea95516956e
|
[
"Apache-2.0"
] |
permissive
|
omad/dea-config
|
0df4d5a715957f4e6b26a07c77690889904eb5ef
|
5767601584a1f2aec1ffea8983c3a226e32013b0
|
refs/heads/master
| 2020-03-26T12:08:35.979717
| 2018-08-09T01:51:31
| 2018-08-09T01:51:31
| 144,877,682
| 0
| 0
| null | 2018-08-15T16:34:41
| 2018-08-15T16:34:41
| null |
UTF-8
|
Python
| false
| false
| 60,688
|
py
|
# Static config for the wms metadata.
response_cfg = {
"Access-Control-Allow-Origin": "*", # CORS header
}
service_cfg = {
## Which web service(s) should be supported by this instance
"wcs": False,
"wms": True,
## Required config for WMS and/or WCS
# Service title - appears e.g. in Terria catalog
"title": "WMS server for Australian NBART Datacube products",
    # Service URL. Should be a fully qualified URL.
"url": "https://ows.wms.gadevs.ga",
# Supported co-ordinate reference systems
"published_CRSs": {
"EPSG:3857": { # Web Mercator
"geographic": False,
"horizontal_coord": "x",
"vertical_coord": "y",
},
"EPSG:4326": { # WGS-84
"geographic": True,
"vertical_coord_first": True
},
"EPSG:3577": { # GDA-94, internal representation
"geographic": False,
"horizontal_coord": "x",
"vertical_coord": "y",
},
},
## Required config for WCS
# Must be a geographic CRS in the published_CRSs list. EPSG:4326 is recommended, but any geographic CRS should work.
"default_geographic_CRS": "EPSG:4326",
# Supported WCS formats
"wcs_formats": {
# Key is the format name, as used in DescribeCoverage XML
"GeoTIFF": {
# Renderer is the FQN of a Python function that takes:
# * A WCS Request object
# * Some ODC data to be rendered.
"renderer": "datacube_wms.wcs_utils.get_tiff",
# The MIME type of the image, as used in the Http Response.
"mime": "image/geotiff",
# The file extension to add to the filename.
"extension": "tif",
# Whether or not the file format supports multiple time slices.
"multi-time": False
},
"netCDF": {
"renderer": "datacube_wms.wcs_utils.get_netcdf",
"mime": "application/x-netcdf",
"extension": "nc",
"multi-time": True,
}
},
# The native wcs format must be declared in wcs_formats above.
"native_wcs_format": "GeoTIFF",
## Optional config for instances supporting WMS
    # Max tile height/width. If not specified, defaults to 256x256
"max_width": 512,
"max_height": 512,
# Optional config for all services (WMS and/or WCS) - may be set to blank/empty, no defaults
"abstract": """Historic Landsat imagery for Australia.""",
"keywords": [
"geomedian",
"WOfS",
"mangrove",
"landsat",
"australia",
"time-series",
],
"contact_info": {
"person": "Digital Earth Australia",
"organisation": "Geoscience Australia",
"position": "",
"address": {
"type": "postal",
"address": "GPO Box 378",
"city": "Canberra",
"state": "ACT",
"postcode": "2609",
"country": "Australia",
},
"telephone": "+61 2 6249 9111",
"fax": "",
"email": "earth.observation@ga.gov.au",
},
"fees": "",
"access_constraints": "",
}
layer_cfg = [
# Layer Config is a list of platform configs
{
"name": "fractional cover",
"title": "Fractional Cover",
"abstract": "Fractional Cover",
"products": [
{
# Included as a keyword for the layer
"label": "FC",
# Included as a keyword for the layer
"type": "fractional cover",
# Included as a keyword for the layer
"variant": "terrain corrected",
# The WMS name for the layer
"name": "ls8_fc_albers",
# The Datacube name for the associated data product
"product_name": "ls8_fc_albers",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
"pq_dataset": "wofs_albers",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
"pq_band": "water",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
            # Time Zone. In hours added to UTC (may be negative)
            # Used for rounding off scene times to a date.
            # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"styles": [
{
"name": "simple_fc",
"title": "Fractional Cover",
"abstract": "Fractional cover representation, with green vegetation in green, dead vegetation in blue, and bare soil in red",
"components": {
"red": {
"BS": 1.0
},
"green": {
"PV": 1.0
},
"blue": {
"NPV": 1.0
}
},
# Used to clip off very bright areas.
"scale_factor": 0.39,
"pq_masks": [
{
"flags": {
'dry': True
},
},
{
"flags": {
"terrain_or_low_angle": False,
"high_slope": False,
"cloud_shadow": False,
"cloud": False,
"sea": False
}
},
]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_fc",
}
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "Geomedian_AU_NBART",
"title": "Geomedian_au_nbart_surface_reflectance",
"abstract": "Images from the Geomedian Surface Reflectance on Level2 Products",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "LANDSAT_8",
# Included as a keyword for the layer
"type": "SR",
# Included as a keyword for the layer
"variant": "Level 2",
# The WMS name for the layer
"name": "ls8_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls8_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
            # Time Zone. In hours added to UTC (may be negative)
            # Used for rounding off scene times to a date.
            # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infra_red",
"title": "False colour multi-band infra-red",
"abstract": "Simple false-colour image, using the near and short-wave infra-red bands",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour SWIR, NIR and green",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Spectral band 2 - Blue",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Spectral band 3 - Green",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Spectral band 4 - Red",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Spectral band 5 - Near infra-red",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Spectral band 6 - Short wave infra-red 1",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Spectral band 7 - Short wave infra-red 2",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
},
{
"name": "ndwi",
"title": "NDWI",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"heat_mapped": True,
"index_function": lambda data: (data["green"] - data["nir"]) / (data["nir"] + data["green"]),
"needed_bands": ["green", "nir"],
"range": [0.0, 1.0],
},
{
"name": "ndbi",
"title": "NDBI",
"abstract": "Normalised Difference Buildup Index - a derived index that correlates with the existence of urbanisation",
"heat_mapped": True,
"index_function": lambda data: (data["swir2"] - data["nir"]) / (data["swir2"] + data["nir"]),
"needed_bands": ["swir2", "nir"],
"range": [0.0, 1.0],
},
{
"name": "rgb_ndvi",
"title": "NDVI plus RGB",
"abstract": "Normalised Difference Vegetation Index (blended with RGB) - a derived index that correlates well with the existence of vegetation",
"component_ratio": 0.6,
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "LANDSAT_7",
# Included as a keyword for the layer
"type": "SR",
# Included as a keyword for the layer
"variant": "Level 2",
# The WMS name for the layer
"name": "ls7_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls7_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
            # Time Zone. In hours added to UTC (may be negative)
            # Used for rounding off scene times to a date.
            # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infra_red",
"title": "False colour multi-band infra-red",
"abstract": "Simple false-colour image, using the near and short-wave infra-red bands",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour SWIR, NIR and green",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Spectral band 2 - Blue",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Spectral band 3 - Green",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Spectral band 4 - Red",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Spectral band 5 - Near infra-red",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Spectral band 6 - Short wave infra-red 1",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Spectral band 7 - Short wave infra-red 2",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
},
{
"name": "ndwi",
"title": "NDWI",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"heat_mapped": True,
"index_function": lambda data: (data["green"] - data["nir"]) / (data["nir"] + data["green"]),
"needed_bands": ["green", "nir"],
"range": [0.0, 1.0],
},
{
"name": "ndbi",
"title": "NDBI",
"abstract": "Normalised Difference Buildup Index - a derived index that correlates with the existence of urbanisation",
"heat_mapped": True,
"index_function": lambda data: (data["swir2"] - data["nir"]) / (data["swir2"] + data["nir"]),
"needed_bands": ["swir2", "nir"],
"range": [0.0, 1.0],
},
{
"name": "rgb_ndvi",
"title": "NDVI plus RGB",
"abstract": "Normalised Difference Vegetation Index (blended with RGB) - a derived index that correlates well with the existence of vegetation",
"component_ratio": 0.6,
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "LANDSAT_5",
# Included as a keyword for the layer
"type": "SR",
# Included as a keyword for the layer
"variant": "Level 2",
# The WMS name for the layer
"name": "ls5_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls5_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
            # Time Zone. In hours added to UTC (may be negative)
            # Used for rounding off scene times to a date.
            # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infra_red",
"title": "False colour multi-band infra-red",
"abstract": "Simple false-colour image, using the near and short-wave infra-red bands",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour SWIR, NIR and green",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Spectral band 2 - Blue",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Spectral band 3 - Green",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Spectral band 4 - Red",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Spectral band 5 - Near infra-red",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Spectral band 6 - Short wave infra-red 1",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Spectral band 7 - Short wave infra-red 2",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "rgb_ndvi",
"title": "NDVI plus RGB",
"abstract": "Normalised Difference Vegetation Index (blended with RGB) - a derived index that correlates well with the existence of vegetation",
"component_ratio": 0.6,
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
}
]
},
{
"name": "mangrove_cover",
"title": "Mangrove Cover",
"abstract": "Mangrove Cover",
"products": [
{
"label": "Mangrove Cover",
"type": "Level3",
"variant": "Level 3",
"name": "mangrove_cover",
"product_name": "mangrove_cover",
"min_zoom_factor": 500.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data["extent"] == 1,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["extent"],
"apply_solar_corrections": False,
"styles": [
{
"name": "mangrove",
"title": "Mangrove Cover",
"abstract": "Mangrove Cover",
"value_map": {
"canopy_cover_class": [
{
"flags": {
"woodland": True
},
"values": {
"red": 159,
"green": 255,
"blue": 76
}
},
{
"flags": {
"open_forest": True
},
"values": {
"red": 94,
"green": 204,
"blue": 0
}
},
{
"flags": {
"closed_forest": True
},
"values": {
"red": 59,
"green": 127,
"blue": 0
}
},
]
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "mangrove",
},
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "WOfS",
"title": "Water_Observation_from_Space",
"abstract": "WOfS",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "WOFLs",
# Included as a keyword for the layer
"type": "albers",
# Included as a keyword for the layer
"variant": "wofs",
# The WMS name for the layer
"name": "wofs_albers",
# The Datacube name for the associated data product
"product_name": "wofs_albers",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
"pq_band": "water",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [200, 180, 180, 160],
            # Time Zone. In hours added to UTC (may be negative)
            # Used for rounding off scene times to a date.
            # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines what portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
"pq_manual_merge": True,
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [
"nodata",
"noncontiguous",
],
"data_manual_merge": False,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "water",
"title": "Water",
"abstract": "Water",
"value_map": {
"water": [
{
"flags": {
"wet": True,
},
"values": {
"red": 79,
"green": 129,
"blue": 189
}
},
{
"flags": {
"sea": True,
},
"values": {
"red": 79,
"green": 129,
"blue": 189
}
},
{
"flags": {
"dry": True,
},
"values": {
"red": 217,
"green": 150,
"blue": 148
}
},
{
"flags": {
"terrain_or_low_angle": True,
},
"values": {
"red": 112,
"green": 112,
"blue": 112
}
},
{
"flags": {
"high_slope": True,
},
"values": {
"red": 112,
"green": 112,
"blue": 112
}
},
{
"flags": {
"cloud_shadow": True,
},
"values": {
"red": 112,
"green": 112,
"blue": 112
}
},
{
"flags": {
"cloud": True
},
"values": {
"red": 112,
"green": 112,
"blue": 112
}
}
]
}
},
{
"name": "water_masked",
"title": "Water (Masked)",
"abstract": "Water Data, Masked",
# Invert True: Show if no flags match
"value_map": {
"water": [
{
"flags": {
"wet": True
},
"values": {
"red": 79,
"green": 129,
"blue": 189
}
},
]
},
"pq_masks": [
{
"flags": {
'wet': True,
},
},
],
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "water",
},
{
# Included as a keyword for the layer
"label": "WOfS_Summary",
# Included as a keyword for the layer
"type": "WOfS_Summary",
# Included as a keyword for the layer
"variant": "Summary",
# The WMS name for the layer
"name": "wofs_summary",
# The Datacube name for the associated data product
"product_name": "wofs_summary",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
#"pq_dataset": "wofs_albers",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
#"pq_band": "water",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone, in hours added to UTC (may be negative).
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"styles": [
{
"name": "WOfS_frequency",
"title": " Wet and Dry Count",
"abstract": "WOfS summary determinig the count_wet and count_clear for WOfS product",
"heat_mapped": True,
"index_function": lambda data: data["frequency"] * 0.0 + 0.25,
"needed_bands": ["frequency"],
"range": [0.0, 1.0],
"components": {
"red": {
"frequency": 0.0
},
"green": {
"frequency": 0.0
},
"blue": {
"frequency": 1.0
}
},
"scale_range": [0, 3]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "WOfS_frequency",
}
],
},
]
|
[
"harshu.rampur@ga.gov.au"
] |
harshu.rampur@ga.gov.au
|
dfea14f587580d86c76f3dbc73c65587e1154af8
|
faaf12ab18978082233c09628b815a69e73868e4
|
/leetcode/algorithms/easy/keep_multiplying_found_values_by_two.py
|
9d03b8b29a25813877514664235bcbeb70bc846b
|
[
"WTFPL"
] |
permissive
|
ferhatelmas/algo
|
6826bcf0be782cb102c1ee20dce8d4345e1fd6d2
|
7b867f6d2c8a9fb896f464168b50dfc115617e56
|
refs/heads/master
| 2023-08-18T19:59:58.435696
| 2023-08-14T10:16:00
| 2023-08-14T10:16:00
| 3,813,734
| 27
| 16
|
WTFPL
| 2020-10-25T23:00:16
| 2012-03-23T23:43:31
|
Java
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
from typing import List
class Solution:
def findFinalValue(self, nums: List[int], original: int) -> int:
s = set(nums)
o = original
while o in s:
o *= 2
return o
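# Quick sanity check (example values from the problem statement, not part of the submission):
# Solution().findFinalValue([5, 3, 6, 1, 12], 3) returns 24
# (3 is present -> 6 -> 12 -> 24; 24 is absent, so the chain stops there).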
|
[
"elmas.ferhat@gmail.com"
] |
elmas.ferhat@gmail.com
|
174901bbd58c43f7d01e0a887c3716d5e02f9196
|
1df7840dd8bbf64c44dee6c7040046913deb22f3
|
/Video_42_GeneratingHTMLTestRunner_Reports.py
|
3ef167432d5cf07baacb364563f230ea386efd03
|
[] |
no_license
|
manojgupta3051994/Selenium-Python
|
68c39a3a0c7a39fc0e039183c22c6a4c71fee8b8
|
4391b506fda8542106c596a5735a2989a5c3862f
|
refs/heads/main
| 2023-01-09T05:32:46.520988
| 2020-11-11T07:02:35
| 2020-11-11T07:02:35
| 306,115,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
from selenium import webdriver
import unittest
import HtmlTestRunner
class OrangeHrmTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome(executable_path=r"C:\Users\Manoj\Desktop\Python - Selenium Practice\Drivers\chromedriver.exe")
cls.driver.maximize_window()
def test_HomePageTitle(self):
self.driver.get('https://opensource-demo.orangehrmlive.com/')
self.assertEqual("OrangeHRM",self.driver.title,"Webpage Title is not same")
def test_login(self):
self.driver.get('https://opensource-demo.orangehrmlive.com/')
self.driver.find_element_by_id('txtUsername').send_keys('Admin')
self.driver.find_element_by_id('txtPassword').send_keys('admin123')
self.driver.find_element_by_id('btnLogin').click()
self.assertEqual("OrangeHRM",self.driver.title,"Webpage Title is not same")
@classmethod
def tearDownClass(cls):
cls.driver.close()
print("Test Completed")
if __name__ == '__main__':
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output=r'C:\Users\Manoj\Desktop\Python - Selenium Practice\Reports'))
|
[
"noreply@github.com"
] |
manojgupta3051994.noreply@github.com
|
5a437fe980aa92a4f6d79f902a3eb02ad1419f90
|
28261a54a5e1feab2c7f739f3f0d82f9fe71f662
|
/Problem 4/Support Vector Machines.py
|
ec277648ab676ff437b741417a5c685c7a0225df
|
[] |
no_license
|
Wang-Yujue/Statistical-Machine-Learning
|
bd324a8c004d23f32af7912da57b4e8168b7cd06
|
5fa29e2ac918813641cf35bb82e3d52962d6b3b6
|
refs/heads/master
| 2021-04-27T07:47:28.426861
| 2018-02-23T18:41:59
| 2018-02-23T18:41:59
| 122,639,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# read data
def read_dataset(path):
data = []
txt = open(path)
for line in txt:
a,b,c = map(float,line.split())
data.append([a,b,c])
return np.asarray(data)
def gaussianKernel(x1, x2, sigma):
x1 = x1[:]
x2 = x2[:]
    sim = np.exp(-sum((x1 - x2) ** 2) / (2 * sigma ** 2))
return sim
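# Worked example for the kernel above (hypothetical inputs, assuming 1-D numpy arrays):
# gaussianKernel(np.array([1.0, 2.0]), np.array([0.0, 4.0]), 2.0) == np.exp(-5 / 8),
# since the summed squared difference is 1 + 4 = 5 and the denominator is 2 * 2.0**2 = 8.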
data = read_dataset('dataSets/iris-pca.txt')
label = data[:,2]
feature = data[:,[0,1]]
setosa_x1 = []
setosa_x2 = []
virginica_x1 = []
virginica_x2 = []
j = -1
for i in label:
i = int(i)
j = j + 1
if i == 0:
x1 = data[j,0]
x2 = data[j,1]
setosa_x1.append(x1)
setosa_x2.append(x2)
if i == 2:
x1 = data[j, 0]
x2 = data[j, 1]
virginica_x1.append(x1)
virginica_x2.append(x2)
plt.plot(setosa_x1,setosa_x2,'+')
plt.plot(virginica_x1,virginica_x2,'o')
plt.show()
|
[
"noreply@github.com"
] |
Wang-Yujue.noreply@github.com
|
a376ce5d3b46de8bb03f21b1ea3b28ec2fbc2536
|
17551b554f801789849878484778ec95b3aaf838
|
/softlearning/environments/gym/wrappers/__init__.py
|
4a31b62c5d1800a565b4caefc2706dfedca80a34
|
[
"MIT"
] |
permissive
|
anyboby/ConstrainedMBPO
|
679894c0c647d0e8cd7178571e602509d3d8d262
|
036f4ffefc464e676a287c35c92cc5c0b8925fcf
|
refs/heads/master
| 2022-12-14T05:14:57.913927
| 2021-02-28T21:20:38
| 2021-02-28T21:20:38
| 237,805,512
| 5
| 2
|
MIT
| 2022-12-08T03:46:46
| 2020-02-02T17:08:07
|
Python
|
UTF-8
|
Python
| false
| false
| 53
|
py
|
from .normalize_action import NormalizeActionWrapper
|
[
"janner@berkeley.edu"
] |
janner@berkeley.edu
|
a6d5bafda9ea9794e31e91f34db93a74533ee4b1
|
c722d4741988066a244af5df44e288e51bb8a342
|
/analysis.py
|
029ba1cd63cf51c577b7896894806168fd5264c2
|
[] |
no_license
|
laranea/KE-Census
|
6ae67ccd2d32ab6095a83005712ea12f92dfe407
|
cd8882f07effc5b464ba7948bb909673e2c87bb2
|
refs/heads/master
| 2022-04-09T13:32:29.592157
| 2020-03-15T11:51:04
| 2020-03-15T11:51:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
import pandas as pd
import streamlit as st
@st.cache()
def population_voter_analysis():
df = pd.read_csv('county_list.csv').set_index('COUNTY').drop(['code', 'MALE', 'FEMALE', 'INTERSEX'], axis=1)
percentage_of_voters = df['VOTERS'] / df["TOTAL"] * 100
df.insert(0, "% Voters", percentage_of_voters)
highest_voters_percentage = df.sort_values(by=['% Voters'], ascending=False).rename(
columns={'TOTAL': 'TOTAL POPULATION', 'VOTERS': 'REGISTERED VOTERS'}).head(10)
lowest_voters_percentage = df.sort_values(by=['% Voters'], ascending=True).rename(
columns={'TOTAL': 'TOTAL POPULATION', 'VOTERS': 'REGISTERED VOTERS'}).head(10)
return highest_voters_percentage, lowest_voters_percentage
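# Sketch of how these cached helpers might be rendered in a Streamlit page
# (hypothetical layout, not part of this module):
# high, low = population_voter_analysis()
# st.subheader("Highest voter registration")
# st.dataframe(high)
# st.subheader("Lowest voter registration")
# st.dataframe(low)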
@st.cache()
def gender_ratios():
df = pd.read_csv('county_list.csv')
male_to_female = df['MALE'] / df['FEMALE']
df.insert(1, "Male To Female Ratio", male_to_female)
m_f = df.sort_values(by=['Male To Female Ratio'], ascending=False).drop(['code', 'VOTERS', 'INTERSEX'
], axis=1).set_index('COUNTY').head(10)
female_to_male = df['FEMALE'] / df['MALE']
df.insert(1, "Female To Male Ratio", female_to_male)
f_m = df.sort_values(by=['Female To Male Ratio'], ascending=False).drop(['code', 'VOTERS', 'INTERSEX',
'Male To Female Ratio'], axis=1).set_index(
'COUNTY').head(10)
top_population = df.sort_values(by=['TOTAL'], ascending=False).drop(['code', 'VOTERS',
'Female To Male Ratio',
'Male To Female Ratio'], axis=1).set_index(
'COUNTY').head(10)
least_population = df.sort_values(by=['TOTAL'], ascending=False).drop(['code', 'VOTERS',
'Female To Male Ratio',
'Male To Female Ratio'], axis=1).set_index(
'COUNTY').tail(10)
return m_f, f_m, top_population, least_population
|
[
"pythonantole@gmail.com"
] |
pythonantole@gmail.com
|
41d3a3bb2bac87e3c68884b00f9224dfa4e75290
|
594055d2cf0ad9dbe9e06a219a7ddb07c4410459
|
/Amazon/LinkedList/138-Copy_List_with_Random_Pointer.py
|
162ab64857e6668274b22a682e3a4ee18489becd
|
[] |
no_license
|
ankurgokhale05/LeetCode
|
975ed3a496e039607773b5e94f1ff517fc01644d
|
0f3aacf35a28e8b85db1be0a7f945f2d7ece0bfc
|
refs/heads/master
| 2023-07-04T01:46:12.035053
| 2021-08-15T02:14:15
| 2021-08-15T02:14:15
| 275,960,404
| 0
| 2
| null | 2020-10-01T01:49:48
| 2020-06-30T00:55:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, next, random):
self.val = val
self.next = next
self.random = random
"""
class Solution(object):
def copyRandomList(self, head):
dic, prev, node = {}, None, head
while node:
if node not in dic:
# Use a dictionary to map the original node to its copy
dic[node] = Node(node.val, node.next, node.random)
if prev:
# Make the previous node point to the copy instead of the original.
prev.next = dic[node]
else:
# If there is no prev, then we are at the head. Store it to return later.
head = dic[node]
if node.random:
if node.random not in dic:
# If node.random points to a node that we have not yet encountered, store it in the dictionary.
dic[node.random] = Node(node.random.val, node.random.next, node.random.random)
# Make the copy's random property point to the copy instead of the original.
dic[node].random = dic[node.random]
# Store prev and advance to the next node.
prev, node = dic[node], node.next
return head
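# Minimal illustration (hypothetical two-node list, assuming the Node class above):
# a = Node(1, None, None); b = Node(2, None, None)
# a.next = b; a.random = b; b.random = b
# copy = Solution().copyRandomList(a)
# Afterwards copy.random is copy.next (structure preserved) and copy is not a,
# i.e. every node in the returned list is a fresh copy.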
|
[
"ankurgokhale@Ankurs-MBP.home"
] |
ankurgokhale@Ankurs-MBP.home
|
9dbc7ad450e959dc92b891102266be38ccadcf7b
|
8b6885ab324047048b4745a76fc8b9058e3e60ef
|
/mypkg/transform/audio.py
|
4f526cd13fe7454a038883aa3ce442908296e8e7
|
[] |
no_license
|
danielgg-coding/simple-python-package-demo
|
25973eee019433717988036f61ca18ecbda4843f
|
a6b6018296361dc07d5f171264e3eaf8b6d106ab
|
refs/heads/master
| 2022-04-09T01:15:33.357824
| 2020-03-14T11:17:15
| 2020-03-14T11:17:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
def transform():
return "squeeze"
|
[
"b03705013@ntu.edu.tw"
] |
b03705013@ntu.edu.tw
|
8f55e7fc73404cd650b20ca669fd313db96f1b3c
|
4c67112b8e4c1ed7fd2f636a0dcee4972eeb79e6
|
/deployment/GPT2/encoder.py
|
f6508e866e80f4de9aaa34474e404aae72cbb3bd
|
[
"MIT"
] |
permissive
|
t04glovern/gpt2-k8s-cloud-run
|
700cc8da97e8b42ca39fb0aed9a26f7edebb090b
|
687a20f76c3e53f917ea9553e569be52deb323d6
|
refs/heads/master
| 2023-06-04T14:07:50.532901
| 2022-09-03T12:58:48
| 2022-09-03T12:58:48
| 180,802,919
| 8
| 1
|
MIT
| 2023-05-22T21:56:35
| 2019-04-11T13:53:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,156
|
py
|
"""Byte pair encoding utilities"""
import os
import json
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
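# For example, printable ASCII maps to itself (bytes_to_unicode()[ord('A')] == 'A'),
# while the space byte (32) is shifted up: bytes_to_unicode()[32] == chr(256 + 32) == 'Ġ',
# which is why GPT-2 vocabulary entries show 'Ġ' as a leading-space marker.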
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
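# e.g. get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}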
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
            except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
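# Note: encode/decode round-trip losslessly -- decode(encode(text)) == text for any
# Unicode string, because the byte<->unicode tables above are bijections.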
def get_encoder():
with open('./GPT2/encoder.json', 'r') as f:
encoder = json.load(f)
with open('./GPT2/vocab.bpe', 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
|
[
"nathan@glovers.id.au"
] |
nathan@glovers.id.au
|
648d19e1b5a5954f3467144178d13589f2879178
|
72583c48f2a9aae5a78989acc6634f03d2bfd897
|
/core/stats/uptime.py
|
bdaca056fa9e245f676f18d4aeb86dd411317267
|
[
"MIT"
] |
permissive
|
sustainable-computing/ODToolkit
|
4b914fb1722641146f1bd341fe6f5c82cff5bf0a
|
9328b930f7e522a89d82011e8ab91286a20bf66f
|
refs/heads/master
| 2023-05-06T05:39:00.514901
| 2021-05-17T20:55:48
| 2021-05-17T20:55:48
| 162,171,971
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
# Parameters:
# dataset: core.data.dataset.Dataset()
# threshold: maximum different second between two consecutive value
# gaps: All gaps detected by core.stats.gap_detect
# Return:
# {room_name: sensors} where sensors = {sensor_name: (uptime in string, uptime in seconds, uptime ratio)}
def uptime(dataset, threshold, gaps=None):
"""
Compute the uptime in the given dataset.
Uptime is the length of time a sensor reported value
:parameter dataset: Dataset object that want to compute the uptime
:type dataset: core.data.dataset.Dataset
:parameter threshold: the maximum time differences in seconds between two consecutive timestamp
to not mark them as a gap
:type threshold: int
:parameter gaps: a dictionary result from the core.stats.gap_detect
:type gaps: dict(str, list(str)) or dict(str, dict(str, list(str)))
:rtype: dict(str, tuple(str)) or dict(str, dict(str, tuple(str)))
:return: the room name corresponds to the name of sensor with its corresponding uptime
"""
from .gap_detect import gap_detect
from datetime import timedelta
if gaps is None:
gaps = gap_detect(dataset, threshold, sensor_level=True)
result = {}
time_col = dataset.time_column_index
for room in gaps.keys():
data = dataset[room][0][:, time_col]
total_uptime = data[-1] - data[0]
result[room] = {}
for sensor in gaps[room].keys():
for gap in gaps[room][sensor]:
result[room][sensor] = result[room].get(sensor, 0) + gap[2]
sensor_uptime = total_uptime - result[room].get(sensor, 0)
result[room][sensor] = (str(timedelta(0, sensor_uptime)), sensor_uptime, sensor_uptime / total_uptime)
return result
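# Illustrative shape of the return value (made-up numbers):
# {"room1": {"sensor_a": ("1 day, 2:00:00", 93600, 0.83)}}
# i.e. (human-readable uptime, uptime in seconds, uptime / total observed span).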
|
[
"skyu0221@gmail.com"
] |
skyu0221@gmail.com
|
c8564ccfba2c2734dff0999edd6fdfc7baedc87d
|
7b4393b00a8e603c1ea6d08fcb13501ba4825def
|
/mrjob_example.py
|
fd6bf872fecbe5b24af1e6b66a857b72f3fa719e
|
[] |
no_license
|
isash30/Play-with-python
|
9f634d6f1a7eac793c2bcfe9f03264ed732d0fa5
|
a37cfbaa310342296a6b17c1130741641069de43
|
refs/heads/master
| 2021-05-27T18:00:06.989422
| 2014-05-04T04:22:20
| 2014-05-04T04:22:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,437
|
py
|
#!/usr/bin/python3
"""Counts the frequencies of words in a document, and doubles the count just
for kicks. Works with Python 2.7 and Python 3.2+.
Usage:
python -m mrjob.launch wfc.py -r local <input files>
"""
from __future__ import print_function
import argparse
import itertools
import json
import re
import sys
WORD_RE = re.compile(r"[\w']+")
# Output lines should be written in the same way.
def _write(stdout, key, value):
stdout.write('%s\t%s\n' % (key, value))
def _group_by_key(in_file, sep='\t'):
"""Turn this:
['x\ta', 'x\tb', 'y\tc']
into this:
[('x', ['a', 'b']), ('y', ['c'])]
"""
group_key = lambda line: line.split(sep, 1)[0]
return itertools.groupby(in_file, key=group_key)
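# Note: itertools.groupby only groups *consecutive* lines with equal keys. That is
# sufficient here because Hadoop Streaming (and mrjob's local runner) sorts mapper
# output by key before the reducer reads it.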
def lines_to_word_occurrences(in_file, stdout):
"""For each line of input, output (word, 1) for each word in the line"""
for line in in_file:
for word in WORD_RE.findall(line):
_write(stdout, word, 1)
def sum_word_occurrences(in_file, stdout):
"""Group input lines by key and output (key, sum(values))"""
for word, lines in _group_by_key(in_file):
value = sum(int(line.split('\t', 1)[1]) for line in lines)
_write(stdout, word, value)
def multiply_value_by_2(in_file, stdout):
"""Emit (key, 2*value) for each line in in_file"""
for line in in_file:
key, value = line.split('\t', 1)
_write(stdout, key, 2 * int(value))
def _run_task(task, paths, stdin, stdout):
"""Run *task* for each file in *paths*. Use stdin if '-' is an arg or there
are no args.
"""
for path in (paths or ['-']):
if path == '-':
task(stdin, stdout)
else:
with open(path, 'r') as f:
task(f, stdout)
def main(argv, stdin, stdout, stderr):
p = argparse.ArgumentParser()
p.add_argument('--steps', default=False, action='store_true')
p.add_argument('--mapper', default=False, action='store_true')
p.add_argument('--reducer', default=False, action='store_true')
p.add_argument('--step-num', default=None, type=int)
p.add_argument('files', nargs='*')
opts = p.parse_args(argv)
args = opts.files
# --steps behavior. This job has 2 steps, the first with a mapper and
# reducer and the second with only a mapper. They are all 'script' steps,
# meaning that they are run by invoking this file with --step-num=X and
# [--mapper|--reducer].
# The output of --steps tells mrjob what steps the job has.
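    # (Concretely, mrjob will re-invoke this script as, e.g., `wfc.py --step-num=0
    # --mapper`, `wfc.py --step-num=0 --reducer`, then `wfc.py --step-num=1 --mapper`.)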
if opts.steps:
if any((opts.mapper, opts.reducer, opts.step_num)):
print('--steps is mutually exclusive with all other options.',
file=stderr)
print(
json.dumps([
{'type': 'streaming',
'mapper': {'type': 'script'},
'reducer': {'type': 'script'}},
{'type': 'streaming',
'mapper': {'type': 'script'}}]),
file=stdout)
return 0
# --step-num is required if --steps not present
if opts.step_num is None:
print('You must specify --step-num if not using --steps.',
file=stderr)
return 1
# likewise for [--mapper|--reducer]
if ((opts.mapper and opts.reducer) or
(not opts.mapper and not opts.reducer)):
        print(
'You must specify exactly one of either --mapper or --reducer'
' if not using --steps.',
file=stderr)
return 1
# decide which mapper to run based on --step-num
if opts.mapper:
if opts.step_num == 0:
_run_task(lines_to_word_occurrences, args, stdin, stdout)
return 0
elif opts.step_num == 1:
_run_task(multiply_value_by_2, args, stdin, stdout)
return 0
else:
print('There is no step %d mapper!' % opts.step_num, file=stderr)
return 1
# run reducer if --step-num is correct
if opts.reducer:
if opts.step_num == 0:
_run_task(sum_word_occurrences, args, stdin, stdout)
return 0
else:
print('There is no step %d reducer!' % opts.step_num, file=stderr)
return 1
raise Exception("How did we get here???")
if __name__ == '__main__':
# invoke with sys.argv, etc. Test cases might use different values.
sys.exit(main(None, sys.stdin, sys.stdout, sys.stderr))
|
[
"sas.2106@gmail.com"
] |
sas.2106@gmail.com
|
5a43f55a19e3c63e780c242dc3f5a1013c94a070
|
a951ccc03e99ae61178ab85f6db0fd5968709280
|
/prefix_sums/genomic_range.py
|
04f8f6028e9ab8f7e8919e44da513188dc5cd481
|
[] |
no_license
|
mmanishh/codilitysolution
|
37142e66c25f786ef7bedaebbe0b164e50ff7804
|
d3487be50e52861cc59d3651e996d4d23cb32613
|
refs/heads/master
| 2021-07-07T12:58:07.651699
| 2020-08-07T10:00:21
| 2020-08-07T10:00:21
| 163,286,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
def genomic_range(S,P,Q):
S = list(S)
new_s = []
result = []
impact = {'A':1,'C':2,'G':3,'T':4}
for s in S:
new_s.append(impact[s])
for i in range(len(P)):
        l, r = P[i], Q[i]
sliced = new_s[l:r+1]
result.append(min(sliced))
return result
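# A prefix-sum variant of the same query, illustrating the technique this folder is
# named after. genomic_range_prefix is a new sketch, not part of the original
# submission: precomputing cumulative counts per nucleotide makes each query O(1)
# instead of O(r - l).
def genomic_range_prefix(S, P, Q):
    impact = {'A': 1, 'C': 2, 'G': 3, 'T': 4}
    n = len(S)
    # counts[c][i] = occurrences of nucleotide c in S[:i]
    counts = {c: [0] * (n + 1) for c in 'ACGT'}
    for i, ch in enumerate(S):
        for c in 'ACGT':
            counts[c][i + 1] = counts[c][i] + (1 if ch == c else 0)
    result = []
    for l, r in zip(P, Q):
        # nucleotides checked in increasing impact order; the first one present wins
        for c in 'ACGT':
            if counts[c][r + 1] - counts[c][l] > 0:
                result.append(impact[c])
                break
    return result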
if __name__ == '__main__':
S = 'CAGCCTA'
P = [2,5,0]
Q = [4,5,6]
print(genomic_range(S,P,Q))
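    # prints [2, 4, 1]: minimal impacts over S[2:5] == 'GCC', S[5:6] == 'T', S[0:7]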
|
[
"dfrozenthrone@gmail.com"
] |
dfrozenthrone@gmail.com
|
16788fb6c4d87a3d199099337d60a972ac10c1d0
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631989306621952_1/Python/gvalli/2016-1A-A-lastword.py
|
becda986965852bb63622f5a8164983cb9663cf1
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
fh = open(sys.argv[1], 'r')
T = int(fh.readline()) # number of test cases
for t in range(T):
S = fh.readline().split()[0] # string of letters
res = ''
oldval = -1
for c in S:
val = ord(c)
if val >= oldval:
res = c + res
oldval = ord(c)
else:
res = res + c
print('Case #{:d}: {}'.format(t + 1, res))
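# Greedy rule: prepend a letter when it is >= the current first letter, else append.
# Matches the sample cases, e.g. "CAB" -> "CAB" and "JAM" -> "MJA".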
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
e76321e70430d42255d94ec46ee794f6038d4b5d
|
99236a47562f2d63afb4559dc5bd44b358ea00aa
|
/accountDisable_AD.py
|
0be6491c574781e7fc7c7bdbc6f4b7fda5521977
|
[
"MIT"
] |
permissive
|
strohmy86/Scripts
|
3af7bf82abe6396a8d675b34f11a1a6c43083101
|
30f1eb1ff3a70ed16a674a1cf086a403fe0ecd49
|
refs/heads/master
| 2023-07-12T20:09:07.989313
| 2023-06-26T11:44:10
| 2023-06-26T11:44:10
| 97,958,183
| 1
| 0
|
MIT
| 2018-03-14T15:20:16
| 2017-07-21T14:45:45
|
Python
|
UTF-8
|
Python
| false
| false
| 14,072
|
py
|
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2020 Luke Strohm
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import datetime
import os
import time
import paramiko
from ldap3 import ALL, MODIFY_DELETE, MODIFY_REPLACE, Connection, Server, Tls
class Color:
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def cred():
print(
Color.DARKCYAN
+ "\n"
+ "**********************************\n"
+ "* Python 3 Script For Disabling *\n"
+ "* Accounts in Active Directory *\n"
+ "* Moving Them to the Disabled OU *\n"
+ "* *\n"
+ "* Written and maintained by *\n"
+ "* Luke Strohm *\n"
+ "* strohm.luke@gmail.com *\n"
+ "* https://github.com/strohmy86 *\n"
+ "**********************************\n"
+ "\n"
+ Color.END
)
# Connects and binds to LDAP server
f = open("/home/lstrohm/Scripts/ADcreds.txt", "r")
lines = f.readlines()
username = lines[0]
password = lines[1]
f.close()
tls = Tls(
local_private_key_file=None,
local_certificate_file=None,
)
s = Server("madhs01dc3.mlsd.local", use_ssl=True, get_info=ALL, tls=tls)
c = Connection(s, user=username.strip(),
password=password.strip())
c.bind()
# Global Variables
disabled_ou = ",ou=Disabled,ou=Madison,dc=mlsd,dc=local"
today = str(datetime.datetime.now())
today2 = datetime.datetime.strptime(today, "%Y-%m-%d %H:%M:%S.%f")
now = today2.strftime("%m-%d-%Y at %H:%M")
# Specify private key file
k = paramiko.RSAKey.from_private_key_file("/home/lstrohm/.ssh/id_rsa")
# Connects to gcds server via SSH
gcds = paramiko.SSHClient()
gcds.set_missing_host_key_policy(paramiko.AutoAddPolicy())
gcds.connect("madhs01gcds.mlsd.local", username="mlsd\\administrator", pkey=k)
def single(c, usr, disabled_ou, now, gcds):
# Search for user. Lists all usernames matching string provided.
try:
c.search(
"ou=Madison,dc=mlsd,dc=local",
"(&(objectclass=person)(cn=*" + usr + "*))",
attributes=[
"mail",
"title",
"displayName",
"lastLogon",
"userAccountControl",
"cn",
"description",
"memberOf",
],
)
users = c.entries
if len(users) <= 0:
raise IndexError
ent = 0 # Start of result list
print(
Color.BOLD
+ Color.CYAN
+ "I found the following user(s):\n"
+ Color.END
)
for i in users:
if "514" in str(
users[ent].userAccountControl.value
) or "546" in str(users[ent].userAccountControl.value):
status = "Disabled"
elif "512" in str(
users[ent].userAccountControl.value
) or "544" in str(users[ent].userAccountControl.value):
status = "Active"
else:
status = "Unknown"
print(
str(ent)
+ ") Name: "
+ Color.GREEN
+ str(users[ent].displayName.value)
+ Color.END
+ ", AD Location: "
+ Color.GREEN
+ str(users[ent].entry_dn)
+ Color.END
+ ", Title: "
+ Color.GREEN
+ str(users[ent].title)
+ Color.END
+ ", Status: "
+ Color.GREEN
+ status
+ Color.END
)
ent = ent + 1 # Moves to next in results list
# Prompts to select user from search results
usn = int(input(Color.BOLD + "\nPlease select a user: " + Color.END))
user = c.entries[usn]
print(
Color.YELLOW
+ "Disabling account "
+ Color.BOLD
+ user.cn.value
+ Color.END
+ Color.YELLOW
+ " and moving it "
+ "to the disabled OU."
+ Color.END
)
if isinstance(user.memberOf.value, list) is True:
c.modify(
str(user.entry_dn),
{
"description": [(MODIFY_DELETE, [])],
},
)
c.modify(
str(user.entry_dn),
{
"memberOf": [(MODIFY_DELETE, [])],
},
)
time.sleep(0.500)
for i in user.memberOf.value:
c.modify(
str(i),
{
"member": [(MODIFY_DELETE, [str(user.entry_dn)])],
},
)
time.sleep(0.300)
elif isinstance(user.memberOf.value, str) is True:
c.modify(
str(user.entry_dn),
{
"description": [(MODIFY_DELETE, [])],
},
)
c.modify(
str(user.entry_dn),
{
"memberOf": [(MODIFY_DELETE, [])],
},
)
time.sleep(0.500)
c.modify(
user.memberOf.value,
{"member": [(MODIFY_DELETE, [str(user.entry_dn)])]},
)
time.sleep(0.500)
c.modify(
str(user.entry_dn),
{
"userAccountControl": [(MODIFY_REPLACE, ["514"])],
},
)
c.modify(
str(user.entry_dn),
{
"description": [(MODIFY_REPLACE, ["Disabled - " + str(now)])],
},
)
time.sleep(0.500)
if "@madisonrams.net" in str(user.mail.value):
disabled_ou = (
"ou="
+ str(datetime.date.today().year)
+ ",ou=Disabled,"
+ "ou=Student,ou=Madison,dc=mlsd,dc=local"
)
else:
disabled_ou = "ou=Staff" + disabled_ou
c.modify_dn(
str(user.entry_dn),
"cn=" + str(user.cn.value),
new_superior=disabled_ou,
)
cmd = (
"/home/lstrohm/bin/gamadv-xtd3/gam user "
+ str(user.cn.value)
+ " deprov"
)
os.system(cmd)
print(
Color.CYAN
+ Color.BOLD
+ "Running GCDS. Please wait....."
+ Color.END
)
# Connects to madhs01gcds server via SSH and runs a Google Sync
        stdin, stdout, stderr = gcds.exec_command(r"C:\Tools\gcds.cmd")
for line in stdout:
print(Color.YELLOW + line.strip("\n") + Color.END)
print(Color.GREEN + "\nDone!\n" + Color.END)
except IndexError: # Error received if empty search result
print(Color.RED + "No username found! Try again.\n" + Color.END)
except KeyboardInterrupt: # User exited script with CTRL + C
print(Color.CYAN + "\nExiting..." + Color.END)
exit()
def batch(c, file, disabled_ou, now, gcds):
try:
with open(file, "r") as f:
for i in f:
dis_ou = disabled_ou
i = str(i)[0:-1]
# i = i[2:-2]
c.search(
"ou=Madison,dc=mlsd,dc=local",
"(" + i + ")",
attributes=[
"mail",
"title",
"displayName",
"lastLogon",
"userAccountControl",
"cn",
"description",
"memberOf",
],
)
user = c.entries
if len(user) <= 0:
raise IndexError
user = user[0]
print(
Color.YELLOW
+ "Disabling account "
+ Color.BOLD
+ user.cn.value
+ Color.END
+ Color.YELLOW
+ " and moving it "
+ "to the disabled OU."
+ Color.END
)
if isinstance(user.memberOf.value, list) is True:
c.modify(
str(user.entry_dn),
{
"description": [(MODIFY_DELETE, [])],
},
)
c.modify(
str(user.entry_dn),
{
"memberOf": [(MODIFY_DELETE, [])],
},
)
time.sleep(0.500)
for i in user.memberOf.value:
c.modify(
str(i),
{
"member": [
(MODIFY_DELETE, [str(user.entry_dn)])
],
},
)
time.sleep(0.300)
elif isinstance(user.memberOf.value, str) is True:
c.modify(
str(user.entry_dn),
{
"description": [(MODIFY_DELETE, [])],
},
)
c.modify(
str(user.entry_dn),
{
"memberOf": [(MODIFY_DELETE, [])],
},
)
time.sleep(0.500)
c.modify(
user.memberOf.value,
{"member": [(MODIFY_DELETE, [str(user.entry_dn)])]},
)
time.sleep(0.500)
c.modify(
str(user.entry_dn),
{
"userAccountControl": [(MODIFY_REPLACE, ["514"])],
},
)
c.modify(
str(user.entry_dn),
{
"description": [
(MODIFY_REPLACE, ["Disabled - " + str(now)])
],
},
)
time.sleep(0.500)
if "@madisonrams.net" in str(user.mail.value):
                    dis_ou = (
"ou="
+ str(datetime.date.today().year)
+ ",ou=Disabled,"
+ "ou=Student,ou=Madison,dc=mlsd,dc=local"
)
else:
dis_ou = "ou=Staff" + disabled_ou
c.modify_dn(
str(user.entry_dn),
"cn=" + str(user.cn.value),
new_superior=dis_ou,
)
print(c.result)
time.sleep(0.500)
cmd = (
"/home/lstrohm/bin/gamadv-xtd3/gam user "
+ str(user.cn.value)
+ " deprov"
)
os.system(cmd)
print(
Color.CYAN
+ Color.BOLD
+ "Running GCDS. Please wait....."
+ Color.END
)
# Connects to madhs01gcds server via SSH and runs a Google Sync
            stdin, stdout, stderr = gcds.exec_command(r"C:\Tools\gcds.cmd")
for line in stdout:
print(Color.YELLOW + line.strip("\n") + Color.END)
f.close()
print(Color.GREEN + "\nDone!\n" + Color.END)
except IndexError: # Error received if empty search result
print(
Color.RED
+ "Error in file! User not found. Check your file"
+ "and try again.\n"
+ Color.END
)
# Sets up parser and adds arguement
parser = argparse.ArgumentParser(
description="Script to disable user accounts."
)
parser.add_argument(
"usr",
metavar="Username",
default="",
type=str,
help="Username or last name of user to disable.",
nargs="?",
)
parser.add_argument(
"-b",
"--batch",
metavar="Filename",
default="",
type=str,
help="Batch mode with a text file. File must contain full cn\
(one per line). EX: cn=some_user",
)
args = parser.parse_args()
usr = args.usr
file = args.batch
cred()
if file == "" and usr != "":
single(c, usr, disabled_ou, now, gcds)
c.unbind()
gcds.close()
elif file != "" and usr == "":
batch(c, file, disabled_ou, now, gcds)
c.unbind()
gcds.close()
else:
c.unbind()
gcds.close()
parser.print_help()
parser.exit(1)
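# Example invocations (names are placeholders):
#   ./accountDisable_AD.py jsmith            # interactive single-user mode
#   ./accountDisable_AD.py -b disable.txt    # batch mode, one cn=... per line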
|
[
"strohm.luke@gmail.com"
] |
strohm.luke@gmail.com
|
fac948d696d4a82b62dca8ce6557a6b4e27a4e6e
|
0ecb1763b4cab08a1fb80234639e46afc8921e2f
|
/further/routing_1.py
|
882cf1231be2c220621e4dd32a8a4aea3cdd9566
|
[] |
no_license
|
mach8686devops/pyside6-demo
|
4eed3713288ec21b0ec4b8561290f87925693b89
|
848302ff9c1536034cf5f225fa953944d011c2a4
|
refs/heads/main
| 2023-05-05T11:12:20.711846
| 2021-05-28T13:44:41
| 2021-05-28T13:44:41
| 371,714,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
import sys
from PySide6.QtCore import QSize, Qt
from PySide6.QtWidgets import QApplication, QLabel, QMainWindow
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.label = QLabel("Click in this window")
self.status = self.statusBar()
self.setFixedSize(QSize(200, 100))
self.setCentralWidget(self.label)
def mouseMoveEvent(self, e):
self.label.setText("mouseMoveEvent")
def mousePressEvent(self, e):
button = e.button()
if button == Qt.LeftButton:
self.label.setText("mousePressEvent LEFT")
if e.x() < 100:
self.status.showMessage("Left click on left")
self.move(self.x() - 10, self.y())
else:
self.status.showMessage("Left click on right")
self.move(self.x() + 10, self.y())
elif button == Qt.MiddleButton:
self.label.setText("mousePressEvent MIDDLE")
elif button == Qt.RightButton:
self.label.setText("mousePressEvent RIGHT")
if e.x() < 100:
self.status.showMessage("Right click on left")
print("Something else here.")
self.move(10, 10)
else:
self.status.showMessage("Right click on right")
self.move(400, 400)
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec()
|
[
"zhangjohn202@gmail.com"
] |
zhangjohn202@gmail.com
|
1ef4318bf988f6d48cc10b92c99d71e098603a54
|
027c5f1cdbc292e24695edc69421e65cb68608da
|
/speech_to_text.py
|
b139b58ad7dbe84114cbf0557a1dc25e46c68f54
|
[] |
no_license
|
kmitd/secklow-sounds-project
|
e3713656acea33097dacf04ef01dac27d740fff6
|
56559ae1420457edcd665a809cee845a83de9c3b
|
refs/heads/master
| 2021-01-21T08:32:44.215386
| 2016-09-15T16:27:45
| 2016-09-15T16:27:45
| 68,304,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,140
|
py
|
"""Google Cloud Speech API sample application using the REST API for batch
processing."""
import argparse
import base64
import json
import io,os
from googleapiclient import discovery
import httplib2
from oauth2client.client import GoogleCredentials
from read_phrases import wards
import time
import logging
DISCOVERY_URL = ('https://{api}.googleapis.com/$discovery/rest?'
'version={apiVersion}')
def get_speech_service():
credentials = GoogleCredentials.get_application_default().create_scoped(
['https://www.googleapis.com/auth/cloud-platform'])
http = httplib2.Http()
credentials.authorize(http)
return discovery.build('speech', 'v1beta1', http=http, discoveryServiceUrl=DISCOVERY_URL)
def write_output(in_file, response):
file_dir = "data/transcripts/"+in_file.split("/")[2]
print file_dir
try:
os.mkdir(file_dir)
except OSError :
# then it exists
pass
with io.open(file_dir+'/'+in_file.split("/")[-1][:-4]+".json", 'w', encoding='utf-8') as f:
f.write(unicode(json.dumps(response, ensure_ascii=False, indent=2)))
def is_cached(speech_file):
file_path = "data/transcripts/"+speech_file.split("/")[2]+"/"+speech_file.split("/")[-1][:-4]+".json"
if os.path.exists(file_path):
# with open(file_path) as data_file:
# print "Trascript exists."
# print json.load(data_file)
return True
else :
# print "Trascript does not exist. Calling Google Speech API..."
return False
def asynch_request(speech_remote_file):
service = get_speech_service()
service_request = service.speech().asyncrecognize(
body={
'config': {
'encoding': 'LINEAR16', # raw 16-bit signed LE samples
'sampleRate': 16000, # 16 khz
'languageCode': 'en-GB', # a BCP-47 language tag,
"speech_context": {"phrases": wards }
},
'audio' : {
'uri': speech_remote_file
}
})
response = service_request.execute()
name = response['name']
# Construct a GetOperation request.
service_request = service.operations().get(name=name)
while True:
# Give the server a few seconds to process.
logging.debug('Waiting for Google Speech API processing...')
time.sleep(1)
# Get the long running operation with response.
response = service_request.execute()
if 'done' in response and response['done']: break
# logging.info(json.dumps(response['response']['results']))
return response
def synch_request(speech_file):
with open(speech_file, 'rb') as speech:
speech_content = base64.b64encode(speech.read())
service = get_speech_service()
service_request = service.speech().syncrecognize(
body={
'config': {
'encoding': 'LINEAR16', # raw 16-bit signed LE samples
'sampleRate': 16000, # 16 khz
'languageCode': 'en-GB' , # a BCP-47 language tag,
"speech_context":
{"phrases": wards }
},
'audio': {
'content': speech_content.decode('UTF-8')
}
})
response = service_request.execute()
# logging.info(json.dumps(response))
return response
def main(speech_file):
"""Transcribe the given audio file.
Args:
speech_file: the name of the audio file.
"""
# do not run it again if we have already the trascript
# if (is_cached(speech_file)) :
# return
    response = synch_request(speech_file)
print(json.dumps(response))
write_output(speech_file,response)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'speech_file', help='Full path of audio file to be recognized')
args = parser.parse_args()
main(args.speech_file)
|
[
"ilaria.tiddi@open.ac.uk"
] |
ilaria.tiddi@open.ac.uk
|
b30480679c1e40bda865c6b29f644fd3bf852376
|
097b5839f33bfd7826ad51731b93349f5cb24056
|
/venv/Lib/site-packages/aliyun_python_sdk_core_v3-2.11.2-py3.6.egg/aliyunsdkcore/client.py
|
a42cdb2f19e4e28d0e67c0023c005c52545c0c34
|
[] |
no_license
|
P79N6A/xfz
|
eb2099051e13e2ea4f2a4862f77555630e4fc449
|
066607292c4e4107ae6425e1a889f014f0e731bc
|
refs/heads/master
| 2021-07-22T10:57:00.551292
| 2018-12-28T10:30:31
| 2018-12-28T10:30:31
| 163,398,844
| 2
| 2
| null | 2020-05-26T03:39:11
| 2018-12-28T10:39:52
|
Python
|
UTF-8
|
Python
| false
| false
| 10,306
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
import warnings
import aliyunsdkcore
from aliyunsdkcore.vendored.six.moves.urllib.parse import urlencode
from aliyunsdkcore.vendored.six.moves import http_client
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkcore.acs_exception import error_code, error_msg
from aliyunsdkcore.http.http_response import HttpResponse
from aliyunsdkcore.request import AcsRequest
from aliyunsdkcore.http import format_type
from aliyunsdkcore.auth.signers.signer_factory import SignerFactory
from aliyunsdkcore.request import CommonRequest
from aliyunsdkcore.endpoint.resolver_endpoint_request import ResolveEndpointRequest
from aliyunsdkcore.endpoint.default_endpoint_resolver import DefaultEndpointResolver
from aliyunsdkcore.compat import json
"""
Acs default client module.
Created on 6/15/2015
@author: alex jiang
"""
DEFAULT_SDK_CONNECTION_TIMEOUT_IN_SECONDS = 10
class AcsClient:
def __init__(
self,
ak=None,
secret=None,
region_id="cn-hangzhou",
auto_retry=True,
max_retry_time=3,
user_agent=None,
port=80,
timeout=DEFAULT_SDK_CONNECTION_TIMEOUT_IN_SECONDS,
public_key_id=None,
private_key=None,
session_period=3600,
credential=None,
debug=False):
"""
constructor for AcsClient
:param ak: String, access key id
:param secret: String, access key secret
:param region_id: String, region id
:param auto_retry: Boolean
:param max_retry_time: Number
:return:
"""
self.__max_retry_num = max_retry_time
self.__auto_retry = auto_retry
self.__ak = ak
self.__secret = secret
self.__region_id = region_id
self.__user_agent = user_agent
self._port = port
self._timeout = timeout
# if true, do_action() will throw a ClientException that contains URL
self._url_test_flag = False
credential = {
'ak': ak,
'secret': secret,
'public_key_id': public_key_id,
'private_key': private_key,
'session_period': session_period,
'credential': credential,
}
self._signer = SignerFactory.get_signer(credential, region_id, self.implementation_of_do_action, debug)
self._endpoint_resolver = DefaultEndpointResolver(self)
def get_region_id(self):
"""
:return: String
"""
return self.__region_id
def get_access_key(self):
"""
:return: String
"""
return self.__ak
def get_access_secret(self):
"""
:return: String
"""
return self.__secret
def is_auto_retry(self):
"""
:return:Boolean
"""
return self.__auto_retry
def get_max_retry_num(self):
"""
:return: Number
"""
return self.__max_retry_num
def get_user_agent(self):
return self.__user_agent
def set_region_id(self, region):
self.__region_id = region
def set_max_retry_num(self, num):
"""
set auto retry number
:param num: Numbers
:return: None
"""
self.__max_retry_num = num
def set_auto_retry(self, flag):
"""
set whether or not the client perform auto-retry
:param flag: Booleans
:return: None
"""
self.__auto_retry = flag
def set_user_agent(self, agent):
"""
User agent set to client will overwrite the request setting.
:param agent:
:return:
"""
self.__user_agent = agent
def get_port(self):
return self._port
def get_location_service(self):
return None
def _make_http_response(self, endpoint, request, specific_signer=None):
body_params = request.get_body_params()
if body_params:
body = urlencode(body_params)
request.set_content(body)
request.set_content_type(format_type.APPLICATION_FORM)
elif request.get_content() and "Content-Type" not in request.get_headers():
request.set_content_type(format_type.APPLICATION_OCTET_STREAM)
method = request.get_method()
signer = self._signer if specific_signer is None else specific_signer
header, url = signer.sign(self.__region_id, request)
if self.get_user_agent() is not None:
header['User-Agent'] = self.get_user_agent()
if header is None:
header = {}
header['x-sdk-client'] = 'python/2.0.0'
protocol = request.get_protocol_type()
response = HttpResponse(
endpoint,
url,
method,
header,
protocol,
request.get_content(),
self._port,
timeout=self._timeout)
if body_params:
body = urlencode(request.get_body_params())
response.set_content(body, "utf-8", format_type.APPLICATION_FORM)
return response
def implementation_of_do_action(self, request, signer=None):
if not isinstance(request, AcsRequest):
raise ClientException(
error_code.SDK_INVALID_REQUEST,
error_msg.get_msg('SDK_INVALID_REQUEST'))
# add core version
core_version = __import__('aliyunsdkcore').__version__
request.add_header('x-sdk-core-version', core_version)
if isinstance(request, CommonRequest):
request.trans_to_acs_request()
if request.endpoint:
endpoint = request.endpoint
else:
endpoint = self._resolve_endpoint(request)
http_response = self._make_http_response(endpoint, request, signer)
if self._url_test_flag:
raise ClientException("URLTestFlagIsSet", http_response.get_url())
# Do the actual network thing
try:
status, headers, body = http_response.get_response_object()
return status, headers, body
except IOError as e:
error_message = str(e)
error_message += "\nEndpoint: " + endpoint
error_message += "\nProduct: " + str(request.get_product())
error_message += "\nSdkCoreVersion: " + aliyunsdkcore.__version__
error_message += "\nHttpUrl: " + str(http_response.get_url())
error_message += "\nHttpHeaders: " + str(http_response.get_headers())
raise ClientException(error_code.SDK_HTTP_ERROR, error_message)
    @staticmethod
    def _parse_error_info_from_response_body(response_body):
        try:
            body_obj = json.loads(response_body)
            if 'Code' in body_obj and 'Message' in body_obj:
                return body_obj['Code'], body_obj['Message']
        except ValueError:
            # failed to parse body as json format
            pass
        # TODO handle if response_body is too big
        error_message = "ServerResponseBody: " + str(response_body)
        return error_code.SDK_UNKNOWN_SERVER_ERROR, error_message
def do_action_with_exception(self, acs_request):
        # set server response format as json, because this function will
        # parse the response, so the format doesn't matter
acs_request.set_accept_format('JSON')
status, headers, body = self.implementation_of_do_action(acs_request)
request_id = None
try:
body_obj = json.loads(body.decode('utf-8'))
request_id = body_obj.get('RequestId')
except (ValueError, TypeError, AttributeError):
# in case the response body is not a json string, return the raw
# data instead
pass
if status < http_client.OK or status >= http_client.MULTIPLE_CHOICES:
server_error_code, server_error_message = self._parse_error_info_from_response_body(
body)
raise ServerException(
server_error_code,
server_error_message,
http_status=status,
request_id=request_id)
return body
def _resolve_endpoint(self, request):
resolve_request = ResolveEndpointRequest(
self.__region_id,
request.get_product(),
request.get_location_service_code(),
request.get_location_endpoint_type(),
)
endpoint = self._endpoint_resolver.resolve(resolve_request)
if endpoint.endswith("endpoint-test.exception.com"):
# For endpoint testability, if the endpoint is xxxx.endpoint-test.special.com
# throw a client exception with this endpoint
raise ClientException(error_code.SDK_ENDPOINT_TESTABILITY, endpoint)
return endpoint
def do_action(self, acs_request):
warnings.warn(
"do_action() method is deprecated, please use do_action_with_exception() instead.",
DeprecationWarning)
status, headers, body = self.implementation_of_do_action(acs_request)
return body
def get_response(self, acs_request):
return self.implementation_of_do_action(acs_request)
def add_endpoint(self, region_id, product_code, endpoint):
self._endpoint_resolver.put_endpoint_entry(region_id, product_code, endpoint)
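# Minimal usage sketch (placeholder credentials; an illustration only, not part of
# the SDK source):
# client = AcsClient(ak='<access-key-id>', secret='<access-key-secret>', region_id='cn-hangzhou')
# request = CommonRequest()
# request.set_domain('ecs.aliyuncs.com')
# request.set_version('2014-05-26')
# request.set_action_name('DescribeRegions')
# body = client.do_action_with_exception(request)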
|
[
"281528675@qq.com"
] |
281528675@qq.com
|
0c3685cd9f60cf9fab17887921f148cea4932610
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortCalendarWindow.py
|
96f439b6351f27ea524c3190daac96e5559db5f9
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406
| 2017-05-04T13:51:43
| 2017-05-04T13:51:43
| 90,268,530
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 11,877
|
py
|
# 2017.05.04 15:23:16 Central Europe (summer time)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortCalendarWindow.py
import BigWorld
from collections import defaultdict
from helpers import time_utils
from helpers.i18n import makeString as _ms
from gui import makeHtmlString
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortViewHelper import FortViewHelper
from gui.Scaleform.daapi.view.meta.FortCalendarWindowMeta import FortCalendarWindowMeta
from gui.Scaleform.genConsts.FORTIFICATION_ALIASES import FORTIFICATION_ALIASES
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS
from gui.Scaleform.locale.MENU import MENU
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.shared.utils import toLower
from gui.shared.fortifications.fort_seqs import BATTLE_ITEM_TYPE
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.fort_formatters import getDivisionIcon
class FortCalendarWindow(FortViewHelper, FortCalendarWindowMeta):
class TIME_LIMITS:
LOW = FORTIFICATION_ALIASES.ACTIVE_EVENTS_PAST_LIMIT * time_utils.ONE_DAY
HIGH = FORTIFICATION_ALIASES.ACTIVE_EVENTS_FUTURE_LIMIT * time_utils.ONE_DAY
def __init__(self, ctx):
super(FortCalendarWindow, self).__init__()
self.__selectedDate = ctx.get('dateSelected') or time_utils.getCurrentTimestamp()
def getCalendar(self):
return self.components.get(VIEW_ALIAS.CALENDAR)
def startCalendarListening(self):
calendar = self.getCalendar()
if calendar is not None:
calendar.onMonthChangedEvent += self.onMonthChanged
calendar.onDateSelectedEvent += self.onDateSelected
return
def stopCalendarListening(self):
calendar = self.getCalendar()
if calendar is not None:
calendar.onMonthChangedEvent -= self.onMonthChanged
calendar.onDateSelectedEvent -= self.onDateSelected
return
def onMonthChanged(self, timestamp):
self.__selectedDate = timestamp
self._populateMonthEvents()
self._populateCalendarMessage()
def onDateSelected(self, timestamp):
self.__selectedDate = timestamp
self._populatePreviewBlock()
def onWindowClose(self):
self.destroy()
def onFortBattleChanged(self, cache, item, battleItem):
self._update()
def onFortBattleRemoved(self, cache, battleID):
self._update()
def _populateMonthEvents(self):
calendar = self.getCalendar()
if calendar is not None:
result = []
for dayStartTimestamp, battles in self._getBattlesByDay().iteritems():
if time_utils.isFuture(dayStartTimestamp):
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_HEADER, count=len(battles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEFUTUREBG
elif time_utils.isToday(dayStartTimestamp):
finishedBattles = [ b for b in battles if b.isEnded() ]
upcomingBattles = [ b for b in battles if b.isPlanned() ]
if not upcomingBattles:
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_HEADER, count=len(finishedBattles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEPASTBG
else:
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_HEADER, count=len(upcomingBattles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_FUTURE_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEFUTUREBG
else:
tooltipHead = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_HEADER, count=len(battles))
tooltipBody = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_CALENDAR_DAYTOOLTIP_PAST_BODY)
iconSource = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEPASTBG
result.append({'tooltipHeader': tooltipHead,
'tooltipBody': tooltipBody,
'iconSource': iconSource,
'rawDate': dayStartTimestamp})
calendar.as_updateMonthEventsS(result)
return
def _populatePreviewBlock(self):
fort = self.fortCtrl.getFort()
localDateTime = time_utils.getDateTimeInLocal(self.__selectedDate)
targetDayStartTimestamp, _ = time_utils.getDayTimeBoundsForLocal(self.__selectedDate)
eventItems, dateInfo, noEventsText = [], None, None
dateString = _ms(MENU.DATETIME_SHORTDATEFORMATWITHOUTYEAR, weekDay=_ms('#menu:dateTime/weekDays/full/%d' % localDateTime.isoweekday()), monthDay=localDateTime.day, month=toLower(_ms('#menu:dateTime/months/full/%d' % localDateTime.month)))
if not self._isValidTime(self.__selectedDate):
noEventsText = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_EMPTY_NOTAVAILABLE)
else:
for dayStartTimestamp, battles in self._getBattlesByDay().iteritems():
if dayStartTimestamp == targetDayStartTimestamp:
for battle in sorted(battles):
startTimestamp = battle.getStartTime()
battleHasEnded = battle.isEnded()
opponentsClanInfo = battle.getOpponentClanInfo()
if battle.getType() == BATTLE_ITEM_TYPE.ATTACK:
if battleHasEnded:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_OFFENCEPAST
else:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_OFFENCEFUTURE
titleTpl = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_ITEM_TITLE_OFFENCE)
else:
if battleHasEnded:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEPAST
else:
icon = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_DEFENCEFUTURE
titleTpl = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_ITEM_TITLE_DEFENCE)
tankIconVO = getDivisionIcon(battle.defenderFortLevel, battle.attackerFortLevel, determineAlert=battle.getType() == BATTLE_ITEM_TYPE.ATTACK)
if battle.isWin():
background = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_BATTLEFORTVICTORY
resultLabel = 'win'
elif battle.isLose():
background = RES_ICONS.MAPS_ICONS_LIBRARY_FORTIFICATION_BATTLEFORTDEFEAT
resultLabel = 'lose'
else:
background, resultLabel = (None, None)
eventItem = {'icon': icon,
'title': titleTpl % {'clanName': '[%s]' % opponentsClanInfo[1]},
'clanID': opponentsClanInfo[0],
'direction': _ms(FORTIFICATIONS.GENERAL_DIRECTION, value=_ms('#fortifications:General/directionName%d' % battle.getDirection())),
'timeInfo': _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_ITEM_TIMEINFO) % {'startTime': BigWorld.wg_getShortTimeFormat(startTimestamp),
'endTime': BigWorld.wg_getShortTimeFormat(startTimestamp + time_utils.ONE_HOUR)},
'background': background,
'tankIconVO': tankIconVO,
'showTankIcon': not battleHasEnded}
if battleHasEnded and resultLabel:
resultText = makeHtmlString('html_templates:lobby/fortifications', 'battleResult', {'result': _ms(MENU.finalstatistic_commonstats_resultlabel(resultLabel))})
eventItem.update({'result': resultText})
eventItems.append(eventItem)
if not len(eventItems):
if fort.isOnVacationAt(self.__selectedDate):
noEventsText = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_EMPTY_VACATION, date=fort.getVacationDateStr())
else:
noEventsText = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_EMPTY_NOEVENTS)
if len(eventItems) > 0:
dateInfo = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_EVENTSLIST_INFO_BATTLESCOUNT, eventsCount=len(eventItems))
self.as_updatePreviewDataS({'dateString': dateString,
'dateInfo': dateInfo,
'noEventsText': noEventsText,
'events': eventItems})
return
def _populateCalendarMessage(self):
calendar = self.getCalendar()
if calendar is not None:
fort, message = self.fortCtrl.getFort(), ''
vacationStart, vacationEnd = fort.getVacationDate()
if self._isValidTime(vacationStart, self.__selectedDate) or self._isValidTime(vacationEnd, self.__selectedDate):
message = _ms(FORTIFICATIONS.FORTCALENDARWINDOW_MESSAGE_VACATION, date=fort.getVacationDateStr())
calendar.as_setCalendarMessageS(message)
return
def _populate(self):
super(FortCalendarWindow, self)._populate()
self.startFortListening()
self.startCalendarListening()
self._update()
def _dispose(self):
self.stopFortListening()
self.stopCalendarListening()
super(FortCalendarWindow, self)._dispose()
def _update(self):
calendar = self.getCalendar()
if calendar is not None:
lowerTimeBound = time_utils.getCurrentLocalServerTimestamp() - self.TIME_LIMITS.LOW
higherTimeBound = time_utils.getCurrentLocalServerTimestamp() + self.TIME_LIMITS.HIGH
calendar.as_setMinAvailableDateS(lowerTimeBound)
calendar.as_setMaxAvailableDateS(higherTimeBound)
calendar.as_openMonthS(self.__selectedDate)
calendar.as_selectDateS(self.__selectedDate)
self._populateMonthEvents()
self._populatePreviewBlock()
self._populateCalendarMessage()
return
@classmethod
def _isValidTime(cls, timestampToCheck, rootTimestamp = None):
rootTimestamp = rootTimestamp or time_utils.getCurrentTimestamp()
minLimit = rootTimestamp - cls.TIME_LIMITS.LOW
dayStart, _ = time_utils.getDayTimeBoundsForLocal(minLimit)
minLimit = dayStart
maxLimit = rootTimestamp + cls.TIME_LIMITS.HIGH
_, dayEnd = time_utils.getDayTimeBoundsForLocal(maxLimit)
maxLimit = dayEnd
return minLimit < timestampToCheck < maxLimit
def _getBattlesByDay(self):
result, fort = defaultdict(list), self.fortCtrl.getFort()
for battle in fort.getAttacks() + fort.getDefences():
startTimestamp = battle.getStartTime()
if self._isValidTime(startTimestamp):
dayStartTimestamp, _ = time_utils.getDayTimeBoundsForLocal(startTimestamp)
result[dayStartTimestamp].append(battle)
return result
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\fortifications\FortCalendarWindow.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:23:17 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
2000039431e4cd0505c1aa8c3ae0e29f734712d9
|
195b4a1d9c5aa632f46c993d1ec4db45f6130b91
|
/question-094.py
|
a757ae3bdcec335ade4ff81ab63597c1988756dd
|
[] |
no_license
|
superzarin/lessons-1-2-3-4
|
54e5594a120577e410cebbccc6f9397bc78b5a72
|
64d7a27d8c1f2ea42f8a63c6fc036ba80ddb0d01
|
refs/heads/master
| 2022-11-07T22:48:35.906934
| 2020-06-26T04:37:35
| 2020-06-26T04:37:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from array import *
givenArray = array('i', [7, 3, 12, 1, 8])
while True:
    selectedValue = int(input('Select a number from the array: '))
    if selectedValue in givenArray:
print(givenArray.index(selectedValue))
break
else:
print("Try again")
|
[
"exemptionhunters@gmail.com"
] |
exemptionhunters@gmail.com
|
f9a3bff56e5ed0ba4f874a6571ecf9e908e79f95
|
de1f9d660cfb738afdb66e4a2d63a4577c07d9c6
|
/xcube/webapi/defaults.py
|
e2f0580e213aeaa838812aab943976b33b2c918e
|
[
"MIT"
] |
permissive
|
rabaneda/xcube
|
db47eb416db85df891a924063482a7943cae9d4f
|
0d38ca513987184dbc4a37da1616e4076964d0f1
|
refs/heads/master
| 2020-11-24T00:11:17.107630
| 2020-02-11T10:11:34
| 2020-02-11T10:11:34
| 227,877,138
| 0
| 0
|
MIT
| 2019-12-13T16:14:51
| 2019-12-13T16:14:50
| null |
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
# The MIT License (MIT)
# Copyright (c) 2019 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
SERVER_NAME = 'xcube Server'
SERVER_DESCRIPTION = f'WMTS, catalogue, data access, tile, feature, time-series services for' \
' xarray-enabled data cubes'
DEFAULT_ADDRESS = 'localhost'
DEFAULT_PORT = 8080
DEFAULT_TILE_CACHE_SIZE = "512M"
DEFAULT_UPDATE_PERIOD = 2.
DEFAULT_LOG_PREFIX = 'xcube-serve.log'
DEFAULT_TILE_COMP_MODE = 0
DEFAULT_TRACE_PERF = False
DEFAULT_CMAP_NAME = 'viridis'
DEFAULT_CMAP_VMIN = 0.
DEFAULT_CMAP_VMAX = 1.
DEFAULT_CMAP_WIDTH = 1
DEFAULT_CMAP_HEIGHT = 5
_GIGAS = 1000 * 1000 * 1000
FILE_TILE_CACHE_CAPACITY = 20 * _GIGAS
FILE_TILE_CACHE_ENABLED = False
FILE_TILE_CACHE_PATH = './image-cache'
MEM_TILE_CACHE_CAPACITY = 2 * _GIGAS
|
[
"norman.fomferra@gmail.com"
] |
norman.fomferra@gmail.com
|
408cbad0d506e1bd58f09db1ea6c6e52da7dad9b
|
6d9af7eade7ddc239ee6839a3766cb40c27f619d
|
/src/SimpleNN/utils/__init__.py
|
87538fa722eaeb1a1e91e46f7a4876a7b07fd807
|
[] |
no_license
|
lmj1029123/SingleNN
|
24dfe40c8d920e2a777742c885907c27484976f4
|
701752c3378e537387fa0dc2b410aec44577b7a3
|
refs/heads/master
| 2021-11-27T04:42:46.229290
| 2021-11-09T00:40:10
| 2021-11-09T00:40:10
| 246,651,678
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
def _gen_2Darray_for_ffi(arr, ffi, cdata="double"):
# Function to generate 2D pointer for cffi
shape = arr.shape
arr_p = ffi.new(cdata + " *[%d]" % shape[0])
for i in range(shape[0]):
arr_p[i] = ffi.cast(cdata + " *", arr[i].ctypes.data)
return arr_p
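# Illustrative usage sketch (added; not part of the original module). It shows
# how the helper above can hand a numpy array to C code via cffi as a double**.
# Any cdef'd C signature consuming the pointer is an assumption for demo only.
if __name__ == "__main__":
    import numpy as np
    from cffi import FFI

    ffi = FFI()
    # the array must be C-contiguous so each row is a valid double* slice
    data = np.ascontiguousarray(np.random.rand(3, 4))
    data_p = _gen_2Darray_for_ffi(data, ffi)
    # data_p can now be passed to any cdef'd C function expecting a double**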
|
[
"mingjie1@andrew.cmu.edu"
] |
mingjie1@andrew.cmu.edu
|
84579276ee1fb2606f7bcec64999a267eb075557
|
5802a63c9144cc4d6177de49a2187156899add4e
|
/count_coin.py
|
414378ffa8e83e7ececc30d3668de9ce06c7d019
|
[
"MIT"
] |
permissive
|
kyh980909/keras-yolo4
|
b82cb053f1f11092273db633b73e82d2ddcd7acd
|
da9fb99e5ef10ffd6301dd16f35cb888ee121d15
|
refs/heads/master
| 2022-12-29T16:31:23.428770
| 2020-10-15T10:08:03
| 2020-10-15T10:08:03
| 291,690,546
| 2
| 0
|
MIT
| 2020-08-31T10:57:49
| 2020-08-31T10:57:48
| null |
UTF-8
|
Python
| false
| false
| 6,940
|
py
|
import os
import colorsys
import collections
import io
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from yolo4.model import yolo_eval, yolo4_body
from yolo4.utils import letterbox_image
from PIL import Image, ImageFont, ImageDraw
from timeit import default_timer as timer
from PIL import Image
import cv2
import base64
import matplotlib.pyplot as plt
from decode_np import Decode
def get_class(classes_path):
classes_path = os.path.expanduser(classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
anchors_path = os.path.expanduser(anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def init():
# if (country == 'KRW'):
model_path = 'KRW_weight.h5'
anchors_path = 'model_data/yolo4_anchors.txt'
classes_path = 'model_data/KRW_classes.txt'
class_names = get_class(classes_path)
anchors = get_anchors(anchors_path)
num_anchors = len(anchors)
num_classes = len(class_names)
model_image_size = (416, 416)
    # score threshold and NMS IoU threshold
conf_thresh = 0.2
nms_thresh = 0.45
yolo4_model = yolo4_body(Input(shape=model_image_size + (3,)), num_anchors // 3, num_classes)
model_path = os.path.expanduser(model_path)
yolo4_model.load_weights(model_path)
    _decode = Decode(conf_thresh, nms_thresh, model_image_size, yolo4_model, class_names)  # the setup above takes a long time
# else:
# model_path = 'JPY_weight.h5'
# anchors_path = 'model_data/yolo4_anchors.txt'
# classes_path = 'model_data/JPY_classes.txt'
return _decode
def jpy_count_coin(img): # img : str
model_path = 'JPY_weight.h5'
anchors_path = 'model_data/yolo4_anchors.txt'
classes_path = 'model_data/JPY_classes.txt'
jpy_classes = ['JPY_500', 'JPY_100', 'JPY_50', 'JPY_10', 'JPY_1', 'JPY_5']
count = {}
result = {}
total = 0
class_names = get_class(classes_path)
anchors = get_anchors(anchors_path)
num_anchors = len(anchors)
num_classes = len(class_names)
model_image_size = (416, 416)
    # score threshold and NMS IoU threshold
conf_thresh = 0.2
nms_thresh = 0.8
yolo4_model = yolo4_body(Input(shape=model_image_size + (3,)), num_anchors // 3, num_classes)
model_path = os.path.expanduser(model_path)
yolo4_model.load_weights(model_path)
_decode = Decode(conf_thresh, nms_thresh, model_image_size, yolo4_model, class_names)
try:
        encoded_img = np.frombuffer(base64.b64decode(img), dtype=np.uint8)  # frombuffer: fromstring is deprecated for binary data
img = cv2.imdecode(encoded_img, cv2.IMREAD_COLOR)
except:
print('Open Error! Try again!')
else:
image, boxes, scores, classes = _decode.detect_image(img, True)
cv2.imwrite('predict.png',image)
with open('predict.png', 'rb') as img:
base64_string = base64.b64encode(img.read()).decode('utf-8')
count = collections.Counter(classes)
        for key in tuple(count.keys()):  # rename the dictionary keys
count[jpy_classes[key]] = count.pop(key)
for key, value in count.items():
total += int(key[str(key).find('_') + 1:]) * value
result['result'] = count
result['total'] = total
result['image'] = base64_string
# yolo4_model.close_session()
return result
def krw_count_coin(img, _decode): # img : str
# model_path = 'KRW_weight.h5'
# anchors_path = 'model_data/yolo4_anchors.txt'
# classes_path = 'model_data/KRW_classes.txt'
krw_classes = ['KRW_500', 'KRW_100', 'KRW_50', 'KRW_10']
count = {}
result = {}
total = 0
# class_names = get_class(classes_path)
# anchors = get_anchors(anchors_path)
# num_anchors = len(anchors)
# num_classes = len(class_names)
# model_image_size = (416, 416)
# conf_thresh = 0.2
# nms_thresh = 0.45
# yolo4_model = yolo4_body(Input(shape=model_image_size + (3,)), num_anchors // 3, num_classes)
# model_path = os.path.expanduser(model_path)
# yolo4_model.load_weights(model_path)
    # _decode = Decode(conf_thresh, nms_thresh, model_image_size, yolo4_model, class_names)  # the setup above takes a long time
print(_decode)
try:
        encoded_img = np.frombuffer(base64.b64decode(img), dtype=np.uint8)  # frombuffer: fromstring is deprecated for binary data
img = cv2.imdecode(encoded_img, cv2.IMREAD_COLOR)
except:
print('Open Error! Try again!')
else:
        image, boxes, scores, classes = _decode.detect_image(img, True)  # prediction step
cv2.imwrite('predict.png',image)
with open('predict.png', 'rb') as img:
base64_string = base64.b64encode(img.read()).decode('utf-8')
count = collections.Counter(classes)
        for key in tuple(count.keys()):  # rename the dictionary keys
count[krw_classes[key]] = count.pop(key)
for key, value in count.items():
total += int(key[str(key).find('_') + 1:]) * value
result['result'] = count
result['total'] = total
result['image'] = base64_string
# yolo4_model.close_session()
return result
if __name__ == '__main__':
model_path = 'JPY_weight.h5'
anchors_path = 'model_data/yolo4_anchors.txt'
classes_path = 'model_data/JPY_classes.txt'
jpy_classes = ['JPY_500', 'JPY_100', 'JPY_50', 'JPY_10', 'JPY_1', 'JPY_5']
count = {}
result = {}
total = 0
class_names = get_class(classes_path)
anchors = get_anchors(anchors_path)
num_anchors = len(anchors)
num_classes = len(class_names)
model_image_size = (416, 416)
    # score threshold and NMS IoU threshold
conf_thresh = 0.2
nms_thresh = 0.45
yolo4_model = yolo4_body(Input(shape=model_image_size + (3,)), num_anchors // 3, num_classes)
model_path = os.path.expanduser(model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
yolo4_model.load_weights(model_path)
_decode = Decode(conf_thresh, nms_thresh, model_image_size, yolo4_model, class_names)
img = input('Input image filename:')
try:
image = cv2.imread(img)
except:
print('Open Error! Try again!')
else:
image, boxes, scores, classes = _decode.detect_image(image, True)
count = collections.Counter(classes)
        for key in tuple(count.keys()):  # rename the dictionary keys
count[jpy_classes[key]] = count.pop(key)
for key, value in count.items():
total += int(key[str(key).find('_') + 1:]) * value
result['result'] = count
result['total'] = total
result['image'] = image
cv2.imwrite('result.png', image)
yolo4_model.close_session()
|
[
"kyh980909@gmail.com"
] |
kyh980909@gmail.com
|
45c06cf8191f1ab31422d1f210f7aeab0d004174
|
30adaebf1b0c1aa9fabc071de3e0ddc8904d6def
|
/curdpro/curdapp/models.py
|
ca76cf2baa1bebef37d63a7c4497b8492cb725ee
|
[] |
no_license
|
ashvani98/RemaoteRepo
|
74920d832a4dda1754ded60e8052732962208476
|
b60a99b12146a67563bc3a23cfadf03eafa8b187
|
refs/heads/master
| 2020-07-13T18:54:15.941855
| 2019-08-29T10:55:42
| 2019-08-29T10:55:42
| 205,134,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from django.db import models
class ProductData(models.Model):
product_id=models.IntegerField()
product_name=models.CharField(max_length=100)
product_cost=models.IntegerField()
product_color=models.CharField(max_length=100)
product_class=models.CharField(max_length=100)
|
[
"ashvani151998@gmail.com"
] |
ashvani151998@gmail.com
|
578defba3d381285781d5f20e54fc89d5560e111
|
6787ba6d82efe5152726799f073b291bf25d7846
|
/12 Ticketing Teatrale.py
|
90303f71401fd0ae5dbc0fe94887ef96ebbcc10c
|
[] |
no_license
|
stefanin/Python
|
0cfc4b484ac21bf3c2ddc4841e0a6070a422ded3
|
b10772fdece4d26d10c1a9fd804e3d56bcf3a98a
|
refs/heads/master
| 2021-06-14T10:01:01.076015
| 2021-01-18T20:59:57
| 2021-01-18T20:59:57
| 73,856,077
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,963
|
py
|
'''
Ticketing system
several shows (Spettacoli)
min 3, max 5 tickets
there is a box office (Botteghino)
the usher (Mascherina) checks access and starts the show
in Main, simulate the objects on site with ticket purchases for different shows
'''
#Ticket
class Ticket:
stato="Valido"
NomeSpettacolo=""
def __init__(self, Spettacolo):
self.NomeSpettacolo=Spettacolo.Nome
def convalida(self):
self.stato="Strappato"
def nullo(self):
self.stato="Nullo"
def __str__(self):
return str(self.stato)
#Spettacolo
class Spettacolo:
    Nome=""
    Partecipanti_min=0
    Partecipanti_max=0
    def __init__(self,Nome,Partecipanti_min,Partecipanti_max):
        '''Nome, Partecipanti_min, Partecipanti_max'''
        self.Nome=Nome
        self.Partecipanti_min=Partecipanti_min
        self.Partecipanti_max=Partecipanti_max
        # per-instance ticket list: a class-level list would be shared by every show
        self.nTicket=[]
    def __str__(self):
        #joining with commas would return a TUPLE, so concatenate strings instead
        return "Nome Spettacolo :"+str(self.Nome)+" Partecipanti : "+str(len(self.nTicket))+" Capienza sala : "+str(self.Partecipanti_max)
    def addTicket(self,Ticket):
        self.nTicket.append(Ticket)
    def verifica(self):
        # strictly less than: the hall is full once Partecipanti_max tickets are sold
        if len(self.nTicket) < self.Partecipanti_max:
            return True
        else:
            return False
#Botteghino
class Botteghino:
def vendeTicket(self,Spettacolo):
TK=Ticket(Spettacolo)
if Spettacolo.verifica():
Spettacolo.addTicket(TK)
return TK
else:
TK.nullo()
return TK
#Mascherina
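# A possible sketch of the Mascherina described in the header docstring (added,
# not in the original): it checks access by tearing valid tickets and rejecting
# void ones.
class Mascherina:
    def controlla(self, Ticket):
        if Ticket.stato == "Valido":
            Ticket.convalida()
            return True
        return False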
SP1=Spettacolo("primo Spettacolo",3,5)
SP2=Spettacolo("secondo Spettacolo",3,5)
Bott=Botteghino()
print(SP1)
print(SP2)
print("vendo primo biglietto SP1")
tk1=Bott.vendeTicket(SP1)
print(tk1)
print(SP1)
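# Illustrative continuation (added): selling past the hall capacity shows the
# Botteghino returning a void ("Nullo") ticket, and the Mascherina sketch above
# rejecting it at the door.
Masch = Mascherina()
print("la Mascherina accetta tk1:", Masch.controlla(tk1))
for _ in range(6):
    tk = Bott.vendeTicket(SP1)
print(SP1)
print(tk)  # once the hall is full, the extra tickets come back "Nullo"
print("la Mascherina accetta tk:", Masch.controlla(tk))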
|
[
"stefano.cornelli@gmail.com"
] |
stefano.cornelli@gmail.com
|
e86d1bf7639d80fb97b5fcf5f7062d7aafd28c43
|
c0fb3a3cd5a04377c7777c569732d24baed610c5
|
/Source/400hzSine.py
|
f3754344da930c4060d5f5f842d984d7f4568ae5
|
[] |
no_license
|
city028/AD9833
|
6391b434fc58a792ea24d41bc8a24f893aac42a8
|
587afabe2467f2292ba2865965e85a5b5ddf18da
|
refs/heads/master
| 2023-03-02T23:47:54.500167
| 2021-02-09T07:58:16
| 2021-02-09T07:58:16
| 299,289,985
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
import spidev
spi=spidev.SpiDev()
spi.open(0,0) #bus 0, device 0
spi.max_speed_hz=500000
def send_data(input):
tx_msb=input>>8
tx_lsb=input & 0xFF
spi.xfer([tx_msb,tx_lsb])
print(input)
send_data(0x0100) # Send a reset
send_data(0x1000) #MSB
send_data(0x4000) #Freq 0 reg for 400hz and 1400hz
send_data(0x0000) #LSB
send_data(0x5100) #Freq 0 reg = 400Hz
send_data(0x0008) # Sine
#send_data(0x0028) # Square ("Block")
#send_data(0x000A) # Triangle (the AD9833 has no sawtooth mode)
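# Hedged helper (added, not in the original): derives the two 14-bit FREQ0 words
# from a target frequency using the standard AD9833 tuning equation
# FREQREG = f_out * 2**28 / f_mclk. A 25 MHz master clock is assumed here;
# change MCLK_HZ if the board uses a different crystal.
MCLK_HZ = 25000000

def freq0_words(freq_hz):
    freq_word = int(round(freq_hz * (1 << 28) / float(MCLK_HZ)))
    lsb = (freq_word & 0x3FFF) | 0x4000          # low 14 bits, FREQ0 address bits
    msb = ((freq_word >> 14) & 0x3FFF) | 0x4000  # high 14 bits, FREQ0 address bits
    return lsb, msb

# e.g. freq0_words(400) -> (0x50C7, 0x4000), the LSB/MSB pair for an exact
# 400 Hz output (the hardcoded 0x5100 above is a rounded ~405 Hz equivalent)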
|
[
"noreply@github.com"
] |
city028.noreply@github.com
|
15abe32223879f445b92f406dc47cd0d5b9c1336
|
92ac28cb6c7f152acce172b99fecbfab5f33406f
|
/setup.py
|
fac3b612dce2b7c35f1574c0907cf8a37876c7bf
|
[
"MIT"
] |
permissive
|
UCSC-nanopore-cgl/NaRLE
|
d208cd0354d30716e72ea1b637ff6a804fc32ef7
|
d6676437044966cb1cf91dfa88b83a877f6e17d3
|
refs/heads/master
| 2020-03-30T17:58:44.652474
| 2019-08-02T17:59:43
| 2019-08-02T17:59:43
| 151,477,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
from version import version, required_versions
from setuptools import find_packages, setup
kwargs = dict(
name='toil-narle',
version=version,
description="UCSC CGL Nanopore Toil pipeiline",
author='UCSC Computational Genomics Lab',
author_email='tpesout@ucsc.edu',
url="https://github.com/",
install_requires=[x + y for x, y in required_versions.iteritems()],
tests_require=['pytest==2.8.3'],
package_dir={'': 'src'},
packages=find_packages('src'),
entry_points={
'console_scripts': ['toil-narle = narle.narle_pipeline:main']})
setup(**kwargs)
|
[
"tpesout@ucsc.edu"
] |
tpesout@ucsc.edu
|
bdbeeade7f8afe21aa4b98c1a0ede56b66180ff8
|
b751a7bd94a3c834c7385ac02abfc82d24813abb
|
/tg_bot/ugc/management/commands/bot.py
|
5c4014df28cfe79b47d012c7964f4ae743983281
|
[] |
no_license
|
Dreik2001/Telegram-bot
|
5f7d8ed6976d41e128a4b399d86ab189341e2b4e
|
18e14747f3f8e735248d0cfee1e07dc45818a4d8
|
refs/heads/master
| 2023-07-22T06:16:52.113158
| 2021-08-18T13:54:28
| 2021-08-18T13:54:28
| 341,983,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,564
|
py
|
from django.core.management.base import BaseCommand
from django.conf import settings
from telegram import Bot
from telegram import Update
from telegram.ext import CallbackContext
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.utils.request import Request
from django.db import migrations, transaction
from django.db import models
from ugc.models import Message
from ugc.models import Profile
from ugc.models import *
def log_errors(f):
def inner(*args, **kwargs):
try:
return f(*args, **kwargs)
        except Exception as e:
            # print the error before re-raising so it is actually logged
            print(f"Error : {e}")
            raise e
return inner
@log_errors
def do_echo(update: Update, context: CallbackContext):
chat_id = update.message.chat_id
text = update.message.text
name = update.message.from_user
update_id = update.update_id
message_id = update.message.message_id
date = update.message.date
user_name = update.message.from_user.name
print(update_id)
p, created = Profile.objects.get_or_create(
external_id=chat_id,
defaults={
'name': update.message.from_user.username,
}
)
p.save()
m = Message(
profile=p,
text=text,
)
m.save()
reply_text = """ "update_id": "{}"\n"message": \n\t"message_id": "{}",\n\t"from":\t {}\n\t"chat": \n\t"id": "{}",\n\t"first_name": "{}",\n\t"type": "{}" \n\t\t"date": "{}",\n\t\t"text": "{}" \n \t""".format(update_id, message_id, name, chat_id, user_name, chat_id, date, text)
update.message.reply_text(
text=reply_text,
)
@log_errors
def do_count(update: Update, context: CallbackContext):
chat_id = update.message.chat_id
p, created = Profile.objects.get_or_create(
external_id=chat_id,
defaults={
'name': update.message.from_user.username,
})
p.save()
count = Message.objects.filter(profile=p).count()
update.message.reply_text(
text=f'You have {count} messages',
)
class Command(BaseCommand):
help = 'Telegram-bot'
def handle(self, *args, **options):
request = Request(
connect_timeout=0.5,
read_timeout=1.0,
)
bot = Bot(
request=request,
token=settings.TOKEN,
base_url=settings.PROXY_URL,
)
print(bot.get_me())
updater = Updater(
bot=bot,
use_context=True,
)
message_handler = MessageHandler(Filters.text, do_echo)
updater.dispatcher.add_handler(message_handler)
message_handler2 = CommandHandler('count', do_count)
updater.dispatcher.add_handler(message_handler2)
updater.start_polling()
updater.idle()
|
[
"59831615+Dreik2001@users.noreply.github.com"
] |
59831615+Dreik2001@users.noreply.github.com
|
f645a1274b73ae88aa32ba6e11a861d4572d1d9e
|
1d0a223b743b005cd2ecd904337178e322e63534
|
/Project 1/ship.py
|
dc9ece1ccc021393510afbd14d201033e54678b0
|
[] |
no_license
|
Stefanroets180/all-my-Python-work
|
285607ce1ef50aac4897e0721ead4daca01fa6e0
|
d7937b51a309ebd051bef90e78154447b6e9a8ea
|
refs/heads/main
| 2023-03-27T10:15:54.793489
| 2021-03-18T12:26:20
| 2021-03-18T12:26:20
| 349,063,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, ai_settings, screen):
"""Initialise the ship and set it's initial position"""
super().__init__()
self.screen = screen
self.ai_settings = ai_settings
self.speed_up_scale = 1.1
# Load ship's image & get a rectangle
self.image = pygame.image.load('images/ships.png')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Every ship should appear at bottom of screen
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Save center ship coordinates
self.center = float(self.rect.centerx)
# Move flag
self.moving_right = False
self.moving_left = False
def update(self):
"""Update ship position considering the flag"""
# update center attribute not rect
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
# Update rect attribute based on self.center
self.rect.centerx = self.center
def blitme(self):
"""Draw the ship in current position"""
self.screen.blit(self.image, self.rect)
def center_ship(self):
"""Place the ship at the center of bottom side"""
self.center = self.screen_rect.centerx
|
[
"61413955+Stefanroets180@users.noreply.github.com"
] |
61413955+Stefanroets180@users.noreply.github.com
|
07103324f226a2fd634420711359e4642804f32a
|
f5812c5cc411780c98a26179b105d6e35e9e5dbc
|
/abstract_syntax_tree_implementation/mypy/cases/special/9.py
|
eb53e97b9ff1130994ad0e866a1e637b6a4a3331
|
[] |
no_license
|
simeonbabatunde/python2-interpreter
|
157751aa18e5106f0e11b8cbf65fa2202a5c82b9
|
8f70ce8860b55cbd209c7a6f77ccbdb3abf1a5b7
|
refs/heads/master
| 2020-04-08T02:53:49.120509
| 2019-07-21T22:54:31
| 2019-07-21T22:54:31
| 158,952,902
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
a=-2
b=3
a**=b+(4/2)-----4+(+4)
print a
x=3+2
z=x**-2
l=x*(z+.5*4)**0.5
l//=a+x*z
d=9
k=4+d
d**=k---10.7*(d//(k-2.3))
k+=-19%2.6-(+d)
a%=b*-d
z/=l**2
print z
x-= a+z-----l*(x**0.9)
|
[
"babatunde.simeon@gmail.com"
] |
babatunde.simeon@gmail.com
|
def6c18b46463b5c3cd481ceefdafb7b8c4e49d6
|
98a936d5372294ed892a9bf9cf98646c72af515c
|
/usage/lab/explorer_usage.py
|
fd4a3b0be9636dbe6d5abd61ffe6a45858e3c81c
|
[
"MIT"
] |
permissive
|
edublancas/pipeline
|
f6d22ad07b134be98c139d1de6ca7d8321072ba8
|
5bef04d77fdadc1dc4ec22b9b346f0a062cca1ce
|
refs/heads/master
| 2021-05-15T01:09:50.072378
| 2016-12-29T05:45:48
| 2016-12-29T05:45:48
| 59,692,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
from pipeline import ExperimentExplorer
# load everything
explorer = ExperimentExplorer()
# just load results from my_experiment_a
explorer = ExperimentExplorer('my_experiment_a')
# load results from my_experiment_a and my_experiment_b
explorer = ExperimentExplorer(['my_experiment_a', 'my_experiment_b'])
# compute new metric for every model
explorer.apply(lambda m: m.compute_new_metric)
# store this new metric for every model affected
explorer.save()
# after plotting, analyzing results, I want to get the
# trained model
model = explorer.get('some_id')
metric = model.compute_metric()
print 'metric is {}'.format(metric)
# the problem is: should I pickle models? I should NOT pickle everything
# but if the logger is smart enough I may be able to just pickle the top models
# another option is to just re-train the model...
# independent of the options the API should be transparent for the user
# since he does not need to know and just be able to recover the object
# - problem with re-training: I need the data. Assuming the data is still the
# same I can do that, but if the numbers have changed and the columns
# are named the same I'm gonna have a baaad time
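# a sketch of one middle ground (added, purely illustrative -- `get_all` and
# `compute_metric` are assumed names, not a real API): pickle only the top-k
# models and re-train the long tail on demand
#
#   import pickle
#   top = sorted(explorer.get_all(), key=lambda m: m.compute_metric(),
#                reverse=True)[:5]
#   for i, m in enumerate(top):
#       with open('model_{}.pkl'.format(i), 'wb') as f:
#           pickle.dump(m, f)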
|
[
"edu.blancas@gmail.com"
] |
edu.blancas@gmail.com
|
2b74c60edf0de797825dd1369704f2e7b7152f70
|
12e93d6308a5b90e5ef3d23bf85dcc5e05a5f160
|
/Automatic summarization/keyword2_summary.py
|
6075d37ef52dc94fcbd20ab766e6a52ca58f1733
|
[] |
no_license
|
787264137/ks
|
b7795df1e6d85a0bc68318c112cab151032a2ed2
|
0a8613fdc220eeaa8c1bca2bf029b33971acceb9
|
refs/heads/master
| 2020-03-25T02:05:09.710199
| 2018-08-09T09:24:49
| 2018-08-09T09:24:49
| 143,273,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
import jieba
from heapq import nlargest
from collections import defaultdict
import re
from jieba.analyse import textrank
def is_chinese(uchar):
if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
return True
else:
return False
def get_sentences(doc):
line_break = re.compile('[\r\n]')
delimiter = re.compile('[。?!]')
sentences = []
for line in line_break.split(doc):
line = line.strip()
if not line:
continue
for sent in delimiter.split(line):
sent = sent.strip()
if not sent:
continue
sentences.append(sent)
return sentences
def get_ch_stopwords(filepath):
with open(filepath, 'r', encoding='utf-8') as f:
chinese_stopwords = f.read().split()
return chinese_stopwords
def summarize(text, n):
freq = dict(textrank(text, topK=15, withWeight=True))
print(freq)
sents = get_sentences(text)
assert n <= len(sents)
word_sent = [jieba.lcut(s) for s in sents]
ranking = defaultdict(int)
for i, word in enumerate(word_sent):
for w in word:
if w in freq:
ranking[i] += freq[w]
sents_idx = rank(ranking, n)
return [sents[j] for j in sents_idx]
def rank(ranking, n):
return nlargest(n, ranking, key=ranking.get)
if __name__ == '__main__':
with open("data/news3.txt", "r", encoding='utf-8') as myFile:
text = myFile.read().replace('\n', '')
stopwords = get_ch_stopwords('data/chinese_stopwords')
res = summarize(text, 2)
f = open("data/keyword2_summary3.txt", "w", encoding='utf-8')
print('Extracted key sentences:\n')
for i in range(len(res)):
print(res[i])
f.write(res[i] + '\n')
f.close()
|
[
"787264137@qq.com"
] |
787264137@qq.com
|
b53210f45388c5820faf0c133ad6ef73039b955b
|
9a034b12c845d01f36aff2e5fdbf8486a9e8a642
|
/faketrudy/trudy_api/migrations/0005_child_tweets.py
|
58e5ce1a352b4425e107065b667d213e62e02fbe
|
[] |
no_license
|
piyush6191996/Django-Rest-Framework
|
2d1cd89de700e7aa68f93f9104418c05c70e800a
|
3950a72bed52fd4bcbec3de439fe9f1130df10f9
|
refs/heads/master
| 2020-03-15T06:00:31.362680
| 2018-05-07T19:09:17
| 2018-05-07T19:09:17
| 131,998,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,464
|
py
|
# Generated by Django 2.0.2 on 2018-04-10 08:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('trudy_api', '0004_auto_20180410_1229'),
]
operations = [
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('age', models.IntegerField()),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('twitter_token', models.CharField(blank=True, max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tweets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tweets', models.TextField()),
('sentiment', models.CharField(max_length=255)),
('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trudy_api.Child')),
],
),
]
|
[
"you@example.com"
] |
you@example.com
|
ce4cba3c23046221904b82fbb36ea24824d9a0fa
|
ddd7553bb79d76c461af7ba51a538803f1faf987
|
/google-cloud-media_translation/synth.py
|
da47dee2a67bc54b775fbeca1bbaa7c54879f51c
|
[
"Apache-2.0"
] |
permissive
|
Mukesh23singh/google-cloud-ruby
|
af160add68337f84f9aa65c2eacc2cd16fea359f
|
b3c69da0654de3a4b87a35c1f010d74c7e26415b
|
refs/heads/master
| 2021-11-05T01:20:53.088104
| 2021-10-21T22:03:22
| 2021-10-21T22:03:22
| 86,056,707
| 1
| 0
|
Apache-2.0
| 2021-10-22T01:41:21
| 2017-03-24T10:34:10
|
Ruby
|
UTF-8
|
Python
| false
| false
| 1,883
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICMicrogenerator()
library = gapic.ruby_library(
"mediatranslation", "v1beta1",
generator_args={
"ruby-cloud-gem-name": "google-cloud-media_translation",
"ruby-cloud-title": "Media Translation",
"ruby-cloud-description": "Media Translation API delivers real-time speech translation to your content and applications directly from your audio data. Leveraging Google’s machine learning technologies, the API offers enhanced accuracy and simplified integration while equipping you with a comprehensive set of features to further refine your translation results. Improve user experience with low-latency streaming translation and scale quickly with straightforward internationalization.",
"ruby-cloud-env-prefix": "MEDIA_TRANSLATION",
"ruby-cloud-wrapper-of": "v1beta1:0.0",
"ruby-cloud-product-url": "https://cloud.google.com/media-translation/",
"ruby-cloud-api-id": "mediatranslation.googleapis.com",
"ruby-cloud-api-shortname": "mediatranslation",
}
)
s.copy(library, merge=ruby.global_merge)
|
[
"noreply@github.com"
] |
Mukesh23singh.noreply@github.com
|
dfd0bf57dfb3c58a777557d9cfebea1f99a530e9
|
03d416a5c425d044db2b308f2af8f91be46bf78e
|
/src/tests/pipelines/data_science/test_pipeline.py
|
0c92effde1409f4880a043de0fa78c4fd4737676
|
[] |
no_license
|
stroblme/partiqleDTR
|
13a854c9a3577535f3e8311b1b87a539c64b3994
|
d87e5652085bcb1848f30aadde848fd530e984c2
|
refs/heads/main
| 2023-05-12T10:24:36.614872
| 2023-03-24T09:23:21
| 2023-03-24T09:23:21
| 468,662,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
"""
This is a boilerplate test file for pipeline 'data_science'
generated using Kedro 0.17.7.
Please add your pipeline tests here.
Kedro recommends using `pytest` framework, more info about it can be found
in the official documentation:
https://docs.pytest.org/en/latest/getting-started.html
"""
|
[
"melvin.strobl@kit.edu"
] |
melvin.strobl@kit.edu
|
819b55755f8fcd9e90785574f832d92631a04d84
|
3cc431d1ac836bfa0363d51a1c1198dec3e82b5c
|
/jnt/matching/classifier.py
|
8561aa10c9426594cea9767d82aa71e3b5746435
|
[
"Apache-2.0"
] |
permissive
|
tudarmstadt-lt/vec2synset
|
6a1a19e39210b96799fee8f36b859c65c6d0f967
|
ea99ea80d258127a0cd6c7688d6b277eed22104d
|
refs/heads/master
| 2021-01-18T02:00:51.004042
| 2016-05-26T22:05:00
| 2016-05-26T22:05:00
| 44,816,882
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,174
|
py
|
import xml.etree.ElementTree as et
from jnt.common import load_voc
from jnt.wn import sense2offset
import codecs
from pandas import read_csv, merge, Series
import argparse
from os.path import splitext
from os.path import join
from jnt.common import exists
from subprocess import Popen, PIPE
import os
from os.path import splitext
from jnt.morph import get_stoplist
from jnt.patterns import re_number
ADAGRAM_VOC = "/Users/alex/tmp/adagram/HugeModel-voc.csv"
DEFAULT_MAPPING = "/Users/alex/work/joint/src/data/best-matching-out.csv"
DYLD_LIBRARY = "/Users/alex/tmp/adagram/AdaGram.jl/lib/"
ADAGRAM_SCRIPTS_DIR = "/Users/alex/work/joint/src/jnt/adagram/"
_adagram_voc = load_voc(ADAGRAM_VOC, silent=True)
_stoplist = get_stoplist()
def filter_voc(text):
text_adagram = [w.lower() for w in text.split(" ") if w in _adagram_voc]
return " ".join(text_adagram)
TARGET_BEG = "((("
TARGET_END = ")))"
def filter_context(context, target, remove_target, context_size):
context = [w for w in context.split(" ") if w.strip() != "" and w not in _stoplist and not re_number.match(w)]
if remove_target:
context = [w for w in context if w != target]
context = list(set(context))
context = ' '.join(context[-context_size:])
return context
def get_context(context, remove_target, context_size):
x = context.split(TARGET_BEG)
if len(x) == 2:
left = x[0]
y = x[1].split(TARGET_END)
if len(y) == 2:
target = y[0].strip()
right = y[1]
left = filter_context(left, target, remove_target, context_size)
right = filter_context(right, target, remove_target, context_size)
res = left + " " + right
return res
else:
return context
else:
return context
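# Illustrative note (added): contexts are expected to mark the target word as
#   "left context ((( target ))) right context"
# e.g. get_context("I went to the ((( bank ))) of the river", True, 100)
# strips the ((( ))) markers and returns the stopword- and number-filtered
# left and right context words joined with a space.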
def semeval_xml2csv(train_fpath, output_fpath, remove_target=True, context_size=100):
tree = et.parse(train_fpath)
root = tree.getroot()
with codecs.open(output_fpath, "w", "utf-8") as out:
for child in root:
if child.tag == "lexelt":
if child.attrib["pos"] != "n": continue
word = child.attrib["item"][:-2]
for gchild in child:
if gchild.tag != "instance": continue
context = {"word": word}
for ggchild in gchild:
if ggchild.tag == "context":
context["context"] = filter_voc(get_context(ggchild.text, remove_target, context_size))
elif ggchild.tag == "answer":
context["wn_ids"] = sense2offset(word, ggchild.attrib["wn"]).strip()
if len(context["wn_ids"]) == 0: continue
out.write("%(word)s\t%(wn_ids)s\t%(context)s\n" % context)
print "Output:", output_fpath
def evaluate_disambiguated(mapping_fpath, disambiguated_fpath, output_fpath):
# Merge predictions and golden standard data
mapping_df = read_csv(mapping_fpath, encoding='utf-8', delimiter="\t", error_bad_lines=False)
disambiguated_df = read_csv(disambiguated_fpath, encoding='utf-8', delimiter="\t", error_bad_lines=False)
res_df = merge(disambiguated_df, mapping_df, how='inner', on=["word","adagram_id"])
# Calculate performance metrics
res_df = res_df.fillna("")
res_df["gold_wn_match"] = Series("", res_df.index)
res_df["gold_bn_match"] = Series("", res_df.index)
for i, row in res_df.iterrows():
golden_ids = row.golden_id.split(",")
res_df.loc[i, "gold_wn_match"] = row.wordnet_id in golden_ids
res_df.loc[i, "gold_bn_match"] = row.babelnet_id in golden_ids
print "# input texts:", len(disambiguated_df)
print "# babelnet mappings: %d, %.2f%%" % ((i+1), 100*(float(i+1)/ len(disambiguated_df)))
print "Accuracy (wordnet all babelnet): %.3f" % (float(sum(res_df.gold_wn_match)) / (i+1))
print "# wordnet mappings: %d, %.2f%%" % (sum(res_df.wordnet_id != ""), 100.* sum(res_df.wordnet_id != "") / len(disambiguated_df))
print "Accuracy (wordnet): %.3f, %d" % (float(sum(res_df.gold_wn_match))/sum(res_df.wordnet_id != ""), sum(res_df.gold_wn_match))
print "Accuracy (babelnet): %.3f, %d" % (float(sum(res_df.gold_bn_match))/sum(res_df.babelnet_id != ""), sum(res_df.gold_bn_match))
print sum(res_df.golden_id == res_df.babelnet_id), len(res_df)
# Save results
res_df.to_csv(output_fpath, sep="\t", encoding="utf-8", float_format='%.3f', index=False)
print "Output:", output_fpath
return res_df
def groupby_evaluation(res_df, output_fpath):
with codecs.open(output_fpath, "w", "utf-8") as out:
out.write("word\tgolden_id\tadagram_id\tcontext\tadagram_prob\tbabelnet_id\twordnet_id\tbabelnet_match\twordnet_match\n")
babelnet_match_num = 0.
wordnet_match_num = 0.
text_num = 0.
for key, rows in res_df.groupby(["word","golden_id","adagram_id","context","adagram_prob"]):
text_num += 1
babelnet_ids = set()
wordnet_ids = set()
for i, row in rows.iterrows():
if row.babelnet_id != "": babelnet_ids.add(row.babelnet_id)
if row.wordnet_id != "": wordnet_ids.add(row.wordnet_id)
golden_ids = set(key[1].split(","))
babelnet_match = int(len(golden_ids.intersection(babelnet_ids)) > 0)
if babelnet_match: babelnet_match_num += 1
wordnet_match = int(len(golden_ids.intersection(wordnet_ids)) > 0)
if wordnet_match: wordnet_match_num += 1
if len(wordnet_ids) == 0:
continue
out.write("%s\t%s\t%s\t%s\t%.3f\t%s\t%s\t%d\t%d\n" % (key[0], ",".join(golden_ids), key[2], key[3], key[4],
",".join(babelnet_ids), ",".join(wordnet_ids), babelnet_match, wordnet_match))
print "Accuracy (babelnet): %.2f" % (babelnet_match_num/text_num)
print "Accuracy (wordnet): %.2f" % (wordnet_match_num/text_num)
print "Output:", output_fpath
def adagram_disambiguate(contexts_fpath, model_fpath, output_fpath, nearest_neighbors="false"):
env = dict(os.environ)
env["DYLD_LIBRARY_PATH"] = DYLD_LIBRARY
p = Popen(["julia",
join(ADAGRAM_SCRIPTS_DIR, "matching.jl"),
contexts_fpath,
model_fpath,
output_fpath,
nearest_neighbors],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env)
stdout, err = p.communicate(b"")
rc = p.returncode
print stdout
print err
print "Output:", output_fpath
print "Output exits:", exists(output_fpath)
def classify(contexts_fpath, model_fpath, mapping_fpath, output_fpath=""):
""" Performs WSD of the contexts provided in the format 'word<TAB>sense_id<TAB>context' """
base_name = splitext(contexts_fpath)[0] if output_fpath == "" else output_fpath
ag_fpath = base_name + "-ag.csv"
adagram_disambiguate(contexts_fpath, model_fpath, ag_fpath)
print "Disambiguated:", ag_fpath
ag_wn_bn_fpath = base_name + "-ag-bn-wn.csv"
res_df = evaluate_disambiguated(mapping_fpath, ag_fpath, ag_wn_bn_fpath)
print "Disambiguated with mappings:", ag_wn_bn_fpath
ag_wn_bn_group_fpath = base_name + "-ag-bn-wn-group.csv"
groupby_evaluation(res_df, ag_wn_bn_group_fpath)
print "Disambiguated with mapping, grouped:", ag_wn_bn_group_fpath
return ag_wn_bn_group_fpath
def main():
parser = argparse.ArgumentParser(description='Perform disambiguation with BabelNet/WordNet sense labels.')
parser.add_argument('input', help='Path to a file with input file "word<TAB>golden-sense-ids<TAB>context".')
parser.add_argument('-o', help='Output file. Default -- next to input file.', default="")
args = parser.parse_args()
output_fpath = splitext(args.input)[0] + "-disambiguated.csv" if args.o == "" else args.o
print "Input: ", args.input
print "Output: ", output_fpath
print "Mapping:", DEFAULT_MAPPING
classify(args.input, DEFAULT_MAPPING, output_fpath)
if __name__ == '__main__':
main()
|
[
"panchenko.alexander@gmail.com"
] |
panchenko.alexander@gmail.com
|
16be4290b3fdfd3f6eb08c72a466b2c6d6190bc4
|
81d24bba8ebf75cbc2d3ec0739785f7a878f9943
|
/pythonServer/Env/bin/python-config
|
d90237bfd7a840a1990ee24b2bd6183aa1491ada
|
[] |
no_license
|
jrunzer26/DS-Project
|
7524c5b42440d89a705fbe9f312845c1e97bf01d
|
11b9ca96bd8ca42e1f203a73299006555adb1909
|
refs/heads/master
| 2021-05-01T18:49:22.418868
| 2016-12-02T17:04:24
| 2016-12-02T17:04:24
| 71,805,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,361
|
#!/home/jason/git-projects/todoist-python/Env/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"jason.runzer@uoit.net"
] |
jason.runzer@uoit.net
|
|
a5fe3ba649ac6b4571630741be2938955695f3ce
|
d9a1364eb329a13baa46256474d50beeb8213803
|
/os_moudle/os_moudle.py
|
116b11f667167e070c90fcc62c40f43669beea90
|
[] |
no_license
|
fengchenzi/python_script
|
c650adb65583f9daae6fb706b6d7937bc806aa49
|
dad02682df0f5d5622958336b1e02441ef579d5d
|
refs/heads/master
| 2021-08-24T07:25:43.952158
| 2017-12-08T16:30:46
| 2017-12-08T16:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,235
|
py
|
#--**coding:utf-8**--
import os
# get the current working directory
path = os.getcwd()
# list all file names in that directory
all_text = os.listdir(path)
# os.path.isfile() checks whether a path points to an existing file
file_name = "2.txt"
split_sign = "/"
last_path = path + split_sign + file_name
another_name = "4.txt"
def write_file(filepath,filename,gradle_content):
    '''
    :param filepath: file path
    If the file already exists at the path it is overwritten with the content;
    if it does not exist it is created first and then written.
    '''
    # open(..., "w") creates the file when it is missing, so a single
    # write covers both the existing-file and the new-file case
    file = open(filename, "w")
    file.write(gradle_content)
    file.close()
def remove_file(filepath,filename):
    '''
    :param filepath: path
    :param filename: file name
    If the file exists at the path, delete it.
    '''
    if os.path.isfile(filepath):
        os.remove(filepath)
        print "file deleted successfully"
    else:
        print "no file at that path"
def is_dir(filepath):
    '''
    Check whether the given path is a directory
    '''
    dir_path = os.path.isdir(filepath)
    if dir_path:
        print "the path is a directory"
        os.listdir(filepath)
    else:
        print "the path is not a directory"
def return_dir_and_filename(path):
    '''
    Return the (directory, file name) pair for a path
    '''
    dir_name = os.path.split(path)
    return dir_name
def excute_shell(path):
    dir_name = os.path.split(path)
    dir_name = dir_name[1]
    # os.system() needs a command string; listing the directory is one example
    dd = os.system("ls " + dir_name)
    print dir_name
def printdd():
JDB = {'name': 'jDB', 'path': 'PATH_CODE'+'/jDB', 'srcdir': 'src:adp:toolkit'}
PROJECT_MAIN = [JDB]
PATH_MANIFEST = PROJECT_MAIN[0]['path'] + '/src/main/AndroidManifest.xml'
if __name__ == '__main__':
#write_file(last_path,"2.txt","gradle2.14.1-all")
#remove_file(last_path,file_name)
#is_dir(last_path)
    dir_name = return_dir_and_filename(path)
excute_shell(path)
|
[
"hemq@jiedaibao.com"
] |
hemq@jiedaibao.com
|
1366aa1de8129754f664f4f8f93049b9743d4ae2
|
0242ad0d80fb96664efef2a60d22a7792f0a31b6
|
/better_game_images/make_labels.py
|
0752dd72fbfa036023a2635ca8a07ececa7d2824
|
[] |
no_license
|
jlwatson/game-SET-match
|
fbc7e6b957bdb3ef8afaa55effb36d522c084bb4
|
61520495359d56a0a227cf456bdc2fc22f104856
|
refs/heads/master
| 2021-03-27T20:15:24.484540
| 2017-06-10T06:11:56
| 2017-06-10T06:11:56
| 90,781,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from os import walk
for (dirpath, dirnames, filenames) in walk('./'):
for filename in filenames:
if filename.endswith(".JPG"):
first_part = filename.split('.')[0]
            # create an empty label file next to each image; "with" closes the handle
            with open(first_part + '_labels.txt', "w+") as f:
                pass
|
[
"kmblake@stanford.edu"
] |
kmblake@stanford.edu
|
219448833a4b9ad9f26eaf0f891f257abf72202a
|
8360669dfe430c74a1f3c60f1e4bc9e8c41837bc
|
/arduino_project/api/models.py
|
198d56dd91044290c5abe126af5fbf9636442009
|
[] |
no_license
|
BaptistG/object_connect
|
9ae47e42173cf866019cd4b3bf3d430ca2ad8069
|
f633c7caa39e93dab0ea9c3747de1e051e458bb9
|
refs/heads/master
| 2021-02-03T21:28:53.624573
| 2020-03-02T14:22:28
| 2020-03-02T14:22:28
| 243,544,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
from django.db import models
# Create your models here.
class Alerts(models.Model):
id = models.AutoField(primary_key=True)
user_id = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'id: {}, user: {}'.format(self.id, self.user_id)
class Users(models.Model):
id = models.AutoField(primary_key=True)
username = models.TextField()
address = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'id: {}, username: {}'.format(self.id, self.username)
|
[
"baptist.guerin@gmail.com"
] |
baptist.guerin@gmail.com
|
c7eb9200f3645abd0e6d3d2dc3a84af2b4d742d6
|
937c0d7c0ed0224fed676fe630b78d8c6cdc1cfe
|
/usr/share/dh-python/dhpython/pydist.py
|
7a98f0c242099edaae612380e294c69aee2c3624
|
[] |
no_license
|
Sayardiss/filesystem-rpi-projet2su
|
5ec5aad1704dbe37d18b50ba83ab67a87199af16
|
b7b7a1d93dec4f96673ecf11cd290e1db0657d59
|
refs/heads/master
| 2022-11-25T14:20:35.867296
| 2018-02-07T13:24:37
| 2018-02-07T13:24:37
| 118,009,115
| 2
| 1
| null | 2022-11-21T04:32:49
| 2018-01-18T16:36:17
|
Python
|
UTF-8
|
Python
| false
| false
| 13,315
|
py
|
# Copyright © 2010-2013 Piotr Ożarowski <piotr@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import re
from functools import partial
from os.path import exists, isdir, join
from subprocess import PIPE, Popen
from dhpython import PKG_PREFIX_MAP, PUBLIC_DIR_RE,\
PYDIST_DIRS, PYDIST_OVERRIDES_FNAMES, PYDIST_DPKG_SEARCH_TPLS
from dhpython.version import get_requested_versions, Version
from dhpython.tools import memoize
log = logging.getLogger('dhpython')
PYDIST_RE = re.compile(r"""
(?P<name>[A-Za-z][A-Za-z0-9_.\-]*) # Python distribution name
\s*
(?P<vrange>(?:-?\d\.\d+(?:-(?:\d\.\d+)?)?)?) # version range
\s*
(?P<dependency>(?:[a-z][^;]*)?) # Debian dependency
(?: # optional upstream version -> Debian version translator
;\s*
(?P<standard>PEP386)? # PEP-386 mode
\s*
(?P<rules>(?:s|tr|y).*)? # translator rules
)?
""", re.VERBOSE)
REQUIRES_RE = re.compile(r'''
(?P<name>[A-Za-z][A-Za-z0-9_.]*) # Python distribution name
\s*
(?P<enabled_extras>(?:\[[^\]]*\])?) # ignored for now
\s*
\(? # optional parenthesis
(?: # optional minimum/maximum version
(?P<operator><=?|>=?|==|!=)
\s*
(?P<version>(\w|[-.])+)
(?: # optional interval minimum/maximum version
\s*
,
\s*
(?P<operator2><=?|>=?|==|!=)
\s*
(?P<version2>(\w|[-.])+)
)?
)?
\)? # optional closing parenthesis
''', re.VERBOSE)
DEB_VERS_OPS = {
'==': '=',
'<': '<<',
'>': '>>',
}
def validate(fpath):
"""Check if pydist file looks good."""
with open(fpath, encoding='utf-8') as fp:
for line in fp:
line = line.strip('\r\n')
if line.startswith('#') or not line:
continue
if not PYDIST_RE.match(line):
log.error('invalid pydist data in file %s: %s',
fpath.rsplit('/', 1)[-1], line)
return False
return True
@memoize
def load(impl):
"""Load iformation about installed Python distributions.
:param impl: interpreter implementation, f.e. cpython2, cpython3, pypy
:type impl: str
"""
fname = PYDIST_OVERRIDES_FNAMES.get(impl)
if exists(fname):
to_check = [fname] # first one!
else:
to_check = []
dname = PYDIST_DIRS.get(impl)
if isdir(dname):
to_check.extend(join(dname, i) for i in os.listdir(dname))
fbname = '/usr/share/dh-python/dist/{}_fallback'.format(impl)
if exists(fbname): # fall back generated at dh-python build time
to_check.append(fbname) # last one!
result = {}
for fpath in to_check:
with open(fpath, encoding='utf-8') as fp:
for line in fp:
line = line.strip('\r\n')
if line.startswith('#') or not line:
continue
dist = PYDIST_RE.search(line)
if not dist:
raise Exception('invalid pydist line: %s (in %s)' % (line, fpath))
dist = dist.groupdict()
name = safe_name(dist['name'])
dist['versions'] = get_requested_versions(impl, dist['vrange'])
dist['dependency'] = dist['dependency'].strip()
if dist['rules']:
dist['rules'] = dist['rules'].split(';')
else:
dist['rules'] = []
result.setdefault(name, []).append(dist)
return result
def guess_dependency(impl, req, version=None, bdep=None,
accept_upstream_versions=False):
bdep = bdep or {}
log.debug('trying to find dependency for %s (python=%s)',
req, version)
if isinstance(version, str):
version = Version(version)
# some upstreams have weird ideas for distribution name...
name, rest = re.compile('([^!><= \(\)\[]+)(.*)').match(req).groups()
# TODO: check stdlib and dist-packaged for name.py and name.so files
req = safe_name(name) + rest
data = load(impl)
req_d = REQUIRES_RE.match(req)
if not req_d:
log.info('please ask dh_python3 author to fix REQUIRES_RE '
'or your upstream author to fix requires.txt')
raise Exception('requirement is not valid: %s' % req)
req_d = req_d.groupdict()
name = req_d['name']
details = data.get(name.lower())
if details:
for item in details:
if version and version not in item.get('versions', version):
# rule doesn't match version, try next one
continue
if not item['dependency']:
return # this requirement should be ignored
if item['dependency'].endswith(')'):
# no need to translate versions if version is hardcoded in
# Debian dependency
return item['dependency']
if req_d['version'] and (item['standard'] or item['rules']) and\
req_d['operator'] not in (None, '!='):
o = _translate_op(req_d['operator'])
v = _translate(req_d['version'], item['rules'], item['standard'])
d = "%s (%s %s)" % (item['dependency'], o, v)
if req_d['version2'] and req_d['operator2'] not in (None,'!='):
o2 = _translate_op(req_d['operator2'])
v2 = _translate(req_d['version2'], item['rules'], item['standard'])
d += ", %s (%s %s)" % (item['dependency'], o2, v2)
return d
elif accept_upstream_versions and req_d['version'] and \
req_d['operator'] not in (None,'!='):
o = _translate_op(req_d['operator'])
d = "%s (%s %s)" % (item['dependency'], o, req_d['version'])
if req_d['version2'] and req_d['operator2'] not in (None,'!='):
o2 = _translate_op(req_d['operator2'])
d += ", %s (%s %s)" % (item['dependency'], o2, req_d['version2'])
return d
else:
if item['dependency'] in bdep:
if None in bdep[item['dependency']] and bdep[item['dependency']][None]:
return "{} ({})".format(item['dependency'], bdep[item['dependency']][None])
# if arch in bdep[item['dependency']]:
# TODO: handle architecture specific dependencies from build depends
# (current architecture is needed here)
return item['dependency']
# search for Egg metadata file or directory (using dpkg -S)
query = PYDIST_DPKG_SEARCH_TPLS[impl].format(ci_regexp(safe_name(name)))
log.debug("invoking dpkg -S %s", query)
process = Popen("/usr/bin/dpkg -S %s" % query,
shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
result = set()
stdout = str(stdout, 'utf-8')
for line in stdout.split('\n'):
if not line.strip():
continue
result.add(line.split(':')[0])
if len(result) > 1:
log.error('more than one package name found for %s dist', name)
else:
return result.pop()
else:
log.debug('dpkg -S did not find package for %s: %s', name, stderr)
pname = sensible_pname(impl, name)
log.info('Cannot find package that provides %s. '
'Please add package that provides it to Build-Depends or '
'add "%s %s" line to %s or add proper '
' dependency to Depends by hand and ignore this info.',
name, safe_name(name), pname, PYDIST_OVERRIDES_FNAMES[impl])
# return pname
def parse_pydep(impl, fname, bdep=None, options=None,
depends_sec=None, recommends_sec=None, suggests_sec=None):
depends_sec = depends_sec or []
recommends_sec = recommends_sec or []
suggests_sec = suggests_sec or []
public_dir = PUBLIC_DIR_RE[impl].match(fname)
ver = None
if public_dir and public_dir.groups() and len(public_dir.group(1)) != 1:
ver = public_dir.group(1)
guess_deps = partial(guess_dependency, impl=impl, version=ver, bdep=bdep,
accept_upstream_versions=getattr(
options, 'accept_upstream_versions', False))
result = {'depends': [], 'recommends': [], 'suggests': []}
modified = section = False
processed = []
with open(fname, 'r', encoding='utf-8') as fp:
for line in fp:
line = line.strip()
if not line or line.startswith('#'):
processed.append(line)
continue
if line.startswith('['):
section = line[1:-1].strip()
processed.append(line)
continue
if section:
if section in depends_sec:
result_key = 'depends'
elif section in recommends_sec:
result_key = 'recommends'
elif section in suggests_sec:
result_key = 'suggests'
else:
processed.append(line)
continue
else:
result_key = 'depends'
dependency = guess_deps(req=line)
if dependency:
result[result_key].append(dependency)
modified = True
else:
processed.append(line)
if modified and public_dir:
with open(fname, 'w', encoding='utf-8') as fp:
fp.writelines(i + '\n' for i in processed)
return result
def safe_name(name):
"""Emulate distribute's safe_name."""
return re.compile('[^A-Za-z0-9.]+').sub('_', name).lower()
def sensible_pname(impl, egg_name):
"""Guess Debian package name from Egg name."""
egg_name = safe_name(egg_name).replace('_', '-')
if egg_name.startswith('python-'):
egg_name = egg_name[7:]
return '{}-{}'.format(PKG_PREFIX_MAP[impl], egg_name.lower())
def ci_regexp(name):
"""Return case insensitive dpkg -S regexp."""
return ''.join("[%s%s]" % (i.upper(), i) if i.isalpha() else i for i in name.lower())
PRE_VER_RE = re.compile(r'[-.]?(alpha|beta|rc|dev|a|b|c)')
GROUP_RE = re.compile(r'\$(\d+)')
def _pl2py(pattern):
"""Convert Perl RE patterns used in uscan to Python's
>>> print(_pl2py('foo$3'))
foo\g<3>
"""
return GROUP_RE.sub(r'\\g<\1>', pattern)
def _translate(version, rules, standard):
"""Translate Python version into Debian one.
>>> _translate('1.C2betac', ['s/c//gi'], None)
'1.2beta'
>>> _translate('5-fooa1.2beta3-fooD',
... ['s/^/1:/', 's/-foo//g', 's:([A-Z]):+$1:'], 'PEP386')
'1:5~a1.2~beta3+D'
>>> _translate('x.y.x.z', ['tr/xy/ab/', 'y,z,Z,'], None)
'a.b.a.Z'
"""
for rule in rules:
# uscan supports s, tr and y operations
if rule.startswith(('tr', 'y')):
# Note: no support for escaped separator in the pattern
pos = 1 if rule.startswith('y') else 2
tmp = rule[pos + 1:].split(rule[pos])
version = version.translate(str.maketrans(tmp[0], tmp[1]))
elif rule.startswith('s'):
# uscan supports: g, u and x flags
tmp = rule[2:].split(rule[1])
pattern = re.compile(tmp[0])
count = 1
if tmp[2:]:
flags = tmp[2]
if 'g' in flags:
count = 0
if 'i' in flags:
pattern = re.compile(tmp[0], re.I)
version = pattern.sub(_pl2py(tmp[1]), version, count)
else:
log.warn('unknown rule ignored: %s', rule)
if standard == 'PEP386':
version = PRE_VER_RE.sub(r'~\g<1>', version)
return version
def _translate_op(operator):
"""Translate Python version operator into Debian one.
>>> _translate_op('==')
'='
>>> _translate_op('<')
'<<'
>>> _translate_op('<=')
'<='
"""
return DEB_VERS_OPS.get(operator, operator)
|
[
"sayardiss@gmail.com"
] |
sayardiss@gmail.com
|
4237fae63bb4894a098e704c02963bb2bbb19c3b
|
535e7dee7621ea71aeb2aae6c564b1ab6e090445
|
/fin project/dist/Student DB.app/Contents/Resources/gui_search.py
|
4e5acd3dbe7ab00b84840f519810c14f250ecf23
|
[] |
no_license
|
tunguyen17/CSC271Final
|
29eca736a996ac6739ee79a3386047711fa4503b
|
32bb77177177d701e1e5ae10325ba366b94bc666
|
refs/heads/master
| 2021-01-11T04:18:08.802878
| 2017-03-15T04:45:28
| 2017-03-15T04:45:28
| 71,211,090
| 0
| 2
| null | 2016-12-09T05:46:58
| 2016-10-18T05:09:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,600
|
py
|
import Tkinter as tk
import Database as DB
import Widgets as wd
import gui_list as gl
import NewStudent as ns
import EditTopics as et
import Export_CSV as EC
import tkFileDialog
class gui_search:
'App for creating searching window'
################# CONSTRUCTOR #################
def __init__(self, db):
'''
Initialize a gui for the insertion of students infomation'
INPUT: db - the databse
'''
#create a root container
self.root = tk.Tk()
self.root.title("CSC-271 Database Concept App")
#Labels: to the left of the window
search_label = wd.LabelWidget(self.root, 0, 0, "Search Past Records or Add New")
search_label.grid(columnspan=4)
search_label.config(width=30)
#Entries: to the right of the window
name_label = wd.LabelWidget(self.root, 0, 1, "Name")
name_bar = wd.EntryWidget(self.root, 1, 1, "")
name_bar.grid(columnspan=3)
name_bar.config(width=20)
#Topic
topic_label = wd.LabelWidget(self.root, 0, 2, "Topic")
OPTIONS = [i[0] for i in db.getTopics()]
topic_bar = wd.OptionsWidget(self.root, OPTIONS ,1, 2)
topic_bar.grid(columnspan=3)
topic_bar.config(width=20)
#Date
date_label = wd.LabelWidget(self.root, 0, 3, "Date (YMD)")
mm_bar = wd.EntryWidget(self.root, 2, 3, "")
dd_bar = wd.EntryWidget(self.root, 3, 3, "")
yy_bar = wd.EntryWidget(self.root, 1, 3, "")
# dd_bar.grid(columnspan=1)
# mm_bar.grid(columnspan=1)
# yy_bard.grid(columnspan=1)
mm_bar.config(width=4)
dd_bar.config(width=4)
yy_bar.config(width=7)
show_var = tk.StringVar()
show_checkbox = tk.Checkbutton(self.root, variable=show_var, \
onvalue="yes", offvalue = "no", text="No show")
show_checkbox.deselect() #set the check button to offvalue
show_checkbox.grid(column = 2, row=4)
show_checkbox.grid(columnspan=2)
# no_show_label = wd.LabelWidget(self.root, 0, 4, "No show")
# no_show_label.grid(columnspan=3)
show_checkbox.config(state = tk.DISABLED)
showpref_var = tk.StringVar()
def prefchange():
if showpref_var.get() == 'yes':
show_checkbox.config(state = tk.ACTIVE)
else:
show_checkbox.config(state = tk.DISABLED)
#check button for the show preference
showpref_checkbox = tk.Checkbutton(self.root, variable=showpref_var, \
onvalue="yes", offvalue = "no", text="Show preference", command=prefchange)
showpref_checkbox.deselect() #set the check button to offvalue
showpref_checkbox.grid(column = 0, row=4)
showpref_checkbox.grid(columnspan=2)
#Log display to the gui
log = wd.LabelWidget(self.root, 0, 7, "Status")
log.config(width = 30)
#having the log display to span 2 columns
log.grid(columnspan = 4)
## todo: reimplement
def search_fn():
'method to call for the search button'
name_text = name_bar.getVal()
topic_text = topic_bar.getVal()
dd_text = dd_bar.getVal()
mm_text = mm_bar.getVal()
yy_text = yy_bar.getVal()
if showpref_var.get() == 'yes':
noshow_val = show_var.get()
else:
noshow_val = 'maybe'
try:
if (yy_text == '' and (mm_text + dd_text) != '') or \
(mm_text == '' and dd_text != ''):
raise ValueError('not a valid date!')
#interaction with the Database object
gl.GuiList(self.root).draw_table(db, \
db.search_general(name_text, topic_text, dd_text,\
mm_text, yy_text, noshow_val))
#report that the insertion is success
log.set("Success")
except Exception as err:
#If insertion fail, report to the Log display
print 'ERROR!', err
# raise err
log.set(str(err))
def add_fn():
'method to call for the add button'
ns.NewStudent(self.root, db)
def edit_tp_fn():
            'method to call for the edit topics button'
et.EditTopics(self.root, db, topic_bar)
def export_csv():
            'method to call for the export button'
EC.Export_CSV(self.root, db)
#A Submit button
search_button = tk.Button(self.root, text="Search", command = search_fn)
search_button.grid(column = 0, row=5, columnspan=2)
add_button = tk.Button(self.root, text="Add Student", command = add_fn)
add_button.grid(column = 2, row=5, columnspan=2)
add_button = tk.Button(self.root, text="Edit Topics", command = edit_tp_fn)
add_button.grid(column = 0, row=6, columnspan=2)
add_button = tk.Button(self.root, text="Export/Reset DB", command = export_csv)
add_button.grid(column = 2, row=6, columnspan=2)
self.root.grab_set()
# self.root.lift()
#make the window appears
screen_width = self.root.winfo_screenwidth()
screen_height = self.root.winfo_screenheight()
self.root.geometry("290x230+%d+%d" % (screen_width/2-285, screen_height/2-230))
        self.root.lift()
self.root.mainloop()
if __name__ == "__main__":
#connecting with the database
db = DB.Database('database/cup.db')
new = gui_search(db)
|
[
"tanguyen17@wabash.edu"
] |
tanguyen17@wabash.edu
|
45d362a2454044a2fdcb122385e78d244e3206c1
|
a9d4dcc11c909fbf57432c8236c9ae293c38e3c7
|
/search/binary_search_rotation_point.py
|
1a970e1452286449a1dc2a205a132defec60788f
|
[] |
no_license
|
shuaib88/algorithms_review
|
8a375c81c1f3e3813da7429e4b26bdb4b44b13c8
|
fb8274fdfa6894c5be02b1233169b3a7c01b7fa9
|
refs/heads/master
| 2021-01-20T03:04:29.448197
| 2017-08-31T01:02:55
| 2017-08-31T01:02:55
| 101,346,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
def rotation_point_of(word_array, left_index=0, right_index=None):
    if right_index is None:
right_index = len(word_array) - 1
midpoint = (right_index + left_index)/2
if right_index - left_index == 0:
return midpoint
if word_array[midpoint]>word_array[midpoint+1]:
return midpoint
else:
if word_array[midpoint] > word_array[right_index]:
return rotation_point_of(word_array,midpoint,right_index)
else:
return rotation_point_of(word_array,left_index,midpoint)
# test array
# word_array = [
# 'ptolemaic',
# 'retrograde',
# 'supplant',
# 'undulate',
# 'xenoepist',
# 'asymptote', # <-- rotates here!
# 'babka',
# 'banoffee',
# 'engender',
# 'karpatka',
# 'othellolagkage',
# ]
word_array = [
'xenoepist',
'asymptote', # <-- rotates here!
]
#execute
# word_dict = known_word_dict(word_array)
# word_dict.rotation_point_of(word_array)
# print len(word_array)
print rotation_point_of(word_array,right_index=len(word_array)-1)
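# Hand-traced run on the 11-word test array commented out above (illustrative):
#   left=0, right=10 -> mid=5: 'asymptote' < 'babka', 'asymptote' < right end -> recurse(0, 5)
#   left=0, right=5  -> mid=2: 'supplant' < 'undulate' but > 'asymptote'      -> recurse(2, 5)
#   left=2, right=5  -> mid=3: 'undulate' < 'xenoepist' but > 'asymptote'     -> recurse(3, 5)
#   left=3, right=5  -> mid=4: 'xenoepist' > 'asymptote' -> returns 4, the last index of the first run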
|
[
"shuaib.sva@gmail.com"
] |
shuaib.sva@gmail.com
|
c1671d32787dc01342e8a26cc279a71bb969e491
|
f6a83325e44b59f74ea5783f4ee605bad877f771
|
/mqc/models/mqc_dialysis.py
|
bbf5b85ae96ee10516437f1c431530e1bd9b380f
|
[] |
no_license
|
js-superion/addons-custom
|
4a4e4fd19790122568dba72ed184f6e327db51c1
|
53c36a586ecbbc37484bda1fe6aac0c4db5ab740
|
refs/heads/master
| 2020-06-14T13:58:00.732334
| 2017-11-06T03:44:18
| 2017-11-06T03:44:18
| 75,174,024
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,508
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields,api
class Dialysis(models.Model):
_name = "mqc.dialysis" #dialysis 透析
_description = u"肾病学质控"
_inherits = {'mqc.mqc': 'mqc_id'}
mqc_id = fields.Many2one('mqc.mqc', u'报表id', required=True,
ondelete='cascade')
_rec_name = 'year_month'
year_month = fields.Char(u'年月', default=lambda self: self.env['utils'].get_zero_time().strftime('%Y-%m'))
dept_doc_num = fields.Integer(u'肾内科专科医师人数')
dept_nur_num = fields.Integer(u'专科护士人数')
    # Glomerular disease (non-CKD stage 5)
out_case = fields.Integer(u'出院病人数')
avg_charge = fields.Float(u'出院者平均医疗费用')
avg_days = fields.Integer(u'出院者平均住院日')
accord_diag_case = fields.Integer(u'入出院诊断符合数')
kidney_exam_case = fields.Integer(u'肾活检患者数')
exam_complications = fields.Integer(u'肾活检术后并发症例数')
pressure_control_case = fields.Integer(u'目标血压控制例数')
iga_rate = fields.Float(u'初治IgA肾病患者进入肾活检临床路径百分率(%)')
    ln_rate = fields.Float(u'初治狼疮性肾炎进入肾活检临床路径百分率(%)')  # ln = lupus nephritis
    # Acute renal failure
out_case1 = fields.Integer(u'出院病人数')
cured_case = fields.Integer(u'治愈好转例数')
avg_charge1 = fields.Float(u'出院者平均医疗费用')
avg_days1 = fields.Float(u'出院者平均住院日')
kidney_exam_case1 = fields.Integer(u'肾活检患者数')
exam_complications1 = fields.Integer(u'肾活检术后并发症例数')
finish_cp_case = fields.Integer(u'开展完成临床路径例数')#cp clinic pathway
acpt_dialysis_case = fields.Float(u'接受血液净化治疗患者百分率(%)')
    # Chronic renal failure (CKD stage 5)
out_case2 = fields.Integer(u'出院病人数')
avg_charge2 = fields.Float(u'出院者平均医疗费用')
avg_days2 = fields.Float(u'出院者平均住院日')
acpt_pd_case = fields.Integer(u'接受腹透管置入术患者数')
acpt_iaf_case = fields.Integer(u'接受动静脉内瘘成形术患者数')#Internal arteriovenous fistula
    acpt_dvt_case = fields.Integer(u'接受血透长期深静脉导管置入患者数')  # abbreviations: acpt = accept, dvt = long-term deep venous catheter placement
    # Outpatient maintenance hemodialysis; mohc = Ministry of Health of the People's Republic of China
hd_num = fields.Integer(u'HD台数', )
hdf_num = fields.Integer(u'HDF台', )
crrt_num = fields.Integer(u'CRRT台', )
dialysis_doc_num= fields.Integer(u'血液净化专职医生总数', )
dialysis_nurse_num= fields.Integer(u'血液净化护士总数', )
dialysis_pat_num = fields.Integer(u'长期血透患者数', )
new_pat_num = fields.Integer(u'新增患者数', )
dead_pat = fields.Integer(u'死亡患者数', )
total_case = fields.Integer(u'血透总例次', )
mohc_newpats= fields.Integer(u'新报患者数', )
mohc_uppats = fields.Integer(u'更新患者数', )
mohc_val_rate = fields.Float(u'填报合格率(%)', ) #validate rate
dialyzer_reuse_rate = fields.Float(u'透析器复用患者百分率(%)', )
week_excess12h_rate = fields.Float(u'血透时间>12h/周患者百分率(%)', )
weight_val_rate = fields.Float(u'千体重达标率(%)', )
weight_excess3kg_rate = fields.Integer(u'透析间期体重增加>3公斤患者数', )
    # Outpatient long-term peritoneal dialysis; pd = peritoneal dialysis
create_type = fields.Selection([('1', u'是'), ('0', u'否')],
u'是否开展腹膜透析')
long_pd_case = fields.Integer(u'长期腹膜透析患者数', )
pd_newpats = fields.Integer(u'新增患者数', )
pd_cured_case = fields.Integer(u'退出患者数(不含死亡)', )
pd_death_case = fields.Integer(u'死亡患者数', )
pd_mohc_newpats = fields.Integer(u'新报患者数', )
pd_mohc_uppats = fields.Integer(u'更新患者数', )
pd_mohc_rate = fields.Float(u'填报合格率(%)', )
    peritonitis_case = fields.Integer(u'腹透相关腹膜炎发生例数', )  # peritonitis
@api.multi
def unlink(self):
for dialysis in self:
dialysis.mqc_id.unlink()
return super(Dialysis, self).unlink()
# _sql_constraints = [
# ('year_month_uniq',
# 'UNIQUE (year_month)',
# u'本月只能上报一次数据')
# ]
|
[
"zhixiao.jiang1984@gmail.com"
] |
zhixiao.jiang1984@gmail.com
|
1d6940ef1927469941d56d4883b2b0b4d2c2da54
|
fcf704aaa4ec7827aa826c341c89f7d5fcb9477e
|
/lang/programming/python/深入理解神经网络:从逻辑回归到CNN/neural_network-neural_network_code-master/neural_network_code/画图脚本/6-15.py
|
5f3b4eae99092bd5c51caa217cbf698fd9eb1f23
|
[
"MIT"
] |
permissive
|
dlxj/doc
|
afe470f465617bd239b5b4dc8b546eb82cf115c6
|
b4a9ddcc2820fd0e3c9bbd81c26a8fa35f348c23
|
refs/heads/master
| 2023-08-05T06:20:05.573100
| 2023-08-05T03:48:29
| 2023-08-05T03:48:29
| 203,584,726
| 10
| 0
| null | 2022-12-15T08:14:58
| 2019-08-21T12:58:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 20:34:43 2018
@author: chaos
"""
from book_draw_util import *
leaky_alpha=0.1
def leaky_relu(x):
if x >= 0:
result = x
else:
result = leaky_alpha * x
return result
def leaky_relu_d(x):
return 1 if x >= 0 else leaky_alpha
fun = np.vectorize(leaky_relu)
derivative = np.vectorize(leaky_relu_d)
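# np.vectorize lets the scalar activation (f(x) = x for x >= 0, else 0.1*x)
# and its derivative (1 or 0.1) be applied elementwise to the x grid below.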
xrange = [-1, 1.01]
yrange = [-0.1, 1.01]
fig = plt.figure(figsize=SQUARE_FIG_SIZE)
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
ax.axis[:].set_visible(False)
ax.axis["x"] = ax.new_floating_axis(0,0)
ax.axis["x"].set_axisline_style("-|>", size = 1.0)
ax.axis["y"] = ax.new_floating_axis(1,0)
ax.axis["y"].set_axisline_style("-|>", size = 1.0)
ax.axis["x"].set_axis_direction("bottom")
ax.axis["y"].set_axis_direction("right")
x = np.arange(xrange[0], xrange[1], 0.0001)
ax.plot(x, fun(x), "k")
ax.plot(x, derivative(x), "k--")
ax.legend([r"$f\left(x\right)$", r"$f^{'}\left(x\right)$"], fontsize=LEGEND_FONT_SIZE)
ax.grid(True)
ax.set_xlim(xrange)
ax.set_ylim(yrange)
ax.margins(0)
plt.savefig(os.path.join(all_pic_path, '6-15.png'), format='png')
|
[
"123435@qq.com"
] |
123435@qq.com
|
4e89f39175e02454086fc58f75665f0d3696b569
|
422f6d1af9473ead5e4e2d1bae6c07799c7b8350
|
/hw9/reps.py
|
b0923fa950177a20db9f4d51af6449c57c30f6b4
|
[
"MIT"
] |
permissive
|
jenyu7/hw9
|
467c2f9e748ff624c72d61bd317981db6c405a38
|
cadb2ae1b31e3608d9ad1d977fbc697390004884
|
refs/heads/main
| 2023-01-28T19:34:37.926463
| 2020-12-10T01:31:41
| 2020-12-10T01:31:41
| 320,022,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
import numpy as np
from tqdm import tqdm
#Set up Numpy random generator
rg = np.random.default_rng()
def draw_parametric_bs_reps_mle(
mle_fun, gen_fun, data, args=(), size=1, progress_bar=False
):
"""Draw parametric bootstrap replicates of maximum likelihood estimator.
Parameters
----------
mle_fun : function
Function with call signature mle_fun(data, *args) that computes
a MLE for the parameters
gen_fun : function
Function to randomly draw a new data set out of the model
distribution parametrized by the MLE. Must have call
signature `gen_fun(*params, size)`.
    data : one-dimensional Numpy array
Array of measurements
args : tuple, default ()
Arguments to be passed to `mle_fun()`.
size : int, default 1
Number of bootstrap replicates to draw.
progress_bar : bool, default False
Whether or not to display progress bar.
Returns
-------
output : numpy array
Bootstrap replicates of MLEs.
"""
params = mle_fun(data, *args)
if progress_bar:
iterator = tqdm(range(size))
else:
iterator = range(size)
return np.array(
[mle_fun(gen_fun(*params, size=len(data), *args)) for _ in iterator]
)
#Generates samples from the model distribution.
def sp_gamma(beta, alpha, size):
return rg.gamma(alpha, 1/beta, size=size)
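# Usage sketch (not part of the original module; `gamma_mle` below is a
# hypothetical stand-in estimator built on scipy.stats.gamma.fit):
#
#   import scipy.stats as st
#
#   def gamma_mle(data):
#       alpha, _, scale = st.gamma.fit(data, floc=0)
#       return 1 / scale, alpha   # (beta, alpha), the order sp_gamma expects
#
#   data = sp_gamma(beta=2.0, alpha=3.0, size=150)
#   bs_reps = draw_parametric_bs_reps_mle(gamma_mle, sp_gamma, data, size=100)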
|
[
"jenyu@caltech.edu"
] |
jenyu@caltech.edu
|
4242a0c891ffa7bfd2f82e35dca78d0aafa55245
|
0dd17de11c792ff64c4582595cee786b80ab4fcd
|
/ml/pred.py
|
40b00a04eb6aa546ccd9f3c8f52e881f52a613b3
|
[
"Apache-2.0"
] |
permissive
|
birenbaruah/FakeBananas
|
12861272b6911acc447c28d7ba8cc1f010d9a882
|
ac8a95c37f495045f074ae20a768a465bd990da0
|
refs/heads/master
| 2021-03-02T10:58:57.466215
| 2020-03-08T21:17:43
| 2020-03-08T21:17:43
| 245,863,260
| 0
| 0
| null | 2020-03-08T18:05:35
| 2020-03-08T18:05:34
| null |
UTF-8
|
Python
| false
| false
| 4,956
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import relevant packages and modules
from ml.util import *
# ex: from test.add import add
import random
import tensorflow as tf
def mlPred():
# Prompt for mode
mode = input('mode (load / train)? ')
# Set file names
file_train_instances = "ml/train_stances.csv"
file_train_bodies = "ml/train_bodies.csv"
file_test_instances = "ml/test_stances_unlabeled.csv"
file_test_bodies = "ml/test_bodies.csv"
file_predictions = 'ml/predictions_test.csv'
# Initialise hyperparameters
r = random.Random()
lim_unigram = 5000
target_size = 4
hidden_size = 100
train_keep_prob = 0.6
l2_alpha = 0.00001
learn_rate = 0.01
clip_ratio = 5
batch_size_train = 500
epochs = 90
# Load data sets
raw_train = FNCData(file_train_instances, file_train_bodies)
raw_test = FNCData(file_test_instances, file_test_bodies)
n_train = len(raw_train.instances)
# Process data sets
train_set, train_stances, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer = \
pipeline_train(raw_train, raw_test, lim_unigram=lim_unigram)
feature_size = len(train_set[0])
test_set = pipeline_test(raw_test, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer)
# Define model
# Create placeholders
features_pl = tf.placeholder(tf.float32, [None, feature_size], 'features')
stances_pl = tf.placeholder(tf.int64, [None], 'stances')
keep_prob_pl = tf.placeholder(tf.float32)
# Infer batch size
batch_size = tf.shape(features_pl)[0]
# Define multi-layer perceptron
hidden_layer = tf.nn.dropout(tf.nn.relu(tf.contrib.layers.linear(features_pl, hidden_size)), keep_prob=keep_prob_pl)
logits_flat = tf.nn.dropout(tf.contrib.layers.linear(hidden_layer, target_size), keep_prob=keep_prob_pl)
logits = tf.reshape(logits_flat, [batch_size, target_size])
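    # Architecture: bag-of-words/TF-IDF features -> one ReLU hidden layer
    # (hidden_size units, with dropout) -> linear layer over target_size=4 stances.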
# Define L2 loss
tf_vars = tf.trainable_variables()
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf_vars if 'bias' not in v.name]) * l2_alpha
# Define overall loss
    loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=stances_pl, logits=logits) + l2_loss)
# Define prediction
softmaxed_logits = tf.nn.softmax(logits)
predict = tf.arg_max(softmaxed_logits, 1)
# Load model
if mode == 'load':
with tf.Session() as sess:
load_model(sess)
print("Model loaded.")
print("Now running predictions...")
# Predict
test_feed_dict = {features_pl: test_set, keep_prob_pl: 1.0}
# run predictions
test_pred = sess.run(predict, feed_dict=test_feed_dict)
print("Test_pred:", test_pred)
print("Preditions complete.")
# Train model
if mode == 'train':
# Define optimiser
opt_func = tf.train.AdamOptimizer(learn_rate)
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tf_vars), clip_ratio)
opt_op = opt_func.apply_gradients(zip(grads, tf_vars))
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Perform training
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
# sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
total_loss = 0
indices = list(range(n_train))
r.shuffle(indices)
for i in range(n_train // batch_size_train):
batch_indices = indices[i * batch_size_train: (i + 1) * batch_size_train]
batch_features = [train_set[i] for i in batch_indices]
batch_stances = [train_stances[i] for i in batch_indices]
batch_feed_dict = {features_pl: batch_features, stances_pl: batch_stances, keep_prob_pl: train_keep_prob}
_, current_loss = sess.run([opt_op, loss], feed_dict=batch_feed_dict)
total_loss += current_loss
# save model to disk
save_path = saver.save(sess, "ml/teamB/model.ckpt")
print("Model saved in file: %s" % save_path)
# Predict
test_feed_dict = {features_pl: test_set, keep_prob_pl: 1.0}
test_pred = sess.run(predict, feed_dict=test_feed_dict)
# Save predictions
save_predictions(test_pred, file_predictions)
return test_pred
|
[
"kastanvday@gmail.com"
] |
kastanvday@gmail.com
|
b1299976449f3acc5b3b422a4394e675d89a1d16
|
ddc67279fee4305cddb663b2d76e5a82b0f35ba4
|
/ftp_server.py
|
85c98ff980fcd5b9fd09f196f4a24357d34e4f33
|
[] |
no_license
|
whisperscan/ftpv2.0
|
6b6fa331a7b8d9e65931de15f4ce56c8227afc4b
|
734491e76b153e6beb88bd09e42e63498e0a170c
|
refs/heads/master
| 2021-01-22T06:02:30.510709
| 2015-04-30T15:44:54
| 2015-04-30T15:44:54
| 34,862,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,980
|
py
|
#
# written by caramel
#
import SocketServer
import os
import sys
import stat
import time
import MySQLdb
#-- send error --
def send_err(sock_handler,err_info):
#{
try:
#{
sock_handler.request.sendall(err_info)
#}
except Exception,e:
#{
print e
#}
#}
#-- list process --
# data struct
# __________________________
# | R | request data |
# --------------------------
# ______ request data struct _______
# | |
# | option | S |
# |----------------------------------|
# | filename | value |
# |__________________________________|
# ______ request data struct _______
# | |
# | option | begin |
# |----------------------------------|
# | filename | value |
# |__________________________________|
# _____ transport data struct ______
# | |
# | option | value |
# |----------------------------------|
# | filename | value |
# |----------------------------------|
# | path | value |
# |__________________________________|
# _______ acking data struct _______
# | |
# | option | value |
# |----------------------------------|
# | result | value |
# |__________________________________|
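# Example exchange (illustrative, following the diagrams above):
#   client -> server : "R,option,ls,cmd,ls -l"
#   list_process(['R','option','ls','cmd','ls -l']) pairs the tokens after the
#   leading 'R' into {'option': 'ls', 'cmd': 'ls -l'} (dispatched in request_func)
#   server -> client : "A,option,A,result,1"   (built by ret_value below)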
def list_process(data_list):
#{
ret_data = {}
print data_list
for i in range(1,len(data_list),2):
#{
ret_data[data_list[i]] = data_list[i + 1]
#}
return(ret_data)
#}
#-- ret_value --
def ret_value(sock_handler,r_value):
#{
value_for_return = "A,option,A,result," + str(r_value)
sock_handler.request.sendall(value_for_return)
#}
#-- is exists file --
def is_exist_file(filename):
#{
if(filename):
#{
isExists = os.path.exists(filename)
return(isExists)
#}
else:
#{
return(0)
#}
#}
#-- cmd list --
def cmd_list(sock_handler,cmd_string):
#{
list_string = os.popen(cmd_string)
if(list_string):
#{
send_string = list_string.read()
#}
else:
#{
send_string = 'no items'
#}
print send_string
sock_handler.request.sendall(send_string)
#}
#-- get file from client --
def get_file_from_client(sock_handler,filename):
#{
if(filename):
#{
sock_handler.request.sendall('begin')
with open(filename,'wb') as t_file_fd:
#{
while True:
            #{
rcv_buff = sock_handler.request.recv(65535)
if(rcv_buff == 'EOF'):
#{
print 'put < %s > success' %(filename)
                    break
#}
t_file_fd.write(rcv_buff)
#}
#}
#}
else:
#{
sock_handler.request.sendall('null')
#}
#}
#-- begin transport file --
def send_file_to_client(sock_handler,filename):
#{
if(filename):
#{
with open(filename,'rb') as t_file_fd:
#{
read_buff = t_file_fd.read()
sock_handler.request.sendall(read_buff)
#}
time.sleep(1)
sock_handler.request.sendall('EOF')
print 'send ' + filename
#}
else:
#{
sock_handler.request.sendall('null')
#}
#}
#-- create a directory --
def mk_dir(sock_handler,dir_name):
#{
print is_exist_file(dir_name)
if(not is_exist_file(dir_name)):
#{
os.mkdir(dir_name)
os.chdir(dir_name)
#create welcome file if not exists
        with open('Welcome','a'):
#{
os.utime('Welcome',None)
#}
path = os.path.abspath('.')
os.chdir(os.path.dirname(path))
ret_value(sock_handler,1)
#}
else:
#{
ret_value(sock_handler,0)
#}
#}
#-- enter the directory --
def cd_dir(sock_handler,dir_name):
#{
if(is_exist_file(dir_name)):
#{
if(dir_name == '..'):
#{
path = os.path.abspath('.')
par_path = os.path.dirname(path)
            os.chdir(par_path)
            #verify the working directory actually changed to the parent
            if(os.path.abspath('.') == par_path):
#{
ret_value(sock_handler,1)
#}
else:
#{
ret_value(sock_handler,0)
#}
#}
else:
#{
os.chdir(dir_name)
path = os.path.abspath('.')
print path
if(os.path.basename(path) == dir_name):
#{
ret_value(sock_handler,1)
#}
else:
#{
ret_value(sock_handler,0)
#}
#}
#}
else:
#{
ret_value(sock_handler,0)
#}
#}
#-- request_func --
def request_func(sock_handler,dict_data):
#{
if(dict_data['option'] == 'ls'):
#{
cmd_list(sock_handler,dict_data['cmd'])
#}
elif(dict_data['option'] == 'S'):
#{
ret_value(sock_handler,is_exist_file(dict_data['filename']))
#}
elif(dict_data['option'] == 'begin'):
#{
send_file_to_client(sock_handler,dict_data['filename'])
#}
elif(dict_data['option'] == 'put'):
#{
get_file_from_client(sock_handler,dict_data['filename'])
#}
elif(dict_data['option'] == 'mkdir'):
#{
mk_dir(sock_handler,dict_data['dir_name'])
#}
elif(dict_data['option'] == 'cd'):
#{
print 'enter'
cd_dir(sock_handler,dict_data['dir_name'])
#}
else:
    #{
send_err(sock_handler,'error option')
return(0)
#}
#}
#-- SocketServer class --
class MyTCPHandler(SocketServer.BaseRequestHandler):
#{
def handle(self):
#{
while True:
#{
self.data = self.request.recv(65535).strip()
if not self.data:
break
print self.client_address
data_list = self.data.split(',')
dict_data = list_process(data_list)
print dict_data
if(data_list[0] == 'R'):
request_func(self,dict_data)
else:
return(0)
#}
#}
#}
#-- listening_func --
def listening_func():
#{
print 'waiting a connecting'
ADDR,PORT = '',50001
sockfd = SocketServer.ThreadingTCPServer((ADDR,PORT),MyTCPHandler)
sockfd.serve_forever()
#}
#-- main function --
# begin main()
if __name__ == '__main__':
#{
#judge ftp path
isExists = os.path.exists('/tmp/ftp')
if(not isExists):
#{
os.makedirs('/tmp/ftp')
os.chmod('/tmp/ftp',stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
#}
os.chdir('/tmp/ftp')
#create welcome file if not exists
isExists = os.path.exists('/tmp/ftp/Welcome')
if(not isExists):
#{
    with open('/tmp/ftp/Welcome','a'):
os.utime('/tmp/ftp/Welcome',None)
#}
#listening
listening_func()
#}
# end main()
|
[
"skynets@yeah.net"
] |
skynets@yeah.net
|
e5d9f30b7435e9196bc3d0df1cdc962e0012a99c
|
e6f1e23409bfcba563dcfc9dbf6d19c5c99fc0d5
|
/binary classification.py
|
666e40eb2fd9dd8d43b579935fd778cfc3d6c99d
|
[] |
no_license
|
AmiraHmd/gdML
|
4378fae056f5ff88cdd1a7d86c68c28f5d16e80d
|
a4e6c3f495d02c2b0c43700843290c89c30f2fc1
|
refs/heads/master
| 2022-10-24T01:13:48.355828
| 2020-06-24T10:12:35
| 2020-06-24T10:12:35
| 272,714,651
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier
# In[4]:
# Generate random data: 100 samples, 2 classes, 2 features x0 and x1
np.random.seed(1)
X, y = make_classification(n_samples=100,n_features=2, n_redundant=0, n_informative=1, n_clusters_per_class=1)
# In[5]:
# Visualize the data
plt.figure(num=None, figsize=(8, 6))
plt.scatter(X[:,0], X[:, 1], marker = 'o', c=y, edgecolors='k')
plt.xlabel('X0')
plt.ylabel('X1')
X.shape
# In[6]:
# Build a model using the 'log' loss (i.e. logistic regression) trained with SGD
model = SGDClassifier(max_iter=1000, eta0=0.001, loss='log')
# In[7]:
model.fit(X, y)
print('score:', model.score(X, y))
# In[8]:
# Visualize the decision regions
h = .02
colors = "bry"
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
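# The meshgrid spans the whole feature plane; the model's predictions over it
# are reshaped and drawn below as filled contours, i.e. the decision regions.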
# In[9]:
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# In[10]:
for i, color in zip(model.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired, edgecolor='black', s=20)
# In[ ]:
|
[
"hamadiamira2@gmail.com"
] |
hamadiamira2@gmail.com
|
e99b6fff29c79ce050c083f47a26f60657b1e4de
|
55a947cddcac5188c557e175aec98df19485f623
|
/tests/integration/test_customer.py
|
811fcaa2b0bc6edf8f9dbe8c3288e900ea02b3db
|
[
"MIT"
] |
permissive
|
pfrantz/braintree_python
|
d02c1691049df68d87f7738e53d489682db94a7e
|
055e7400dd70a79ec18e5a30476dc77827bc465d
|
refs/heads/master
| 2021-01-16T20:07:31.664653
| 2013-10-30T00:19:16
| 2013-10-30T00:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,730
|
py
|
from tests.test_helper import *
import braintree.test.venmo_sdk as venmo_sdk
class TestCustomer(unittest.TestCase):
def test_all(self):
collection = Customer.all()
self.assertTrue(collection.maximum_size > 100)
customer_ids = [c.id for c in collection.items]
self.assertEquals(collection.maximum_size, len(TestHelper.unique(customer_ids)))
self.assertEquals(Customer, type(collection.first))
def test_create(self):
result = Customer.create({
"first_name": "Bill",
"last_name": "Gates",
"company": "Microsoft",
"email": "bill@microsoft.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com"
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Bill", customer.first_name)
self.assertEqual("Gates", customer.last_name)
self.assertEqual("Microsoft", customer.company)
self.assertEqual("bill@microsoft.com", customer.email)
self.assertEqual("312.555.1234", customer.phone)
self.assertEqual("614.555.5678", customer.fax)
self.assertEqual("www.microsoft.com", customer.website)
self.assertNotEqual(None, customer.id)
        self.assertNotEqual(None, re.search(r"\A\d{6,7}\Z", customer.id))
def test_create_with_device_session_id(self):
result = Customer.create({
"first_name": "Bill",
"last_name": "Gates",
"company": "Microsoft",
"email": "bill@microsoft.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100",
"device_session_id": "abc123"
}
})
self.assertTrue(result.is_success)
def test_create_with_unicode(self):
result = Customer.create({
"first_name": u"Bill<&>",
"last_name": u"G\u1F00t\u1F18s",
"company": "Microsoft",
"email": "bill@microsoft.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com"
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual(u"Bill<&>", customer.first_name)
self.assertEqual(u"G\u1f00t\u1F18s", customer.last_name)
self.assertEqual("Microsoft", customer.company)
self.assertEqual("bill@microsoft.com", customer.email)
self.assertEqual("312.555.1234", customer.phone)
self.assertEqual("614.555.5678", customer.fax)
self.assertEqual("www.microsoft.com", customer.website)
self.assertNotEqual(None, customer.id)
        self.assertNotEqual(None, re.search(r"\A\d{6,7}\Z", customer.id))
found_customer = Customer.find(customer.id)
self.assertEqual(u"G\u1f00t\u1F18s", found_customer.last_name)
def test_create_with_no_attributes(self):
result = Customer.create()
self.assertTrue(result.is_success)
self.assertNotEqual(None, result.customer.id)
def test_create_with_special_chars(self):
result = Customer.create({"first_name": "XML Chars <>&'\""})
self.assertTrue(result.is_success)
self.assertEqual("XML Chars <>&'\"", result.customer.first_name)
def test_create_returns_an_error_response_if_invalid(self):
result = Customer.create({
"email": "@invalid.com",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"billing_address": {
"country_code_alpha2": "MX",
"country_code_alpha3": "USA"
}
}
})
self.assertFalse(result.is_success)
self.assertEquals(2, result.errors.size)
self.assertEquals(ErrorCodes.Customer.EmailIsInvalid, result.errors.for_object("customer").on("email")[0].code)
self.assertEquals(
ErrorCodes.Address.InconsistentCountry,
result.errors.for_object("customer").for_object("credit_card").for_object("billing_address").on("base")[0].code
)
def test_create_customer_and_payment_method_at_the_same_time(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100"
}
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Mike", customer.first_name)
self.assertEqual("Jones", customer.last_name)
credit_card = customer.credit_cards[0]
self.assertEqual("411111", credit_card.bin)
self.assertEqual("1111", credit_card.last_4)
self.assertEqual("05/2010", credit_card.expiration_date)
def test_create_customer_and_verify_payment_method(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4000111111111115",
"expiration_date": "05/2010",
"cvv": "100",
"options": {"verify_card": True}
}
})
self.assertFalse(result.is_success)
self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, result.credit_card_verification.status)
def test_create_customer_with_check_duplicate_payment_method(self):
attributes = {
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4000111111111115",
"expiration_date": "05/2010",
"cvv": "100",
"options": {"fail_on_duplicate_payment_method": True}
}
}
Customer.create(attributes)
result = Customer.create(attributes)
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.CreditCard.DuplicateCardExists, result.errors.for_object("customer").for_object("credit_card").on("number")[0].code)
self.assertEquals("Duplicate card exists in the vault.", result.message)
def test_create_customer_with_payment_method_and_billing_address(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100",
"billing_address": {
"street_address": "123 Abc Way",
"locality": "Chicago",
"region": "Illinois",
"postal_code": "60622",
"country_code_alpha2": "US",
"country_code_alpha3": "USA",
"country_code_numeric": "840",
"country_name": "United States of America"
}
}
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Mike", customer.first_name)
self.assertEqual("Jones", customer.last_name)
address = customer.credit_cards[0].billing_address
self.assertEqual("123 Abc Way", address.street_address)
self.assertEqual("Chicago", address.locality)
self.assertEqual("Illinois", address.region)
self.assertEqual("60622", address.postal_code)
self.assertEqual("US", address.country_code_alpha2)
self.assertEqual("USA", address.country_code_alpha3)
self.assertEqual("840", address.country_code_numeric)
self.assertEqual("United States of America", address.country_name)
def test_create_with_customer_fields(self):
result = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"custom_fields": {
"store_me": "custom value"
}
})
self.assertTrue(result.is_success)
self.assertEquals("custom value", result.customer.custom_fields["store_me"])
def test_create_returns_nested_errors(self):
result = Customer.create({
"email": "invalid",
"credit_card": {
"number": "invalid",
"billing_address": {
"country_name": "invalid"
}
}
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Customer.EmailIsInvalid,
result.errors.for_object("customer").on("email")[0].code
)
self.assertEquals(
ErrorCodes.CreditCard.NumberHasInvalidLength,
result.errors.for_object("customer").for_object("credit_card").on("number")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryNameIsNotAccepted,
result.errors.for_object("customer").for_object("credit_card").for_object("billing_address").on("country_name")[0].code
)
def test_create_returns_errors_if_custom_fields_are_not_registered(self):
result = Customer.create({
"first_name": "Jack",
"last_name": "Kennedy",
"custom_fields": {
"spouse_name": "Jacqueline"
}
})
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.Customer.CustomFieldIsInvalid, result.errors.for_object("customer").on("custom_fields")[0].code)
def test_create_with_venmo_sdk_session(self):
result = Customer.create({
"first_name": "Jack",
"last_name": "Kennedy",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"options": {
"venmo_sdk_session": venmo_sdk.Session
}
}
})
self.assertTrue(result.is_success)
self.assertTrue(result.customer.credit_cards[0].venmo_sdk)
def test_create_with_venmo_sdk_payment_method_code(self):
result = Customer.create({
"first_name": "Jack",
"last_name": "Kennedy",
"credit_card": {
"venmo_sdk_payment_method_code": venmo_sdk.generate_test_payment_method_code("4111111111111111")
}
})
self.assertTrue(result.is_success)
self.assertEquals("411111", result.customer.credit_cards[0].bin)
def test_delete_with_valid_customer(self):
customer = Customer.create().customer
result = Customer.delete(customer.id)
self.assertTrue(result.is_success)
@raises(NotFoundError)
def test_delete_with_invalid_customer(self):
customer = Customer.create().customer
Customer.delete(customer.id)
Customer.delete(customer.id)
def test_find_with_valid_customer(self):
customer = Customer.create({
"first_name": "Joe",
"last_name": "Cool"
}).customer
found_customer = Customer.find(customer.id)
self.assertEquals(customer.id, found_customer.id)
self.assertEquals(customer.first_name, found_customer.first_name)
self.assertEquals(customer.last_name, found_customer.last_name)
def test_find_with_invalid_customer(self):
try:
Customer.find("badid")
self.assertTrue(False)
except NotFoundError, e:
self.assertEquals("customer with id badid not found", str(e))
def test_update_with_valid_options(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"company": "Apple",
"email": "steve@apple.com",
"phone": "312.555.5555",
"fax": "614.555.5555",
"website": "www.apple.com"
}).customer
result = Customer.update(customer.id, {
"first_name": "Bill",
"last_name": "Gates",
"company": "Microsoft",
"email": "bill@microsoft.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.microsoft.com"
})
self.assertTrue(result.is_success)
customer = result.customer
self.assertEqual("Bill", customer.first_name)
self.assertEqual("Gates", customer.last_name)
self.assertEqual("Microsoft", customer.company)
self.assertEqual("bill@microsoft.com", customer.email)
self.assertEqual("312.555.1234", customer.phone)
self.assertEqual("614.555.5678", customer.fax)
self.assertEqual("www.microsoft.com", customer.website)
self.assertNotEqual(None, customer.id)
        self.assertNotEqual(None, re.search(r"\A\d{6,7}\Z", customer.id))
def test_update_with_nested_values(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "10/10",
"billing_address": {
"postal_code": "11111"
}
}
}).customer
credit_card = customer.credit_cards[0]
address = credit_card.billing_address
updated_customer = Customer.update(customer.id, {
"first_name": "Bill",
"last_name": "Gates",
"credit_card": {
"expiration_date": "12/12",
"options": {
"update_existing_token": credit_card.token
},
"billing_address": {
"postal_code": "44444",
"country_code_alpha2": "US",
"country_code_alpha3": "USA",
"country_code_numeric": "840",
"country_name": "United States of America",
"options": {
"update_existing": True
}
}
}
}).customer
updated_credit_card = CreditCard.find(credit_card.token)
updated_address = Address.find(customer.id, address.id)
self.assertEqual("Bill", updated_customer.first_name)
self.assertEqual("Gates", updated_customer.last_name)
self.assertEqual("12/2012", updated_credit_card.expiration_date)
self.assertEqual("44444", updated_address.postal_code)
self.assertEqual("US", updated_address.country_code_alpha2)
self.assertEqual("USA", updated_address.country_code_alpha3)
self.assertEqual("840", updated_address.country_code_numeric)
self.assertEqual("United States of America", updated_address.country_name)
def test_update_with_nested_billing_address_id(self):
customer = Customer.create().customer
address = Address.create({
"customer_id": customer.id,
"postal_code": "11111"
}).address
updated_customer = Customer.update(customer.id, {
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/12",
"billing_address_id": address.id
}
}).customer
credit_card = updated_customer.credit_cards[0]
self.assertEqual(address.id, credit_card.billing_address.id)
self.assertEqual("11111", credit_card.billing_address.postal_code)
def test_update_with_invalid_options(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"company": "Apple",
"email": "steve@apple.com",
"phone": "312.555.5555",
"fax": "614.555.5555",
"website": "www.apple.com"
}).customer
result = Customer.update(customer.id, {
"email": "@microsoft.com",
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Customer.EmailIsInvalid,
result.errors.for_object("customer").on("email")[0].code
)
def test_create_from_transparent_redirect_with_successful_result(self):
tr_data = {
"customer": {
"first_name": "John",
"last_name": "Doe",
"company": "Doe Co",
}
}
post_params = {
"tr_data": Customer.tr_data_for_create(tr_data, "http://example.com/path"),
"customer[email]": "john@doe.com",
"customer[phone]": "312.555.2323",
"customer[fax]": "614.555.5656",
"customer[website]": "www.johndoe.com",
"customer[credit_card][number]": "4111111111111111",
"customer[credit_card][expiration_date]": "05/2012",
"customer[credit_card][billing_address][country_code_alpha2]": "MX",
"customer[credit_card][billing_address][country_code_alpha3]": "MEX",
"customer[credit_card][billing_address][country_code_numeric]": "484",
"customer[credit_card][billing_address][country_name]": "Mexico",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_create_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
customer = result.customer
self.assertEquals("John", customer.first_name)
self.assertEquals("Doe", customer.last_name)
self.assertEquals("Doe Co", customer.company)
self.assertEquals("john@doe.com", customer.email)
self.assertEquals("312.555.2323", customer.phone)
self.assertEquals("614.555.5656", customer.fax)
self.assertEquals("www.johndoe.com", customer.website)
self.assertEquals("05/2012", customer.credit_cards[0].expiration_date)
self.assertEquals("MX", customer.credit_cards[0].billing_address.country_code_alpha2)
self.assertEquals("MEX", customer.credit_cards[0].billing_address.country_code_alpha3)
self.assertEquals("484", customer.credit_cards[0].billing_address.country_code_numeric)
self.assertEquals("Mexico", customer.credit_cards[0].billing_address.country_name)
def test_create_from_transparent_redirect_with_error_result(self):
tr_data = {
"customer": {
"company": "Doe Co",
}
}
post_params = {
"tr_data": Customer.tr_data_for_create(tr_data, "http://example.com/path"),
"customer[email]": "john#doe.com",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_create_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.Customer.EmailIsInvalid, result.errors.for_object("customer").on("email")[0].code)
def test_update_from_transparent_redirect_with_successful_result(self):
customer = Customer.create({
"first_name": "Jane",
}).customer
tr_data = {
"customer_id": customer.id,
"customer": {
"first_name": "John",
}
}
post_params = {
"tr_data": Customer.tr_data_for_update(tr_data, "http://example.com/path"),
"customer[email]": "john@doe.com",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_update_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
customer = result.customer
self.assertEquals("John", customer.first_name)
self.assertEquals("john@doe.com", customer.email)
def test_update_with_nested_values_via_transparent_redirect(self):
customer = Customer.create({
"first_name": "Steve",
"last_name": "Jobs",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "10/10",
"billing_address": {
"postal_code": "11111"
}
}
}).customer
credit_card = customer.credit_cards[0]
address = credit_card.billing_address
tr_data = {
"customer_id": customer.id,
"customer": {
"first_name": "Bill",
"last_name": "Gates",
"credit_card": {
"expiration_date": "12/12",
"options": {
"update_existing_token": credit_card.token
},
"billing_address": {
"postal_code": "44444",
"options": {
"update_existing": True
}
}
}
}
}
post_params = {
"tr_data": Customer.tr_data_for_update(tr_data, "http://example.com/path"),
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_update_url())
updated_customer = Customer.confirm_transparent_redirect(query_string).customer
updated_credit_card = CreditCard.find(credit_card.token)
updated_address = Address.find(customer.id, address.id)
self.assertEqual("Bill", updated_customer.first_name)
self.assertEqual("Gates", updated_customer.last_name)
self.assertEqual("12/2012", updated_credit_card.expiration_date)
self.assertEqual("44444", updated_address.postal_code)
def test_update_from_transparent_redirect_with_error_result(self):
customer = Customer.create({
"first_name": "Jane",
}).customer
tr_data = {
"customer_id": customer.id,
"customer": {
"first_name": "John",
}
}
post_params = {
"tr_data": Customer.tr_data_for_update(tr_data, "http://example.com/path"),
"customer[email]": "john#doe.com",
}
query_string = TestHelper.simulate_tr_form_post(post_params, Customer.transparent_redirect_update_url())
result = Customer.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.Customer.EmailIsInvalid, result.errors.for_object("customer").on("email")[0].code)
|
[
"code@getbraintree.com"
] |
code@getbraintree.com
|
9cd4aaea9c9d50f79dd06603c2a9f6541239a683
|
fd770a37044ebbf116857650d0ee8957066fe57a
|
/14-Python/Demos/Day-02/file_handle.py
|
3f7ec6f310781ad10d2d59523a4208b9a3406794
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
helghareeb/OSTrack2019
|
1c5a8214e9cc7662bfe7be256f1168ef02c5fffe
|
3ef5af0f56f8640e92c1f3c3b3d76b8df2783f48
|
refs/heads/master
| 2020-06-28T22:20:35.496173
| 2020-01-04T17:05:58
| 2020-01-04T17:05:58
| 200,356,515
| 6
| 14
|
MIT
| 2019-08-24T07:45:51
| 2019-08-03T09:22:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 292
|
py
|
# In the name of Allah, the Most Gracious, the Most Merciful
f = open('output','w')
f.write('This is new content')
f.seek(8)
f.write('old')
f.close()
# f.write('This is line-01 \n')
# f.write('This is line-02 \n')
# f.write('This is line-03 \n')
# f.write('This is line-04 \n')
# for i in f.readlines():
# print(i)
|
[
"helghareeb@gmail.com"
] |
helghareeb@gmail.com
|
b5580f165cc9ba88e6ed7815a4cc90b627cea69d
|
8d292cad82e85e58e98f673aef549027ab247668
|
/notebooks/Golden_Pass_1/18_Spine_Allen/Spine_classifier.py
|
3a7b614dccc6c6f2baba145d29377d15f597eb96
|
[] |
no_license
|
celiibrendan/Complete_Pinky100_Pipeline
|
56ba1133469f9ea9f80525253f6e7318ea980de4
|
51bf305ec87139059ce8bf5c42d33c54c24a0991
|
refs/heads/master
| 2021-07-21T18:11:01.592799
| 2020-05-18T22:32:03
| 2020-05-18T22:32:03
| 170,215,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83,685
|
py
|
import bpy
#This one will pull down some of the larger segments from the datajoint
#table and then apply the automatic segmentation to them
#######Steps##############
'''1) Get the neuron the person wants to look at
2) Import the neuron and generate edges
3) Get the compartment_type person wants
4) Find the component_index that corresponds to the biggest one because that is the one we want
5) Delete all the edges, faces and vertices that do not correspond to these labels
6) Generate an OFF file for the current segment
7) Run the OFF file through the CGAL segmentation algorithm using the INPUT PARAMETERS
8) Run the auto spine labeler using the CGAL segmentation list
9) Label the colors of the auto labeled spines and show the final product
10) Output stats to a csv so they can be analyzed'''
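#Of the steps above, 1-2 are handled by load_Neuron_automatic_spine below,
#6 by write_Part_Neuron_Off_file, and 7-8 by get_cgal_data_and_label and
#smooth_backbone_vp3; the remaining steps live further down the file.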
####How to import from the segment table
import datajoint as dj
import numpy as np
import datetime
import math
from mathutils import Vector
from collections import Counter
dj.config['database.host'] = '10.28.0.34'
dj.config['database.user'] = 'celiib'
dj.config['database.password'] = 'newceliipass'
#will state whether words are shown or not
dj.config['safemode']=True
print(dj.conn(reset=True))
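#NOTE: the ta3p100 schema handle referenced throughout this script is assumed
#to be bound elsewhere (e.g. via dj.create_virtual_module); it is not defined here.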
def select_Neuron(ob_name):
# deselect all
bpy.ops.object.select_all(action='DESELECT')
bpy.context.scene.objects.active = None
# selection
obj = bpy.data.objects[ob_name]
bpy.context.scene.objects.active = obj
"""for obj in bpy.data.objects:
if "neuron" in obj.name:
obj.select = True
bpy.context.scene.objects.active = obj
print("object was found and active")
break"""
#1) Get the neuron the person wants to look at
#2) Import the neuron and generate edges
def filter_verts_and_faces(key,verts,faces):
#go and get the triangles and the vertices from the database
"""compartment_type
decimation_ratio
segmentation
segment_id"""
component_key = dict(segmentation=key["segmentation"],
segment_id=key["segment_id"],
decimation_ratio=float(key["decimation_ratio"]),
compartment_type=key["compartment_type"],
component_index=key["component_index"])
verts_label, triangles_label = (ta3p100.CompartmentFinal.ComponentFinal & component_key).fetch('vertex_indices','triangle_indices')
verts_label = verts_label.tolist()[0]
triangles_label = triangles_label.tolist()[0]
verts_keep = []
faces_keep = []
verts_lookup = {}
for i,ver in enumerate(verts_label):
verts_keep.append(verts[ver])
verts_lookup[ver] = i
#generate the new face labels
for fac in triangles_label:
faces_with_verts = faces[fac]
new_tuple = []
for v in faces_with_verts:
new_tuple.append(verts_lookup[v])
faces_keep.append(new_tuple)
#check that the new verts and faces to return are same length as the indices
"""if len(triangles_label) != len(faces_keep) or len(verts_label) != len(verts_keep):
print("ERROR THE FILTERED LABELS ARE NOT THE SAME SIZE AS THE INDICES LISTS!")"""
return verts_keep,faces_keep
whole_neuron_dicts = dict()
def load_Neuron_automatic_spine(key):
ID = key['segment_id']
compartment_type = key['compartment_type']
compartment_index = key['component_index']
print("inside load Neuron")
#neuron_data = ((mesh_Table & "segment_ID="+ID).fetch(as_dict=True))[0]
if ID not in whole_neuron_dicts:
whole_neuron_dicts[ID] = (ta3p100.CleansedMesh & 'decimation_ratio=0.35' & dict(segment_id=ID)).fetch1()
verts = whole_neuron_dicts[ID]['vertices'].astype(dtype=np.uint32).tolist()
faces = whole_neuron_dicts[ID]['triangles'].astype(dtype=np.uint32).tolist()
#could filter the verts and the faces here for just the ones we want
verts,faces = filter_verts_and_faces(key,verts,faces)
mymesh = bpy.data.meshes.new("neuron-"+str(ID))
mymesh.from_pydata(verts, [], faces)
mymesh.update(calc_edges=True)
mymesh.calc_normals()
object = bpy.data.objects.new("neuron-"+str(ID), mymesh)
#object.location = bpy.context.scene.cursor_location
object.location = Vector((0,0,0))
bpy.context.scene.objects.link(object)
object.lock_location[0] = True
object.lock_location[1] = True
object.lock_location[2] = True
object.lock_scale[0] = True
object.lock_scale[1] = True
object.lock_scale[2] = True
object.rotation_euler[0] = 1.5708
object.rotation_euler[1] = 0
object.rotation_euler[2] = 0
object.lock_rotation[0] = True
object.lock_rotation[1] = True
object.lock_rotation[2] = True
#set view back to normal:
#set_View()
#run the setup color command
#bpy.ops.object.select_all(action='TOGGLE')
#create_local_colors(object)
#make sure in solid mode
for area in bpy.context.screen.areas: # iterate through areas in current screen
if area.type == 'VIEW_3D':
for space in area.spaces: # iterate through spaces in current VIEW_3D area
if space.type == 'VIEW_3D': # check if space is a 3D view
space.viewport_shade = 'SOLID' # set the viewport shading to rendered
return object.name
##write the OFF file for the neuron
def write_Part_Neuron_Off_file(verts_for_off,faces_for_off,faces_indexes_for_off,segment_id,compartment_type_name,found_component_index,file_loc):
print('inside write_Part_neuron')
num_vertices = (len(verts_for_off))
num_faces = len(faces_indexes_for_off)
file_location = file_loc
filename = "neuron_" + str(segment_id) + "_" + str(compartment_type_name) + "_" + str(found_component_index)
f = open(file_location + filename + ".off", "w")
f.write("OFF\n")
f.write(str(num_vertices) + " " + str(num_faces) + " 0\n" )
ob = bpy.context.object
verts_raw = ob.data.vertices
#iterate through and write all of the vertices in the file
verts_lookup = {}
counter = 0
for vert_num in verts_for_off:
f.write(str(verts_raw[vert_num].co[0]) + " " + str(verts_raw[vert_num].co[1]) + " " + str(verts_raw[vert_num].co[2])+"\n")
verts_lookup[vert_num] = counter
counter += 1
faces_lookup_reverse = []
counter = 0
print("finished writing verts")
for i in range(0,len(faces_indexes_for_off)):
face_indices = faces_indexes_for_off[i]
f.write("3 " + str(verts_lookup[face_indices[0]]) + " " + str(verts_lookup[face_indices[1]]) + " " + str(verts_lookup[face_indices[2]])+"\n")
faces_lookup_reverse.append(faces_for_off[i])
counter += 1
print("finished writing faces")
print("done_writing_off_file")
#f.write("end")
    f.close()
    return filename,faces_lookup_reverse
import random
def get_cgal_data_and_label(key,ob_name):
    #store the group_segmentation in the triangle labels from datajoint
component_data = (ta3p100.ComponentAutoSegmentFinal() & key).fetch(as_dict=True)
if component_data == []:
return [], []
else:
component_data = component_data[0]
triangles_labels = component_data["seg_group"].tolist()
#activate the current object
select_Neuron(ob_name)
ob = bpy.context.object
me = ob.data
#print("starting to hide everything")
#iterate through all of the vertices
verts_raw = ob.data.vertices
#print(len(active_verts_raw))
edges_raw = ob.data.edges
#print(len(active_edges_raw))
faces_raw = ob.data.polygons
#gets a list of the unique labels
unique_segments = list(Counter(triangles_labels).keys())
segmentation_length = len(unique_segments) # equals to list(set(words))
#print(segmentation_length)
#makes a dictionary that maps the unique segments to a number from range(0,len(unique_seg))
unique_index_dict = {unique_segments[x]:x for x in range(0,segmentation_length)}
#print("unique_index_dict = " + str(len(unique_index_dict)))
#print("triangle_labels = " + str(len(triangles_labels)))
#adds all of the labels to the faces
max_length = len(triangles_labels)
#just iterate and add them to the faces
#here is where need to get stats for sdf numbers
labels_list = []
for tri in triangles_labels:
#assembles the label list that represents all of the faces
labels_list.append(str(unique_index_dict[tri]))
select_Neuron(ob_name)
#make sure in solid mode
for area in bpy.context.screen.areas: # iterate through areas in current screen
if area.type == 'VIEW_3D':
for space in area.spaces: # iterate through spaces in current VIEW_3D area
if space.type == 'VIEW_3D': # check if space is a 3D view
space.viewport_shade = 'SOLID' # set the viewport shading to rendered
bpy.ops.object.mode_set(mode='OBJECT')
#these variables are set in order to keep the functions the same as FINAL_importing_auto_seg.py
newname = ob.name
print("done with cgal_segmentation")
#----------------------now return a dictionary of the sdf values like in the older function get_sdf_dictionary
#get the sdf values and store in sdf_labels
sdf_labels = component_data["sdf"].tolist()
sdf_temp_dict = {}
labels_seen = []
#iterate through the labels_list
for i,label in enumerate(labels_list):
if label not in labels_seen:
labels_seen.append(label)
sdf_temp_dict[label] = []
sdf_temp_dict[label].append(sdf_labels[i])
#print(sdf_temp_dict)
#now calculate the stats on the sdf values for each label
sdf_final_dict = {}
for dict_key,value in sdf_temp_dict.items():
"""
#calculate the average
mean = np.mean(value)
#calculate the median
median = np.median(value)
#calculate the max
max = np.amax(value)
#calculate minimum
min = np.amin(value)
temp_dict = {"mean":mean,"median":median,"max":max,"min":min}
#assign them
sdf_final_dict[key] = temp_dict.copy()
"""
#just want to store the median
sdf_final_dict[dict_key] = np.median(value)
return sdf_final_dict, labels_list
import sys
import numpy as np
#import matplotlib.pyplot as plt
import networkx as nx
import time
def find_neighbors(labels_list,current_label,verts_to_Face,faces_raw,verts_raw):
"""will return the number of neighbors that border the segment"""
#iterate over each face with that label
# get the vertices of that face
    # get all the faces that have that vertex associated with them
# get the labels of all of the neighbor faces, for each of these labels, add it to the neighbors
#list if it is not already there and doesn't match the label you are currently checking
# return the list
#get the indexes of all of the faces with that label that you want to find the neighbors for
index_list = []
for i,x in enumerate(labels_list):
if x == current_label:
index_list.append(i)
verts_checked = []
faces_checked = []
neighbors_list = []
neighbors_shared_vert = {}
for index in index_list:
current_face = faces_raw[index]
#get the vertices associates with face
vertices = current_face.vertices
#get the faces associated with the vertices of that specific face
for vert in vertices:
#will only check each vertex once
if vert not in verts_checked:
verts_checked.append(vert)
faces_associated_vert = verts_to_Face[vert]
for fac in faces_associated_vert:
#make sure it is not a fellow face with the label who we are looking for the neighbors of
if (fac not in index_list):
#check to see if checked the the face already
if (fac not in faces_checked):
if(labels_list[fac] not in neighbors_list):
#add the vertex to the count of shared vertices
neighbors_shared_vert[labels_list[fac]] = 0
#only store the faces that are different
neighbors_list.append(labels_list[fac])
#faces_to_check.append(fac)
#faces_to_check.insert(0, fac)
#increment the number of times we have seen that label face
neighbors_shared_vert[labels_list[fac]] = neighbors_shared_vert[labels_list[fac]] + 1
#now add the face to the checked list
faces_checked.append(fac)
#have all of the faces to check
"""for facey in faces_to_check:
if labels_list[facey] != current_label and labels_list[facey] not in neighbors_list:
neighbors_list.append(labels_list[facey] )"""
number_of_faces = len(index_list)
#can filter out the neighbors that do not have 3 or more vertices
#print("neighbors_list = " + str(neighbors_list))
#print("neighbors_shared_vert = " + str(neighbors_shared_vert))
"""final_neighbors_shared_vert = {}
for key,value in neighbors_shared_vert.items():
if value >= neighbors_min or key == "backbone":
#add them to the final list if more than 3 neighbors:
final_neighbors_shared_vert[key]= value
final_neighbors_list = final_neighbors_shared_vert.keys()
if final_neighbors_list:
complete_Flag = True"""
return neighbors_list,neighbors_shared_vert,number_of_faces
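#Illustrative result (values invented): find_neighbors(...) may return
#(['12', 'backbone'], {'12': 4, 'backbone': 9}, 37) -- the neighboring labels,
#the shared-vertex count per neighbor, and the face count of current_label.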
##Functions from the auto_spine_labeler
def smooth_backbone_vp3(labels_list,sdf_final_dict,backbone_width_threshold = 0.35,max_backbone_threshold = 400,backbone_threshold=300,secondary_threshold=100,shared_vert_threshold=25,number_Flag = False, seg_numbers=1,smooth_Flag=True):
print("at beginning of smooth backbone vp3")
#things that could hint to backbone
#1) larger size
#2) touching 2 or more larger size
#have to go into object mode to do some editing
currentMode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
ob = bpy.context.object
ob.update_from_editmode()
#print("object_name = " + bpy.context.object.name)
me = ob.data
#print("about to get faces_verts raw")
faces_raw = me.polygons
verts_raw = me.vertices
#print("DONE about to get faces_verts raw")
#print("don't need to generate labels_list anymore")
#print("about to generate labels_list") ####!!!! This takes a good bit of time#####
#labels_list = generate_labels_list(faces_raw)
#print("DONE about to generate labels_list")
#need to assemble a dictionary that relates vertices to faces
#*****making into a list if the speed is too slow*******#
#print("about to generate verts_to_Face")
verts_to_Face = generate_verts_to_face_dictionary(faces_raw,verts_raw)
#print("DONE about to generate verts_to_Face")
#add new color and reassign all of the labels with those colors as the backbone label
#create a list of all the labels and which ones are the biggest ones
from collections import Counter
myCounter = Counter(labels_list)
spine_labels = []
backbone_labels = []
#print(" about to get counter list")
#may not want to relabel until the end in order to preserve the labels in case label a big one wrong
for label,times in myCounter.items():
if(times >= max_backbone_threshold):
#print(str(label) + ":" + str(times))
backbone_labels.append(label)
for label in myCounter.keys():
if( sdf_final_dict[label] >= backbone_width_threshold):
#print(str(label) + ":" + str(times))
if(myCounter[label] > backbone_threshold) and (label not in backbone_labels):
backbone_labels.append(label)
#print(" DONE about to get counter list")
"""for lb in sdf_final_dict:
if( sdf_final_dict[lb] >= backbone_width_threshold):
backbone_labels.append(lb) """
#print("backbone_labels = " + str(backbone_labels))
#print("hello")
#need to get rid of labels that don't border other backbone_labels
to_remove = []
for i in range(0,5):
print("smoothing round " + str(i+1))
printout_counter = 0
counter = 0
for bkbone in backbone_labels:
if bkbone not in to_remove:
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,bkbone,verts_to_Face,faces_raw,verts_raw)
#if(bkbone == "170"):
# print("70 nbrs = " + str(nbrs))
#counts up the number of shared vertices with backbone neighbors
backbone_count_flag = False
neighbor_counter = 0
total_backbone_shared_verts = 0
for n in neighbors_list:
if (n in backbone_labels) and (n not in to_remove):
neighbor_counter += 1
total_backbone_shared_verts = total_backbone_shared_verts + neighbors_shared_vert[n]
#if it meets the requirement of shared verts then activate the flag
if (total_backbone_shared_verts > shared_vert_threshold):
backbone_count_flag = True
'''#prevent against the split heads with 2 or 3
backbone_neighbor_list = neighbors_list.copy()
backbone_neighbor_list.append(bkbone)
other_backbone_flag = 0
appendFlag = False
if(backbone_count_flag == True and neighbor_counter < 4):
#check the other neighbor and see if the only other backbone is the current label, if so then just a split head
other_backbone_flag = 0
for n in neighbors_list:
if (n in backbone_labels) and (n not in to_remove):
neighbors_list_of_n,neighbors_shared_vert_of_n,number_of_faces_of_n = find_neighbors(labels_list,n,verts_to_Face,faces_raw,verts_raw)
for nb in neighbors_list_of_n:
if (nb in backbone_labels) and (nb not in to_remove) and (nb not in backbone_neighbor_list):
backbone_neighbor_list.append(nb)
other_backbone_flag += 1
if other_backbone_flag == 0:
"""if printout_counter < 5:
#print("For backbone = " + str( bkbone))
#print("neighbors_list = " + str(neighbors_list))
#print("backbone_neighbor_list = " + str(backbone_neighbor_list))
#print("other_backbone_flag = " + str(other_backbone_flag))
appendFlag = True
#printout_counter +=1"""
if (backbone_count_flag == True and neighbor_counter < 4) and (other_backbone_flag == 0): #len(split_head_backbone_list) >= len(backbone_neighbor_list):
for bk in backbone_neighbor_list:
to_remove.append(bk)
counter += 1
#if not backbone neighbors and/or didn't have enough shared verts then not part of the backbone
else:
if neighbor_counter <= 0 or backbone_count_flag == False:
to_remove.append(bkbone)
counter += 1'''
#compute the number of shared vertices and see if fits:
if neighbor_counter <= 0 or backbone_count_flag == False:
to_remove.append(bkbone)
counter += 1
print("counter = " + str(counter))
if counter == 0:
print("counter caused the break")
break
#print("to remove = " + str(to_remove))
print("done Analyzing big and small segments")
#go through and switch the labels of the backbone faces
#may not want to relabel until the end in order to preserve the labels in case label a big one wrong
print("about to rewrite the labels")
for i in range(0,len(labels_list)):
if labels_list[i] in backbone_labels and labels_list[i] not in to_remove:
labels_list[i] = "backbone"
#faces_raw[i].material_index = num_colors
print("DONE about to rewrite the labels")
return labels_list, verts_to_Face
#generates the stats: connections (who each label is connected to), shared_verts (how many vertices it shares with each neighbor), mesh_number (number of faces for that label)
def export_connection(labels_list,label_name, verts_to_Face,outputFlag="False",file_name="None"):
#print("hello from export_connection with label_name = " + str(label_name) )
#find all the neighbors of the label
currentMode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
ob = bpy.context.object
ob.update_from_editmode()
#print("object_name = " + bpy.context.object.name)
me = ob.data
faces_raw = me.polygons
verts_raw = me.vertices
#print("generating list in export connections")
#labels_list = generate_labels_list(faces_raw)
#print("done generating list in export connections")
#need to assemble a dictionary that relates vertices to faces
#*****making into a list if the speed is too slow*******#
#print("about to making verts_to_Face")
#verts_to_Face = generate_verts_to_face_dictionary(faces_raw,verts_raw)
#print("DONE about to making verts_to_Face")
total_labels_list = []
faces_checked = []
faces_to_check = [label_name]
still_checking_faces = True
connections = {}
shared_vertices = {}
mesh_number = {}
#print("about to start checking faces")
#will iterate through all of the labels connected to label_name until all of the neighbors (up to the backbone) of the label have been found
while still_checking_faces:
#will exit if no more faces to check
if not faces_to_check:
still_checking_faces = False
break
for facey in faces_to_check:
if facey != "backbone":
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,facey,verts_to_Face,faces_raw,verts_raw)
#reduce the shared vertices between a face and the backbone to 0 so it doesn't skew the shared-vertices percentage
pairs = list(neighbors_shared_vert.items())
pre_connections = [k for k,i in pairs]
pre_shared_vertices = [i for k,i in pairs]
if ("backbone" in pre_connections):
back_index = pre_connections.index("backbone")
pre_shared_vertices[back_index] = 0
connections[facey] = pre_connections
shared_vertices[facey] = pre_shared_vertices
mesh_number[facey] = number_of_faces
for neighbors in neighbors_list:
if (neighbors != "backbone") and (neighbors not in faces_to_check) and (neighbors not in faces_checked):
faces_to_check.append(neighbors)
faces_to_check.remove(facey)
faces_checked.append(facey)
#append the backbone to the graph structure
mesh_number["backbone"] = 0
#print("faces_checked = " + str(faces_checked))
#print("DONE about to start checking faces")
#save off the file to an npz file
if(outputFlag == True):
complete_path = str("/Users/brendancelii/Google Drive/Xaq Lab/Datajoint Project/Automatic_Labelers/spine_graphs/"+file_name)
#package up the data that would go to the database and save it locally name of the file will look something like this "4_bcelii_2018-10-01_12-12-34"
# np.savez("/Users/brendancelii/Google Drive/Xaq Lab/Datajoint Project/local_neurons_saved/"+segment_ID+"_"+author+"_"+
# date_time[0:9]+"_"+date_time[11:].replace(":","-")+".npz",segment_ID=segment_ID,author=author,
# date_time=date_time,vertices=vertices,triangles=triangles,edges=edges,status=status)
np.savez(complete_path,connections=connections,shared_vertices=shared_vertices,mesh_number=mesh_number )
return connections,shared_vertices,mesh_number
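#Hedged illustration of the returned graph structure (hypothetical values),
#which classify_spine_vp2 consumes downstream:
#   connections     = {"12": ["14", "backbone"], "14": ["12"]}
#   shared_vertices = {"12": [6, 0], "14": [6]}   # share with backbone zeroed out
#   mesh_number     = {"12": 85, "14": 40, "backbone": 0}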
def classify_spine_vp2(connections,shared_vertices,mesh_number,sdf_final_dict):
#print("inside classify_spine")
#head_threshold = 0.15
absolute_head_threshold = 30
stub_threshold = 40
path_threshold = 40
#make a new dictionary to hold the final labels of the spine
end_labels = {k:"none" for k in mesh_number.keys()}
#only one segment so label it as a spine
if len(connections.keys()) <= 1:
end_labels[list(connections.keys())[0]] = "spine_one_seg"
total_mesh_faces_outer = sum([k for i,k in mesh_number.items()])
#print("total_mesh_faces = " + str( total_mesh_faces_outer))
#create the graph from these
G=nx.Graph(connections)
endpoint_labels,shortest_paths = find_endpoints(G,mesh_number)
if endpoint_labels == []:
for jk in end_labels.keys():
end_labels[jk] = "backbone"
return end_labels
#print("endpoint_labels = "+str(endpoint_labels))
#print("shortest_paths = "+str(shortest_paths))
#make a new dictionary to hold the final labels of the spine
end_labels = {k:"none" for k in mesh_number.keys()}
end_labels["backbone"] = "backbone"
#print("end_labels at beginning")
#print(end_labels)
for endpoint in endpoint_labels:
#print("at beginning of endpoint loop with label = "+ str(endpoint))
#get the shortest path lists
endpoint_short_paths = shortest_paths[endpoint]
for path in endpoint_short_paths:
path.remove("backbone")
path_total_mesh_faces = sum([k for i,k in mesh_number.items() if i in path])
#print("path_total_mesh_faces = "+str(path_total_mesh_faces))
#print("at beginning of path loop with path = "+ str(path))
travel_index = 0
head_found = False
label_everything_above_as_head = False
while (head_found == False ) and travel_index < len(path):
current_face = path[travel_index]
sdf_guess = sdf_likely_category(current_face,travel_index,path,False,sdf_final_dict,connections,mesh_number,absolute_head_threshold)
if sdf_guess != "head" or mesh_number[current_face] < absolute_head_threshold:
#then not of any significance BUT ONLY REASSIGN IF IT DOES NOT ALREADY HAVE AN ASSIGNMENT
if end_labels[current_face] == "none":
end_labels[current_face] = "no_significance"
travel_index = travel_index + 1
else:
#end_labels[current_face] = "head_reg" WAIT TO ASSIGN TILL LATER
if "neck" != end_labels[current_face][0:4] and "spine" != end_labels[current_face][0:5] : #if not already labeled as neck or spine
head_found = True
label_everything_above_as_head = True
else:
travel_index = travel_index + 1
#print("end of first while loop, travel_index = "+ str(travel_index) + " head_found = "+ str(head_found))
############Added new threshold that makes it so path length can't be really small
if travel_index < len(path):
travel_face = path[travel_index]
else:
travel_face = path[travel_index-1]
travel_index = travel_index-1
if (path[travel_index] == "backbone") or ("backbone" in connections[path[travel_index]]):
head_found = False
label_everything_above_as_head = True
if path_total_mesh_faces<path_threshold:
head_found = False
label_everything_above_as_head = True
####do the head splitting####
#see if there are any labels that border it that also share a high percentage of faces
if head_found == True:
##will return the names of the faces that have unusually high verts sharing
split_head_labels = get_split_heads_vp2(path[travel_index],travel_index,path,connections,shared_vertices,mesh_number,sdf_final_dict,absolute_head_threshold)
#print("split_head_labels = " + str(split_head_labels))
if len(split_head_labels) >= 2:
#print("adding the split head labels")
for split_label in split_head_labels:
#######may need to add in CHECK FOR ALREADY LABELED
if ("head" == end_labels[split_label][0:4] or end_labels[split_label] == "none"):
end_labels[split_label] = "head_split"
#else: THINK LABELING IT AS SPINE IS NOT WHAT WE WANT
# end_labels[split_label] = "spine_head_disagree_split_head"
label_everything_above_as_head = True
###if no head was found
if head_found == False:
#print("no head found so labeling as neck")
#######WILL NOT OVERWRITE UNLESS LABELED AS NO SIGNIFICANCE
for i in path:
if end_labels[i] == "no_significance" or end_labels[i] == "none" or end_labels[i][0:4] == "head":
end_labels[i] = "neck_no_head_on_path_head_false"
label_everything_above_as_head = False
#print("label_everything_above_as_head = " + str(label_everything_above_as_head))
#need to label any of those above it in the chain labeled as insignificant to heads
if label_everything_above_as_head == True and head_found == True:
if end_labels[travel_face] == "none":
#print("labeled as head reg")
end_labels[travel_face] = "head_reg"
#else: ########don't need this because don't want to overwrite already written spine neck
#if "head" not in end_labels[travel_index]:
#end_labels[travel_index] = "spine_head_disagree"
#will label everything above it as a head and then everything below it as neck
#####need to account for the special case so we do not overwrite the head_split####
if "head" == end_labels[travel_face][0:4]:
#print('labeling all no_significance above as head hats')
for i in range(0,travel_index):
current_label = path[i]
if end_labels[current_label] == "no_significance":
end_labels[current_label] = "head_hat"
else:
if "head" != end_labels[current_label][0:4]:
end_labels[current_label] = "spine_head_disagree_above_head"
#print('labeling all below head as necks')
for i in range(travel_index+1,len(path)):
current_label = path[i]
if current_label not in split_head_labels and end_labels[current_label] != "head_split":
end_labels[current_label] = "neck_under_head"
else: ###not sure when this will be activated but maybe?
#print("head not present so labeling everything above as neck_hat")
for i in range(0,travel_index):
current_label = path[i]
#####need to account for the special case so we do not overwrite the head_split####
if end_labels[current_label] == "no_significance":
end_labels[current_label] = "neck_hats_no_head"
#print("at end of one cycle of big loop")
#print("end_labels = " + str(end_labels))
#what about a head being accidentally written under another head?
#####you should not write a head to a spine that has already been labeled as under a head
#####you should overwrite all labels under a head as spine_under_head
#print("outside of big loop")
#print("end_labels = " + str(end_labels))
#if no heads present at all label as spines
spine_flag_no_head = False
for face,label in end_labels.items():
if "head" == label[0:4]:
spine_flag_no_head = True
if spine_flag_no_head == False:
#print("no face detected in all of spine")
for label_name in end_labels.keys():
end_labels[label_name] = "spine_no_head_at_all"
###### TO DO: can put in a piece of logic that seeks and labels the ones we know are necks for sure based on width
#once done all of the paths go through and label things as stubs
if total_mesh_faces_outer < stub_threshold:
#print("stub threshold triggered")
for label_name in end_labels.keys():
if "head" == end_labels[label_name][0:4]:
end_labels[label_name] = "stub_head"
elif "neck" == end_labels[label_name][0:4]:
end_labels[label_name] = "stub_neck"
else:
end_labels[label_name] = "stub_spine"
end_labels["backbone"] = "backbone"
###To Do: replace the places that look only at the first four characters
return end_labels
def relabel_segments(labels_list,current_label,new_label):
for i,x in enumerate(labels_list):
if x == current_label:
labels_list[i] = new_label
return labels_list
def generate_verts_to_face_dictionary(faces_raw,verts_raw):
verts_to_Face = {}
#initialize the lookup dictionary as empty lists
for pre_vertex in verts_raw:
verts_to_Face[pre_vertex.index] = []
#print(len(verts_raw))
#print(len(verts_to_Face))
#print(verts_to_Face[1])
for face in faces_raw:
#get the vertices
verts = face.vertices
#add the index to the list for each of the vertices
for vertex in verts:
verts_to_Face[vertex].append(face.index)
return verts_to_Face
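#Minimal usage sketch (assuming a toy mesh where face 0 uses vertices 0,1,2
#and face 1 uses vertices 1,2,3 -- purely illustrative):
#   verts_to_Face = generate_verts_to_face_dictionary(faces_raw, verts_raw)
#   verts_to_Face[1]  ->  [0, 1]   # both faces touch vertex 1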
def automatic_spine_classification_vp3(labels_list,verts_to_Face,sdf_final_dict):
#process of labeling
"""1) Get a list of all of the labels
2) Iterate through the labels and for each:
a. Get the connections, verts_shared and mesh_sizes for all labels connected to said label
b. Run the automatic spine classification to get the categories for each label
c. Create a new list that stores the categories for each label processed
d. repeat until all labels have been processed
3) Delete all the old colors and then setup the global colors with the regular labels
4) Change the material index for all labels based on the categorical classification"""
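#Hedged walk-through of the loop below on toy labels (hypothetical names):
#export_connection("5", ...) returns the whole connected spine component that
#label "5" belongs to, classify_spine_vp2 maps it to e.g.
#{"5": "head_reg", "7": "neck_under_head"}, relabel_segments rewrites both
#labels inside final_spine_labels, and "7" is then skipped via processed_labels.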
currentMode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
ob = bpy.context.object
ob.update_from_editmode()
#print("object_name = " + bpy.context.object.name)
me = ob.data
faces_raw = me.polygons
verts_raw = me.vertices
#labels_list = generate_labels_list(faces_raw)
final_spine_labels = labels_list.copy()
processed_labels = []
myCounter = Counter(labels_list)
complete_labels = [label for label,times in myCounter.items()]
head_counter = 0
spine_counter = 0
neck_counter = 0
stub_counter = 0
for i in range(0,len(complete_labels)):
if complete_labels[i] != "backbone" and complete_labels[i] not in processed_labels:
#print("at beginning of spine labeling loop: about to enter export connection")
#get the connections, shared vertices, and mesh sizes for the whole spine segment that the label is connected to
connections,shared_vertices,mesh_number = export_connection(labels_list,complete_labels[i], verts_to_Face,outputFlag="False",file_name="None")
#print("about to send to classify spine")
#send that graph data to the spine classifier to get labels for that
final_labels = classify_spine_vp2(connections,shared_vertices,mesh_number,sdf_final_dict)
#print("done classify spines")
head_Flag = False
spine_Flag = False
stub_Flag = False
neck_Flag = False
#relabel the list accordingly
############could speed this up where they return the number of types of labels instead of having to search for them############
#print("about to find number of heads/spines/stubs/necks PLUS RELABEL AND append them to list")
for key,value in final_labels.items():
if value[0:4] == "head":
head_Flag = True
if value[0:4] == "spin":
spine_Flag = True
if value[0:4] == "stub":
stub_Flag = True
if value[0:4] == "neck":
neck_Flag = True
relabel_segments(final_spine_labels,key,value)
#add them to the list of processed labels
processed_labels.append(key)
#print("about to find number of heads/spines/stubs/necks PLUS RELABEL AND append them to list")
if head_Flag == True:
head_counter += 1
if spine_Flag == True:
spine_counter += 1
if stub_Flag == True:
stub_counter += 1
if neck_Flag == True:
neck_counter += 1
#get the indexes for the labeling from the datajoint table
label_data = ta3.LabelKey().fetch("numeric","description")
#print(label_data)
label_names = label_data[1].tolist()
label_indexes = label_data[0].tolist()
#print(label_names)
spine_head_index = label_indexes[label_names.index("Spine Head")]
spine_neck_index = label_indexes[label_names.index("Spine Neck")]
spine_reg_index = label_indexes[label_names.index("Spine")]
final_faces_labels_list = np.zeros(len(faces_raw))
final_verts_labels_list = np.zeros(len(verts_raw))
#assign the colors to the faces:
for i,fi in enumerate(final_spine_labels):
if fi[0:4] == "head":
#fac.material_index = 2
final_faces_labels_list[i] = spine_head_index
elif fi[0:4] == "neck":
#fac.material_index = 3
final_faces_labels_list[i] = spine_neck_index
elif fi[0:4] == "spin":
#fac.material_index = 4
final_faces_labels_list[i] = spine_reg_index
else:
#fac.material_index = 0
final_faces_labels_list[i] = 0
#assign the vertices an index
for vert in faces_raw[i].vertices:
if final_verts_labels_list[vert] == 0:
final_verts_labels_list[vert] = final_faces_labels_list[i]
#create the list of labels for the vertices
#print("DONE about to color heads")
return head_counter,neck_counter, spine_counter, stub_counter, final_verts_labels_list, final_faces_labels_list
####For automatic spine labeling
def find_endpoints(G,mesh_number):
#will first calculate all the shortest paths for each of the nodes
node_list = list(G.nodes)
if("backbone" in node_list):
node_list.remove("backbone")
else:
return [],[]
shortest_paths = {}
for node in node_list:
shortest_paths[node] = [k for k in nx.all_shortest_paths(G,node,"backbone")]
endpoints = []
#identify the nodes that are not a subset of other nodes
for node in node_list:
other_nodes = [k for k in node_list if k != node ]
not_unique = 0
for path in shortest_paths[node]:
not_unique_Flag = False
for o_node in other_nodes:
for o_shortest_path in shortest_paths[o_node]:
if set(path) <= set(o_shortest_path):
not_unique_Flag = True
if not_unique_Flag == True:
not_unique = not_unique + 1
#decide if unique endpoint
if not_unique < len(shortest_paths[node]): # this means there is a unique path
#if not_unique != 0:
#print(node + "-some unique and some non-unique paths for endpoint")
endpoints.append(node)
##print(endpoints)
longest_paths_list = []
for end_node in endpoints:
longest_path = 0
for path in shortest_paths[end_node]:
path_length = 0
for point in path:
path_length = path_length + mesh_number[point]
if path_length > longest_path:
longest_path = path_length
longest_paths_list.append((end_node,longest_path))
#print(longest_paths_list)
longest_paths_list.sort(key=lambda pair: pair[1], reverse=True)
#print(longest_paths_list)
ranked_endpoints = [x for x,i in longest_paths_list]
endpoint_paths_lengths = [i for x,i in longest_paths_list]
endpoint_path_list = {}
for endpt in ranked_endpoints:
endpoint_path_list[endpt] = shortest_paths[endpt]
#ranked_endpoints, longest_paths_list = (list(t) for t in zip(*sorted(zip(endpoints, longest_paths_list))))
return ranked_endpoints, endpoint_path_list
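#Hedged example of the endpoint test above (toy graph, not mesh data): for a
#chain a-b-backbone, node b's shortest path [b, backbone] is a subset of a's
#path [a, b, backbone], so only "a" survives as an endpoint; endpoints are
#then ranked by the summed mesh-face counts along their longest shortest path.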
def sdf_likely_category(current_label,current_index,path,head_flag,sdf_final_dict,connections,mesh_number,absolute_head_threshold):
#width thresholding constants
width_thresholds = {"base":0.04, "item_top_threshold":1.5}
#if size is smaller than the max threshold for a head then return neck
if mesh_number[current_label] < absolute_head_threshold:
return "neck"
#get the mean, max, and median
median_width = sdf_final_dict[current_label]
#if the median is above a certain size and the total number of triangles is above a threshold then return as head
"""sdf_head_threshold = 50
over_median_threshold = 0.12
if label_mesh_number > sdf_head_threshold and median > over_median_threshold:
return "head"
"""
neck_near_base_threshold = 0.16
close_neck_call_threshold = 0.09
#common characteristics of neck:
#1) median width Less than neck_cuttoff_threshold
#2) if larger item on top and that item is not a head
#3) if larger item on top with more than 50% heads but less width
#4) connected to backbone
#1) median width Less than neck_cuttoff_threshold, return as neck
if median_width < width_thresholds["base"]:
return "neck"
#2) if larger item on top and that item is not a head or #3) if larger item on top with more than 50% heads but less width
#width_on_top = []
#face_number_on_top = []
for i in range(0,current_index):
face_number_on_top = mesh_number[path[i]]
width_on_top = sdf_final_dict[path[i]]
if face_number_on_top > mesh_number[current_label]:
if head_flag == False:
return "neck"
if median_width > width_thresholds["item_top_threshold"]*width_on_top:
return "neck"
#4) connected to backbone
if "backbone" in connections[current_label]:
return "neck"
######check for head based on whether there is a significantly smaller neck underneath it (because it can be very close to the 0.04 cutoff sometimes)
#get the mean, median and max
#will return head or neck
return "head"
def get_split_heads_vp2(current_label,current_index, path,connections,shared_vertices,mesh_number,sdf_final_dict,absolute_head_threshold):
final_split_heads = [current_label]
split_head_threshold = 0.35
#underneath_threshold = 0.20
#the only solid number threshold
split_head_absolute_threshold = 8
heads_to_check = True
while heads_to_check:
#1) go to the next label below it
if(current_index < (len(path)-1)):
next_index = current_index + 1
next_label = path[next_index]
if(next_label == "backbone"):
#no_more_split_head_Flag = True
break
#ask if this next satisfies 1) enough shared verts? 2) SDF head possible?
verts_sharing_index = connections[current_label].index(next_label)
verts_sharing = shared_vertices[current_label][verts_sharing_index]
#print("split share for faces " + str(current_label) + " " +str(next_label) + "="+str(verts_sharing/mesh_number[current_label]))
sdf_guess = sdf_likely_category(next_label,next_index,path,True,sdf_final_dict,connections,mesh_number,absolute_head_threshold)
if verts_sharing/mesh_number[current_label] > split_head_threshold and sdf_guess == "head" and mesh_number[next_label] > split_head_absolute_threshold:
#add next label to the list
final_split_heads.append(next_label)
current_index = next_index
current_label = next_label
else:
heads_to_check = False
return final_split_heads
##Functions from the auto_spine_labeler
def smooth_backbone_vp5(labels_list,sdf_final_dict,backbone_width_threshold = 0.35,max_backbone_threshold = 400,backbone_threshold=300,secondary_threshold=100,shared_vert_threshold=25,backbone_neighbor_min=20,number_Flag = False, seg_numbers=1,smooth_Flag=True):
print("at beginning of smooth backbone vp4")
#things that could hint to backbone
#1) larger size
#2) touching 2 or more larger size
#have to go into object mode to do some editing
currentMode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
ob = bpy.context.object
ob.update_from_editmode()
#print("object_name = " + bpy.context.object.name)
me = ob.data
#print("about to get faces_verts raw")
faces_raw = me.polygons
verts_raw = me.vertices
#print("DONE about to get faces_verts raw")
#print("don't need to generate labels_list anymore")
#print("about to generate labels_list") ####!!!! This takes a good bit of time#####
#labels_list = generate_labels_list(faces_raw)
#print("DONE about to generate labels_list")
#need to assemble a dictionary that relates vertices to faces
#*****making into a list if the speed is too slow*******#
#print("about to generate verts_to_Face")
verts_to_Face = generate_verts_to_face_dictionary(faces_raw,verts_raw)
#print("DONE about to generate verts_to_Face")
#add new color and reassign all of the labels with those colors as the backbone label
#create a list of all the labels and which ones are the biggest ones
from collections import Counter
myCounter = Counter(labels_list)
spine_labels = []
backbone_labels = []
#print(" about to get counter list")
#may not want to relabel until the end in order to preserve the labels in case label a big one wrong
for label,times in myCounter.items():
if(times >= max_backbone_threshold):
#print(str(label) + ":" + str(times))
backbone_labels.append(label)
for label in myCounter.keys():
if( sdf_final_dict[label] >= backbone_width_threshold):
#print(str(label) + ":" + str(times))
if(myCounter[label] > backbone_threshold) and (label not in backbone_labels):
backbone_labels.append(label)
#print(" DONE about to get counter list")
"""for lb in sdf_final_dict:
if( sdf_final_dict[lb] >= backbone_width_threshold):
backbone_labels.append(lb) """
#print("backbone_labels = " + str(backbone_labels))
#print("hello")
#need to get rid of labels that don't border other backbone_labels
to_remove = []
backbone_neighbors_dict = {}
for i in range(0,5):
print("smoothing round " + str(i+1))
printout_counter = 0
counter = 0
for bkbone in backbone_labels:
if bkbone not in to_remove:
#neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,bkbone,verts_to_Face,faces_raw,verts_raw)
if bkbone not in backbone_neighbors_dict.keys():
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,bkbone,verts_to_Face,faces_raw,verts_raw)
backbone_neighbors_dict[bkbone] = dict(neighbors_list=neighbors_list,neighbors_shared_vert=neighbors_shared_vert,
number_of_faces=number_of_faces)
else:
neighbors_list = backbone_neighbors_dict[bkbone]["neighbors_list"]
neighbors_shared_vert = backbone_neighbors_dict[bkbone]["neighbors_shared_vert"]
number_of_faces = backbone_neighbors_dict[bkbone]["number_of_faces"]
#if(bkbone == "170"):
# print("70 nbrs = " + str(nbrs))
#counts up the number of shared vertices with backbone neighbors
backbone_count_flag = False
neighbor_counter = 0
total_backbone_shared_verts = 0
for n in neighbors_list:
if (n in backbone_labels) and (n not in to_remove):
neighbor_counter += 1
total_backbone_shared_verts = total_backbone_shared_verts + neighbors_shared_vert[n]
#if it meets the requirement of shared verts then activate the flag
if (total_backbone_shared_verts > shared_vert_threshold):
backbone_count_flag = True
'''#prevent against the split heads with 2 or 3
backbone_neighbor_list = neighbors_list.copy()
backbone_neighbor_list.append(bkbone)
other_backbone_flag = 0
appendFlag = False
if(backbone_count_flag == True and neighbor_counter < 4):
#check the other neighbor and see if the only other backbone is the current label, if so then just a split head
other_backbone_flag = 0
for n in neighbors_list:
if (n in backbone_labels) and (n not in to_remove):
neighbors_list_of_n,neighbors_shared_vert_of_n,number_of_faces_of_n = find_neighbors(labels_list,n,verts_to_Face,faces_raw,verts_raw)
for nb in neighbors_list_of_n:
if (nb in backbone_labels) and (nb not in to_remove) and (nb not in backbone_neighbor_list):
backbone_neighbor_list.append(nb)
other_backbone_flag += 1
if other_backbone_flag == 0:
"""if printout_counter < 5:
#print("For backbone = " + str( bkbone))
#print("neighbors_list = " + str(neighbors_list))
#print("backbone_neighbor_list = " + str(backbone_neighbor_list))
#print("other_backbone_flag = " + str(other_backbone_flag))
appendFlag = True
#printout_counter +=1"""
if (backbone_count_flag == True and neighbor_counter < 4) and (other_backbone_flag == 0): #len(split_head_backbone_list) >= len(backbone_neighbor_list):
for bk in backbone_neighbor_list:
to_remove.append(bk)
counter += 1
#if not backbone neighbors and/or didn't have enough shared verts then not part of the backbone
else:
if neighbor_counter <= 0 or backbone_count_flag == False:
to_remove.append(bkbone)
counter += 1'''
#compute the number of shared vertices and see if fits:
if neighbor_counter <= 0 or backbone_count_flag == False:
to_remove.append(bkbone)
counter += 1
print("counter = " + str(counter))
if counter <= 3:
print("counter caused the break")
break
#now go through and make sure no unconnected backbone segments
"""Pseudo-code for filtering algorithm
1) iterate through all of the backbone labels
2) Go get the neighbors of the backbone
3) Add all of the neighbors who are too part of the backbone to the backbones to check list
4) While backbone neighbor counter is less than the threshold or until list to check is empty
5) Pop the next neighbor off the list and add it to the neighbors check list
6) Get the neighbors of this guy
7) for each of neighbors that is also on the backbone BUT HASN'T BEEN CHECKED YET append them to the list to be check and update counter
8) continue at beginning of loop
-- once loop breaks
9) if the counter is below the threshold:
Add all of values in the neighbros already checked list to the new_to_remove
10) Use the new_backbone_labels and new_to_remove to rewrite the labels_list
"""
new_backbone_labels = [bkbone for bkbone in backbone_labels if bkbone not in to_remove]
new_to_remove = []
for bkbonz in new_backbone_labels:
checked_backbone_neighbors = []
backbone_neighbors_to_check = []
new_backbone_neighbor_counter = 0
shared_vert_threshold = 5
if bkbonz not in backbone_neighbors_dict.keys():
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,bkbonz,verts_to_Face,faces_raw,verts_raw)
backbone_neighbors_dict[bkbonz] = dict(neighbors_list=neighbors_list,neighbors_shared_vert=neighbors_shared_vert,
number_of_faces=number_of_faces)
else:
neighbors_list = backbone_neighbors_dict[bkbonz]["neighbors_list"]
neighbors_shared_vert = backbone_neighbors_dict[bkbonz]["neighbors_shared_vert"]
number_of_faces = backbone_neighbors_dict[bkbonz]["number_of_faces"]
for bb in neighbors_list:
if (bb in new_backbone_labels) and (bb not in checked_backbone_neighbors) and (bb not in new_to_remove) and neighbors_shared_vert[bb] > shared_vert_threshold:
backbone_neighbors_to_check.append(bb)
new_backbone_neighbor_counter += 1
#backbone_neighbors_to_check = [nb for nb in neighbors_list if nb in new_backbone_labels]
checked_backbone_neighbors = [nb for nb in backbone_neighbors_to_check]
#new_backbone_neighbor_counter = len(backbone_neighbors_to_check)
#checked_backbone_neighbors = []
#4) While backbone neighbor counter is less than the threshold or until list to check is empty
while new_backbone_neighbor_counter < backbone_neighbor_min and backbone_neighbors_to_check != []:
#5) Pop the next neighbor off the list and add it to the neighbors check list
current_backbone = backbone_neighbors_to_check.pop(0)
if current_backbone not in checked_backbone_neighbors:
checked_backbone_neighbors.append(current_backbone)
#6) Get the neighbors of this guy
if current_backbone not in backbone_neighbors_dict.keys():
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,current_backbone,verts_to_Face,faces_raw,verts_raw)
backbone_neighbors_dict[current_backbone] = dict(neighbors_list=neighbors_list,neighbors_shared_vert=neighbors_shared_vert,
number_of_faces=number_of_faces)
else:
neighbors_list = backbone_neighbors_dict[current_backbone]["neighbors_list"]
neighbors_shared_vert = backbone_neighbors_dict[current_backbone]["neighbors_shared_vert"]
number_of_faces = backbone_neighbors_dict[current_backbone]["number_of_faces"]
#7) for each of neighbors that is also on the backbone BUT HASN'T BEEN CHECKED YET append them to the list to be check and update counter
for bb in neighbors_list:
if (bb in new_backbone_labels) and (bb not in checked_backbone_neighbors) and (bb not in new_to_remove) and neighbors_shared_vert[bb] > shared_vert_threshold:
backbone_neighbors_to_check.append(bb)
new_backbone_neighbor_counter += 1
#9) if the counter is below the threshold --> add all of the values in the checked-neighbors list to new_to_remove
if new_backbone_neighbor_counter < backbone_neighbor_min:
for bz in checked_backbone_neighbors:
if bz not in new_to_remove:
new_to_remove.append(bz)
#print("to remove = " + str(to_remove))
print("done Analyzing big and small segments")
#go through and switch the labels of the backbone faces
#may not want to relabel until the end in order to preserve the labels in case label a big one wrong
print("about to rewrite the labels")
for i in range(0,len(labels_list)):
if labels_list[i] in new_backbone_labels and labels_list[i] not in new_to_remove:
labels_list[i] = "backbone"
#faces_raw[i].material_index = num_colors
print("DONE about to rewrite the labels")
return labels_list, verts_to_Face
##Functins from the auto_spine_labeler
def smooth_backbone_vp4(labels_list,sdf_final_dict,backbone_width_threshold = 0.35,max_backbone_threshold = 400,backbone_threshold=300,secondary_threshold=100,shared_vert_threshold=25,backbone_neighbor_min=10,number_Flag = False, seg_numbers=1,smooth_Flag=True):
print("at beginning of smooth backbone vp4")
#things that could hint to backbone
#1) larger size
#2) touching 2 or more larger size
#have to go into object mode to do some editing
currentMode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
ob = bpy.context.object
ob.update_from_editmode()
#print("object_name = " + bpy.context.object.name)
me = ob.data
#print("about to get faces_verts raw")
faces_raw = me.polygons
verts_raw = me.vertices
#print("DONE about to get faces_verts raw")
#print("don't need to generate labels_list anymore")
#print("about to generate labels_list") ####!!!! This takes a good bit of time#####
#labels_list = generate_labels_list(faces_raw)
#print("DONE about to generate labels_list")
#need to assemble a dictionary that relates vertices to faces
#*****making into a list if the speed is too slow*******#
#print("about to generate verts_to_Face")
verts_to_Face = generate_verts_to_face_dictionary(faces_raw,verts_raw)
#print("DONE about to generate verts_to_Face")
#add new color and reassign all of the labels with those colors as the backbone label
#create a list of all the labels and which ones are the biggest ones
from collections import Counter
myCounter = Counter(labels_list)
spine_labels = []
backbone_labels = []
#print(" about to get counter list")
#may not want to relabel until the end in order to preserve the labels in case label a big one wrong
for label,times in myCounter.items():
if(times >= max_backbone_threshold):
#print(str(label) + ":" + str(times))
backbone_labels.append(label)
for label in myCounter.keys():
if( sdf_final_dict[label] >= backbone_width_threshold):
#print(str(label) + ":" + str(times))
if(myCounter[label] > backbone_threshold) and (label not in backbone_labels):
backbone_labels.append(label)
#print(" DONE about to get counter list")
"""for lb in sdf_final_dict:
if( sdf_final_dict[lb] >= backbone_width_threshold):
backbone_labels.append(lb) """
#print("backbone_labels = " + str(backbone_labels))
#print("hello")
#need to get rid of labels that don't border other backbone_labels
to_remove = []
backbone_neighbors_dict = {}
for i in range(0,5):
print("smoothing round " + str(i+1))
printout_counter = 0
counter = 0
for bkbone in backbone_labels:
if bkbone not in to_remove:
if bkbone not in backbone_neighbors_dict.keys():
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,bkbone,verts_to_Face,faces_raw,verts_raw)
backbone_neighbors_dict[bkbone] = dict(neighbors_list=neighbors_list,neighbors_shared_vert=neighbors_shared_vert,
number_of_faces=number_of_faces)
else:
neighbors_list = backbone_neighbors_dict[bkbone]["neighbors_list"]
neighbors_shared_vert = backbone_neighbors_dict[bkbone]["neighbors_shared_vert"]
number_of_faces = backbone_neighbors_dict[bkbone]["number_of_faces"]
#if(bkbone == "170"):
# print("70 nbrs = " + str(nbrs))
#counts up the number of shared vertices with backbone neighbors
backbone_count_flag = False
neighbor_counter = 0
total_backbone_shared_verts = 0
for n in neighbors_list:
if (n in backbone_labels) and (n not in to_remove):
neighbor_counter += 1
total_backbone_shared_verts = total_backbone_shared_verts + neighbors_shared_vert[n]
#if it meets the requirement of shared verts then activate the flag
if (total_backbone_shared_verts > shared_vert_threshold):
backbone_count_flag = True
'''#prevent against the split heads with 2 or 3
backbone_neighbor_list = neighbors_list.copy()
backbone_neighbor_list.append(bkbone)
other_backbone_flag = 0
appendFlag = False
if(backbone_count_flag == True and neighbor_counter < 4):
#check the other neighbor and see if the only other backbone is the current label, if so then just a split head
other_backbone_flag = 0
for n in neighbors_list:
if (n in backbone_labels) and (n not in to_remove):
neighbors_list_of_n,neighbors_shared_vert_of_n,number_of_faces_of_n = find_neighbors(labels_list,n,verts_to_Face,faces_raw,verts_raw)
for nb in neighbors_list_of_n:
if (nb in backbone_labels) and (nb not in to_remove) and (nb not in backbone_neighbor_list):
backbone_neighbor_list.append(nb)
other_backbone_flag += 1
if other_backbone_flag == 0:
"""if printout_counter < 5:
#print("For backbone = " + str( bkbone))
#print("neighbors_list = " + str(neighbors_list))
#print("backbone_neighbor_list = " + str(backbone_neighbor_list))
#print("other_backbone_flag = " + str(other_backbone_flag))
appendFlag = True
#printout_counter +=1"""
if (backbone_count_flag == True and neighbor_counter < 4) and (other_backbone_flag == 0): #len(split_head_backbone_list) >= len(backbone_neighbor_list):
for bk in backbone_neighbor_list:
to_remove.append(bk)
counter += 1
#if not backbone neighbors and/or didn't have enough shared verts then not part of the backbone
else:
if neighbor_counter <= 0 or backbone_count_flag == False:
to_remove.append(bkbone)
counter += 1'''
#compute the number of shared vertices and see if fits:
if neighbor_counter <= 0 or backbone_count_flag == False:
to_remove.append(bkbone)
counter += 1
print("counter = " + str(counter))
if counter <= 3:
print("counter caused the break")
break
#now go through and make sure no unconnected backbone segments
"""Pseudo-code for filtering algorithm
1) iterate through all of the backbone labels
2) Go get the neighbors of the backbone
3) Add all of the neighbors who are too part of the backbone to the backbones to check list
4) While backbone neighbor counter is less than the threshold or until list to check is empty
5) Pop the next neighbor off the list and add it to the neighbors check list
6) Get the neighbors of this guy
7) for each of neighbors that is also on the backbone BUT HASN'T BEEN CHECKED YET append them to the list to be check and update counter
8) continue at beginning of loop
-- once loop breaks
9) if the counter is below the threshold:
Add all of values in the neighbros already checked list to the new_to_remove
10) Use the new_backbone_labels and new_to_remove to rewrite the labels_list
"""
print("just broke out of the loop")
new_backbone_labels = [bkbone for bkbone in backbone_labels if bkbone not in to_remove]
new_to_remove = []
skip_labels = []
print("new_backbone_labels lenght = " + str(len(new_backbone_labels)))
for bkbonz in new_backbone_labels:
if bkbonz not in skip_labels:
print("working on backbone = " + str(bkbonz))
checked_backbone_neighbors = []
backbone_neighbors_to_check = []
new_backbone_neighbor_counter = 0
shared_vert_threshold = 5
if bkbonz not in backbone_neighbors_dict.keys():
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,bkbonz,verts_to_Face,faces_raw,verts_raw)
backbone_neighbors_dict[bkbonz] = dict(neighbors_list=neighbors_list,neighbors_shared_vert=neighbors_shared_vert,
number_of_faces=number_of_faces)
else:
neighbors_list = backbone_neighbors_dict[bkbonz]["neighbors_list"]
neighbors_shared_vert = backbone_neighbors_dict[bkbonz]["neighbors_shared_vert"]
number_of_faces = backbone_neighbors_dict[bkbonz]["number_of_faces"]
for bb in neighbors_list:
if (bb in new_backbone_labels) and (bb not in checked_backbone_neighbors) and (bb not in new_to_remove) and neighbors_shared_vert[bb] > shared_vert_threshold:
backbone_neighbors_to_check.append(bb)
new_backbone_neighbor_counter += 1
#backbone_neighbors_to_check = [nb for nb in neighbors_list if nb in new_backbone_labels]
checked_backbone_neighbors = [nb for nb in backbone_neighbors_to_check]
#new_backbone_neighbor_counter = len(backbone_neighbors_to_check)
#checked_backbone_neighbors = []
#4) While backbone neighbor counter is less than the threshold or until list to check is empty
while new_backbone_neighbor_counter < backbone_neighbor_min and backbone_neighbors_to_check != []:
#5) Pop the next neighbor off the list and add it to the neighbors check list
current_backbone = backbone_neighbors_to_check.pop(0)
if current_backbone not in checked_backbone_neighbors:
checked_backbone_neighbors.append(current_backbone)
#6) Get the neighbors of this guy
if current_backbone not in backbone_neighbors_dict.keys():
neighbors_list,neighbors_shared_vert,number_of_faces = find_neighbors(labels_list,current_backbone,verts_to_Face,faces_raw,verts_raw)
backbone_neighbors_dict[current_backbone] = dict(neighbors_list=neighbors_list,neighbors_shared_vert=neighbors_shared_vert,
number_of_faces=number_of_faces)
else:
neighbors_list = backbone_neighbors_dict[current_backbone]["neighbors_list"]
neighbors_shared_vert = backbone_neighbors_dict[current_backbone]["neighbors_shared_vert"]
number_of_faces = backbone_neighbors_dict[current_backbone]["number_of_faces"]
#7) for each of neighbors that is also on the backbone BUT HASN'T BEEN CHECKED YET append them to the list to be check and update counter
for bb in neighbors_list:
if (bb in new_backbone_labels) and (bb not in checked_backbone_neighbors) and (bb not in new_to_remove) and neighbors_shared_vert[bb] > shared_vert_threshold:
backbone_neighbors_to_check.append(bb)
new_backbone_neighbor_counter += 1
#9) if the counter is below the threshold --> add all of the values in the checked-neighbors list to new_to_remove
if new_backbone_neighbor_counter < backbone_neighbor_min:
for bz in checked_backbone_neighbors:
if bz not in new_to_remove:
new_to_remove.append(bz)
print("removed " + str(checked_backbone_neighbors))
else:
skip_labels = skip_labels + checked_backbone_neighbors
#print("to remove = " + str(to_remove))
print("done Analyzing big and small segments")
#go through and switch the labels of the backbone faces
#may not want to relabel until the end in order to preserve the labels in case label a big one wrong
print("about to rewrite the labels")
for i in range(0,len(labels_list)):
if labels_list[i] in new_backbone_labels and labels_list[i] not in new_to_remove:
labels_list[i] = "backbone"
#faces_raw[i].material_index = num_colors
print("DONE about to rewrite the labels")
return labels_list, verts_to_Face
import csv
from collections import Counter
import time
#Unused function that was previously used to distribute the computational work,
#but is now already accounted for by the populate method of the computed datajoint table.
#It split the list of neurons l into roughly equal chunks:
"""def get_neurons_assignment(parts,index):
#get the list of neurons from datajoint
l = list(set(ta3.Compartment.Component().fetch("segment_id")))
print("len(l) = " + str(len(l)))
print(l)
# For item i in a range that is a length of l,
n = int(len(l)/parts)
if len(l)/parts > n:
n = n + 1
print("n = "+str(n))
storage = []
for i in range(0, len(l), n):
# Create an index range for l of n items:
#print("l[i:i+n] = " + str(l[i:i+n]))
storage.append( l[i:i+n] )
#print(storage)
return(storage[index])"""
ta3 = dj.create_virtual_module('ta3', 'microns_ta3')
ta3p100 = dj.create_virtual_module('ta3p100', 'microns_ta3p100')
schema = dj.schema('microns_ta3p100')
@schema
class ComponentLabelFinal(dj.Computed):
definition = """
# creates the labels for the mesh table
-> ta3p100.ComponentAutoSegmentFinal
time_updated :timestamp # the time at which the component labels were updated
---
n_vertices :int unsigned #number of vertices in component
n_triangles :int unsigned #number of faces in component
labeled_vertices :longblob #indicate which vertices are spine,spine_head,spine_neck otherwise 0
labeled_triangles :longblob #indicate which faces are spine,spine_head,spine_neck otherwise 0
n_heads :int unsigned #totals the number of heads after classification, helps for optimization
used_version :tinyint #whether this component is used in the final labels or not, 0 no, 1 yes
"""
#key_source = ta3.ComponentAutoSegment #& 'n_triangle_indices>100' & [dict(compartment_type=comp) for comp in ['Basal', 'Apical', 'Oblique', 'Dendrite']]
def make(self, key):
original_start_time = time.time()
start_time = time.time()
#neuron_ID = 579228
#compartment_type = "Basal"
#component_index = 2
#clusters = 12
#smoothness = 0.04
#Apical_Basal_Oblique_default = [12,16]
#basal_big = [16,18]
neuron_ID = str(key["segment_id"])
#component = (ta3.Compartment.Component & key).fetch1()
component_index = key["component_index"]
compartment_type = key["compartment_type"]
#print("component_size = " + str(component_size))
"""if (compartment_type == "Basal") & (component_size > 160000):
cluster_list = basal_big
else:
cluster_list = Apical_Basal_Oblique_default"""
#for clusters in cluster_list:
print("starting on cluster took--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
print(str(key["segment_id"]) + " type:" + str(key["compartment_type"])
+ " index:" + str(key["component_index"]) + " cluster:" + str(key["clusters"])
+ " smoothness:" + str(key["smoothness"]))
for obj in bpy.data.objects:
if "neuron" in obj.name:
obj.select = True
ob_name = load_Neuron_automatic_spine(key)
object_counter = 0
for obj in bpy.data.objects:
if "neuron" in obj.name:
object_counter += 1
if object_counter>1:
raise ValueError("THE NUMBER OF OBJECTS ARE MORE THAN 1")
print("loading object and box--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
#what I will need to get from datajoint access: 1) sdf_final_dict 2) labels_list; might need to make the object active
sdf_final_dict, labels_list = get_cgal_data_and_label(key,ob_name)
if(sdf_final_dict == [] and labels_list == []):
print("NO CGAL DATA FOR " + str(neuron_ID))
# deselect all
bpy.ops.object.select_all(action='DESELECT')
# selection
#for ob in bpy.data.objects
#bpy.data.objects[ob_name].select = True
for obj in bpy.data.objects:
if "neuron" in obj.name:
obj.select = True
# remove it
bpy.ops.object.delete()
##########should this be a return??#########
return
print("getting cgal data--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
#complete_path = "/Users/brendancelii/Google Drive/Xaq Lab/Final_Blender/saved_sdf/sdf_saved_off.npz"
#np.savez(complete_path,labels_list=labels_list,sdf_final_dict=sdf_final_dict)
max_backbone_threshold = 200 #the absolute size; if a label is bigger than this then it is marked as a possible backbone
backbone_threshold=40 #if the label meets the width requirements, these are the size requirements as well in order to be considered possible backbone
secondary_threshold=20
shared_vert_threshold=20
backbone_width_threshold = 0.10 #the median sdf/width value the segment has to have in order to be considered a possible backbone
#labels_list,verts_to_Face = smooth_backbone_vp3(labels_list,sdf_final_dict,backbone_width_threshold,max_backbone_threshold = max_backbone_threshold,backbone_threshold=backbone_threshold
# ,secondary_threshold=secondary_threshold,shared_vert_threshold=shared_vert_threshold,number_Flag = False, seg_numbers=1,smooth_Flag=True)
backbone_neighbor_min=20
labels_list,verts_to_Face = smooth_backbone_vp4(labels_list,sdf_final_dict,backbone_width_threshold,max_backbone_threshold = max_backbone_threshold,backbone_threshold=backbone_threshold
,secondary_threshold=secondary_threshold,shared_vert_threshold=shared_vert_threshold,backbone_neighbor_min=backbone_neighbor_min,
number_Flag = False, seg_numbers=1,smooth_Flag=True)
print("smoothing backbone--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
#save off the sdf value for testing:
#save off the faces_raw as an npz file
#complete_path = "/Users/brendancelii/Google Drive/Xaq Lab/Final_Blender/saved_sdf/sdf_saved_off.npz"
#np.savez(complete_path,labels_list=labels_list,sdf_final_dict=sdf_final_dict)
object_counter = 0
for obj in bpy.data.objects:
if "neuron" in obj.name:
object_counter += 1
if object_counter>1:
raise ValueError("THE NUMBER OF OBJECTS ARE MORE THAN 1")
head_counter,neck_counter, spine_counter, stub_counter,final_verts_labels_list, final_faces_labels_list = automatic_spine_classification_vp3(labels_list,verts_to_Face,sdf_final_dict)
print("classifying spine--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
print("head_counter = " + str(head_counter))
print("neck_counter = " + str(neck_counter))
print("spine_counter = " + str(spine_counter))
print("stub_counter = " + str(stub_counter))
#now send out the labels to the table
#now write them to the datajoint table
comp_dict = dict(key,
time_updated = str(datetime.datetime.now())[0:19],
n_vertices = len(final_verts_labels_list),
n_triangles = len(final_faces_labels_list),
labeled_vertices = final_verts_labels_list,
labeled_triangles = final_faces_labels_list,
n_heads = head_counter,
used_version = 1)
self.insert1(comp_dict)
print("writing label data to datajoint--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
#delete the object after this
#delete the object
# deselect all
bpy.ops.object.select_all(action='DESELECT')
# selection
#for ob in bpy.data.objects
#bpy.data.objects[ob_name].select = True
for obj in bpy.data.objects:
if "neuron" in obj.name:
obj.select = True
# remove it
bpy.ops.object.delete()
print("deleting object--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
# deselect all
bpy.ops.object.select_all(action='DESELECT')
# selection
#for ob in bpy.data.objects
#bpy.data.objects[ob_name].select = True
object_counter = 0
for obj in bpy.data.objects:
if "neuron" in obj.name:
object_counter += 1
if object_counter>1:
raise ValueError("THE NUMBER OF OBJECTS ARE MORE THAN 1")
print("finished")
print("--- %s seconds ---" % (time.time() - original_start_time))
populate_start = time.time()
ComponentLabelFinal.populate(reserve_jobs=True)
print("\npopulate:", time.time() - populate_start)
|
[
"42202912+celiibrendan@users.noreply.github.com"
] |
42202912+celiibrendan@users.noreply.github.com
|
544e3c52beebc09d4aa6f0d60a20bde9143251e9
|
365af79b0eb6f45fcdd92f7164f0291ffe6b7f4a
|
/3.py
|
25025be6358e17fe2fa893abe8c64351af78436f
|
[] |
no_license
|
darthkenobi5319/Python-Lab-8
|
7895b06165dfb52cb0691290fdb3f4cf19115139
|
0f2e1d87c5b453b7d5a7f6cc669e789fd65b0810
|
refs/heads/master
| 2020-03-28T12:27:18.236228
| 2018-09-11T10:22:31
| 2018-09-11T10:22:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 5 11:03:26 2017
@author: ZHENGHAN ZHANG
"""
#first load the file
#f=open('C:\Users\ZHENGHAN ZHANG\Desktop\Python\2017.10.5\board.txt','r')
f=open('board.txt','r')
ls1=[]
while True:
c=f.readline().strip()
if c == '':
break
ls2 = list(c)
ls1.append(ls2)
f.close()
#put the matrix on a board
for i in range(len(ls1)-1):
m=''
for j in ls1[i]:
m+=j
m+='|'
print(m[:-1])
l='-+'*len(ls1[i])
print(l[:-1])
m=''
for j in ls1[-1]:
m+=j
m+='|'
print(m[:-1])
#user interaction
while True:
x=input('Please enter two coordinates (row,col): (enter "stop" to end) ').split(',')
if x[0]=='stop':
break
y=input('Please enter a letter: ')
row=int(x[0])
column=int(x[1])
ls1[row][column]=y
m=''
for i in range(len(ls1)):
for j in range(len(ls1[i])):
m+=ls1[i][j]
m+='\n'
f=open('board.txt','w')
f.write(m)
f.close()
|
[
"43033983+darthkenobi5319@users.noreply.github.com"
] |
43033983+darthkenobi5319@users.noreply.github.com
|
8325e8bfb6c511e224f3aac8f88afc68acd879ea
|
46e35bb8e400c09381b467ff0ae3e7100fa9c737
|
/venv/Scripts/pip-script.py
|
8683d9c7918f6879cc0769b490f13fecd9434030
|
[] |
no_license
|
goastsj/interface_autoframe
|
54e8c9fce95ef33ec3d10296084cf24c6be2b691
|
514e79fd3a489e48163881c512c089971a6c4e4c
|
refs/heads/master
| 2020-04-26T12:44:22.408801
| 2019-03-10T14:04:03
| 2019-03-10T14:04:03
| 173,559,231
| 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 464
|
py
|
#!D:\学习资料\接口自动化\接口自动化-视频\interfaceauto\interface_autoframe\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"1037905204@qq.com"
] |
1037905204@qq.com
|
85723fbe5a2fd93ee074528b9234f24cb86ed9e2
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/6/n7e.py
|
33f3429b1e2c4c1ad76ee80ffc4f1c7f76064da3
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'n7E':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
6819e9f732af27b2ad4095c2cf77489ae7040c70
|
d1a32ab42cf1db8c45244cc9fa087c29163c360a
|
/Online Module/recommender/accounts/urls.py
|
17718e6a964dca952ddafa7e85c8e3da021f0b3e
|
[] |
no_license
|
AnjithPaul/Online-Course-Recommendation-System
|
0a834522c6570996618735027e575a828feff5c2
|
e160167e99286e8d705da518618cec750f9405ad
|
refs/heads/main
| 2023-06-07T22:21:51.940781
| 2021-06-16T07:37:08
| 2021-06-16T07:37:08
| 353,596,738
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
"""recommender URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('register', views.register, name="register"),
path('login', views.login, name="login"),
path('logout', views.logout, name="logout"),
]
|
[
"65152866+AnjithPaul@users.noreply.github.com"
] |
65152866+AnjithPaul@users.noreply.github.com
|
c5a4840e2abacff143dd7d855e796d90b83c83fe
|
d9eef8dd3489682c8db41f2311e3058d1f369780
|
/.history/abel-network-files/metis_transf_20180709124830.py
|
42a9fae8f327a0df02f62926b8ffe1d5dacf3f19
|
[] |
no_license
|
McKenzie-Lamb/Gerrymandering
|
93fe4a49fe39a0b307ed341e46ba8620ea1225be
|
b7a7c4129d6b0fcd760ba8952de51eafa701eac3
|
refs/heads/master
| 2021-01-25T06:06:43.824339
| 2018-10-16T14:27:01
| 2018-10-16T14:27:01
| 93,526,515
| 0
| 0
| null | 2018-07-12T19:07:35
| 2017-06-06T14:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
# Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import graph_tool.all as gt
import metis
from pathlib import Path
# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previous created Graph and creating the prop maps
graph = gt.load_graph(str(data_folder / "tmp_graph100.gt"))
name = graph.new_vertex_property('string')
color = graph.new_vertex_property('string')
adjlist_pop = []
nodew_pop = []
for i in graph.vertices():
neighbors = tuple([j for j in i.all_neighbors()])
adjlist_pop.append(neighbors)
#print(graph.vp.data[i]['PERSONS'])
    weights = (graph.vp.data[i]['PERSONS'], int(graph.vp.data[i]['CONREP14']/graph.vp.data[i]['CONDEM14']))
nodew_pop.append(weights)
metis_graph = metis.adjlist_to_metis(adjlist_pop, nodew=nodew_pop)
objval, parts = metis.part_graph(metis_graph, nparts=4)
for i in range(len(parts)):
name[graph.vertex(i)] = parts[i]
if graph.vp.data[graph.vertex(i)]['CONREP14'] > graph.vp.data[graph.vertex(i)]['CONDEM14']:
color[graph.vertex(i)] = 'red'
else:
color[graph.vertex(i)] = 'blue'
gt.graph_draw(graph, pos=graph.vp.pos, vertex_text=name, output=str(main_folder / 'tmp_metis_init.png'))
adjlist = []
nodew = []
for i in graph.vertices():
neighbors = tuple([j for j in i.all_neighbors()])
adjlist.append(neighbors)
#print(graph.vp.data[i]['PERSONS'])
weights = (graph.vp.data[i]['PERSONS'], int(graph.vp.data[i]['CONREP14']/graph.vp.data[i]['CONDEM14']))
nodew.append(weights)
metis_graph = metis.adjlist_to_metis(adjlist, nodew=nodew)
objval, parts = metis.part_graph(metis_graph, nparts=4, tpwgts=[(0.25,0.50),(0.25,0.10),(0.25, 0.30),(0.25, 0.10)])
for i in range(len(parts)):
name[graph.vertex(i)] = parts[i]
if graph.vp.data[graph.vertex(i)]['CONREP14'] > graph.vp.data[graph.vertex(i)]['CONDEM14']:
color[graph.vertex(i)] = 'red'
else:
color[graph.vertex(i)] = 'blue'
gt.graph_draw(graph, pos=graph.vp.pos, vertex_text=name, output=str(main_folder / 'tmp_metis_fin.png'))
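# Minimal sketch of the metis partitioning pattern used above, on a toy
# path graph (hypothetical data; assumes only the `metis` package):
#   toy_adjlist = [(1,), (0, 2), (1,)]  # nodes 0-1-2 in a line
#   toy_graph = metis.adjlist_to_metis(toy_adjlist)
#   objval, toy_parts = metis.part_graph(toy_graph, nparts=2)
#   # toy_parts is a list like [0, 0, 1]: one partition index per node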
|
[
"gonzaleza@ripon.edu"
] |
gonzaleza@ripon.edu
|
66c01cbc8829d45abdad4bbf37d41345a0a5bee9
|
dc2e5e4b63b632b69f154f7ad30d9c8aed3692e5
|
/world/api.py
|
c73410e97c95105da8f3e35f5563de8760be943c
|
[] |
no_license
|
jayArnel/geodjango
|
fad21a66afcf6ada4ca1366205ccb10eed282a87
|
3fb8f6c431e4ae8894f4bd885d426ef0b342b3af
|
refs/heads/master
| 2021-01-10T10:37:23.676218
| 2016-03-30T14:36:07
| 2016-03-30T14:36:07
| 53,738,339
| 0
| 0
| null | 2016-03-13T16:44:46
| 2016-03-12T15:24:09
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
from tastypie.resources import ModelResource
from models import WorldBorder
from tastypie import fields
class WorldBorderResource(ModelResource):
geojson = fields.CharField(attribute='geojson', readonly=True)
class Meta:
queryset = WorldBorder.objects.all()
resource_name = 'worldborder'
def dehydrate_geom(self, bundle):
return bundle.obj.geom.json
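# Usage sketch (hypothetical urls.py wiring, standard tastypie pattern):
#   from tastypie.api import Api
#   v1_api = Api(api_name='v1')
#   v1_api.register(WorldBorderResource())
#   # then include v1_api.urls in urlpatterns to expose /api/v1/worldborder/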
|
[
"jay.arnel@gmail.com"
] |
jay.arnel@gmail.com
|
b2369b62199eee97be83fc60d97c7e11261bf934
|
0d8f06405e28f954a240132ad0f58ed79396f32a
|
/simpleProject/articles/urls.py
|
97872cf8409bd344284e759c3fabc5371c8ddc3d
|
[] |
no_license
|
szalik-m/djangoTutorial
|
f565e975a0346484a1334c4806830b818dd75820
|
f7fe0acc71d40e4482c18b91291dc48305e73a19
|
refs/heads/master
| 2022-10-04T15:15:36.644769
| 2020-06-05T11:27:50
| 2020-06-05T11:27:50
| 267,392,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.article_list),
]
|
[
"szalik.mat@gmail.com"
] |
szalik.mat@gmail.com
|
ca60dfeb903c62d9617eb6584ac2ae23d593ea90
|
24be2965c613549f62407ecc4272fb6e96185497
|
/raw/project1-nn/nn/__init__.py
|
b0eedbf3ad78f1e5011dd80ecd44e1570202348d
|
[] |
no_license
|
falgunee/AI101-DeepLearning
|
8905551a146ec6502abf21c4f67dbaef7113673e
|
e478dcd2a8532a46eb0a2f98cd399ce1fc1d5383
|
refs/heads/master
| 2020-04-28T00:59:34.768625
| 2019-03-07T09:21:37
| 2019-03-07T09:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 16:26:25 2018
@author: jaley
"""
|
[
"jaley.dholakiya@gmail.com"
] |
jaley.dholakiya@gmail.com
|
1de0e4cd109d4b91f7b44a82e52d59983b730d6c
|
e3414d2d22912bba8dc0d91140e3c5ca7ede4c99
|
/pages/base_page.py
|
53f1cba9ef68cdbf77068e18526c882ea717196e
|
[] |
no_license
|
Chovhan/Slenium-QAA-final-task
|
47cb8d3f87d25d17e9b4fdfd765db0c8535ebede
|
70374074af5f9d1ae8b76e00d846943581e50c64
|
refs/heads/main
| 2023-04-12T22:45:21.012452
| 2021-04-21T21:09:04
| 2021-04-21T21:09:04
| 360,078,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,347
|
py
|
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.common.exceptions import NoAlertPresentException
import math
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .locators import BasePageLocators
class BasePage():
def __init__(self, browser, url, timeout=10):
self.browser = browser
self.url = url
self.browser.implicitly_wait(timeout)
def open(self):
self.browser.get(self.url)
def is_element_present(self, how, what):
try:
self.browser.find_element(how, what)
except NoSuchElementException:
return False
return True
def is_not_element_present(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
except TimeoutException:
return True
return False
def is_disappeared(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout, 1, TimeoutException). \
until_not(EC.presence_of_element_located((how, what)))
except TimeoutException:
return False
return True
def solve_quiz_and_get_code(self):
alert = self.browser.switch_to.alert
x = alert.text.split(" ")[2]
answer = str(math.log(abs((12 * math.sin(float(x))))))
alert.send_keys(answer)
alert.accept()
try:
alert = self.browser.switch_to.alert
alert_text = alert.text
print(f"Your code: {alert_text}")
alert.accept()
except NoAlertPresentException:
print("No second alert presented")
def go_to_login_page(self):
link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
link.click()
def should_be_login_link(self):
assert self.is_element_present(*BasePageLocators.LOGIN_LINK), "Login link is not presented"
def go_to_basket_page(self):
basket_link = self.browser.find_element(*BasePageLocators.BASKET_LINK)
basket_link.click()
def should_be_authorized_user(self):
assert self.is_element_present(*BasePageLocators.USER_ICON), "User icon is not presented, probably unauthorised user"
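# Usage sketch (hypothetical URL, assumes a configured Selenium webdriver):
#   from selenium import webdriver
#   browser = webdriver.Chrome()
#   page = BasePage(browser, "http://example.com/catalogue/")
#   page.open()
#   page.should_be_login_link()  # raises AssertionError if the link is missing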
|
[
"dema200043@gmail.com"
] |
dema200043@gmail.com
|
de5d7a80fe1c6c4e82c57b745a268c0ed520bed0
|
da38ce93c6a807a3877fb40d7cb890a1bdd27015
|
/convert/views.py
|
10f0e09a0b45956bdea40a2df8ee433cb42ffcdd
|
[] |
no_license
|
kanandachristian/final_Project-
|
902fc4afa03943624cf062c7fb38b46d0aa36b0e
|
17e9b5ddfa63f0aab09e804e0ae6df10b4aa29f5
|
refs/heads/master
| 2023-02-10T22:35:03.074722
| 2021-01-10T17:38:27
| 2021-01-10T17:38:27
| 323,913,889
| 1
| 0
| null | 2021-01-10T17:38:28
| 2020-12-23T13:47:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,037
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.core.paginator import Paginator, EmptyPage
from django.contrib import messages
from rental.models import*
from convert.models import Rate
from django.db.models import Q
# Create your views here.
def events(request, category_slug=None):
category = None
categories = Category.objects.all()
properties = Propertie.objects.filter(available=True, vaccant=True)
if category_slug:
category = get_object_or_404(
Category, slug=category_slug)
properties = Propertie.objects.filter(
category=category, available=True, vaccant=True)
currencies = ['RWF', 'USD', 'FC']
currencie = ['USD', 'FC', 'RWF']
# return render(request,'converter.html',{'currencies':currencies,'currencie':currencie})
return render(request, 'converter.html', {'category': category, 'categories': categories, 'properties': properties})
def actionconv(request, category_slug=None):
category = None
categories = Category.objects.all()
properties = Propertie.objects.filter(available=True, vaccant=True)
if category_slug:
category = get_object_or_404(
Category, slug=category_slug)
properties = Propertie.objects.filter(
category=category, available=True, vaccant=True)
if request.method == 'GET':
amount = float(request.GET['montant'])
currence1 = int(request.GET['selection1'])
currence2 = int(request.GET['selection2'])
rwf = 'RWF'
usd = 'USD'
fc = 'FC'
tauxA = 980
tauxV = 2000
value = 0.0
if currence1 != currence2:
try:
if currence1 == 1 and currence2 == 2:
value = amount * tauxA
x = 1 * tauxA
y = 1 / tauxA
tot = {"am": amount, "tauxA": tauxA, "tauxV": tauxV,
"value": value, "usd": usd, "rwf": rwf, "fc": fc, "x": x, "y": y}
return render(request, 'converter2.html', {'total': tot, 'category': category, 'categories': categories, 'properties': properties})
if currence1 == 2 and currence2 == 1:
value = amount / tauxA
x = 1 * tauxA
y = 1 / tauxA
tot = {"am": amount, "tauxA": tauxA, "tauxV": tauxV,
"value": value, "usd": usd, "rwf": rwf, "fc": fc, "x": x, "y": y}
return render(request, 'converter3.html', {'total': tot, 'category': category, 'categories': categories, 'properties': properties})
############################# USD RWF ####################################################
if currence1 == 1 and currence2 == 3:
value = amount * tauxV
x = 1 * tauxV
y = 1 / tauxV
tot = {"am": amount, "tauxA": tauxA, "tauxV": tauxV,
"value": value, "usd": usd, "rwf": rwf, "fc": fc, "x": x, "y": y}
return render(request, 'converter4.html', {'total': tot, 'category': category, 'categories': categories, 'properties': properties})
if currence1 == 3 and currence2 == 1:
value = amount / tauxV
x = 1 * tauxV
y = 1 / tauxV
tot = {"am": amount, "tauxA": tauxA, "tauxV": tauxV,
"value": value, "usd": usd, "rwf": rwf, "fc": fc, "x": x, "y": y}
return render(request, 'converter5.html', {'total': tot, 'category': category, 'categories': categories, 'properties': properties})
############################ USD FC ####################################################
if currence1 == 2 and currence2 == 3:
value = amount * 2
x = 1 * 2
y = 1 / 2
tot = {"am": amount, "tauxA": tauxA, "tauxV": tauxV,
"value": value, "usd": usd, "rwf": rwf, "fc": fc, "x": x, "y": y}
return render(request, 'converter6.html', {'total': tot, 'category': category, 'categories': categories, 'properties': properties})
if currence1 == 3 and currence2 == 2:
value = amount / 2
x = 1 * 2
y = 1 / 2
tot = {"am": amount, "tauxA": tauxA, "tauxV": tauxV,
"value": value, "usd": usd, "rwf": rwf, "fc": fc, "x": x, "y": y}
return render(request, 'converter7.html', {'total': tot, 'category': category, 'categories': categories, 'properties': properties})
else:
return redirect('conversion:Error')
############################ RWF FC ####################################################
except TypeError:
return HttpResponse('Type Value Error')
else:
return redirect('conversion:Error')
else:
        return render(request, 'converter1.html', {'category': category, 'categories': categories, 'properties': properties})
# if currence0 and currence1:
# t = 992
# amountConverted = am * t
# amof1D = 1
# amof1R = 1*992
# amofR2 = 1
# amofD2 = 1/992
# {"USD":'USD'}
# {"RWF":'RWF'}
# # tot={'amountConverted': amountConverted ,'amof1D':amof1D,'amof1R': amof1R,
# # 'amofR2':amofR2,'amofD2':amofD2,'cur1':cur1,'cur2':cur2,'am':am}
# tot={'amount':am}
#
# return render(request,'converter2.html',{'tot':tot})
# else:
# return redirect('conversion:conv2')
#
# else:
# return render(request,"converter.html")
|
[
"kanandachristian@gmail.com"
] |
kanandachristian@gmail.com
|
997757f5a351977385d38e25a9b8faa727157099
|
b997f959941208dc6a54e311527abbbd40e45517
|
/week3/91_numDecodings.py
|
ac68f5c33cfd53a53a467829d4123e754bc0bf79
|
[] |
no_license
|
Richard9784/geekbang_homework
|
dc7b2a2c481e60c231806f1ddf17ef6af112ab57
|
79d60e100bf5ee26d750f6d93d83fffb77ef60b9
|
refs/heads/main
| 2023-04-05T05:23:04.139410
| 2021-04-16T12:18:43
| 2021-04-16T12:18:43
| 347,631,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
class Solution:
    def numDecodings(self, s: str) -> int:
        # dp[i] = number of ways to decode the first i characters of s
        if s[0] == '0':
            return 0
        n = len(s)
        dp = [1] * (n+1)
        for i in range(2, n+1):
            # a '0' is only decodable as the second digit of '10' or '20'
            if s[i-1] == '0' and s[i-2] not in '12':
                return 0
            if s[i-2:i] in ['10', '20']:
                dp[i] = dp[i-2]
            elif '10' < s[i-2:i] <= '26':
                # two-digit code in 11..26: decode one digit or two
                dp[i] = dp[i-1] + dp[i-2]
            else:
                dp[i] = dp[i-1]
        return dp[n]
if __name__ == "__main__":
test = Solution()
s = "12"
print(test.numDecodings(s))
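# Worked example: s = "226" gives dp = [1, 1, 2, 3], i.e. 3 decodings
# ("2 2 6", "22 6", "2 26"); s = "06" returns 0 because of the leading '0'.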
|
[
"jianxiaochen84@163.com"
] |
jianxiaochen84@163.com
|
3fbbc2905cbd2486769a535857eb8440454dffa6
|
36567aa5e17c697a4b9b27977953a21638d25e53
|
/7/02-flask-intro/cheng_leon/app.py
|
050b6fc08ca0c156d7898b28889ab9d05b991e80
|
[] |
no_license
|
stuycs-softdev/submissions
|
83f609fb5dc796a79c5c4dc956e2504f040a0f17
|
2011a519f710060ab30e5d63fdfde0ee84feda44
|
refs/heads/master
| 2021-01-21T22:26:00.587669
| 2016-02-03T17:07:20
| 2016-02-03T17:07:20
| 42,660,106
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,770
|
py
|
from flask import Flask, render_template, request, redirect, url_for, session
app = Flask(__name__)
@app.route("/home")
def home():
return render_template("home.html")
@app.route("/p1")
def p1():
return render_template("p1.html")
@app.route("/p2")
def p2():
return render_template("p2.html")
@app.route("/hidden")
def hidden():
import random
n1 = random.randrange(1,100)
n2 = random.randrange(1,100)
ret = "<h1>Awesome! You are the %dth visitor!</h1>" % n1
ret += "Just kidding...you are really the %dth visitor" % n2
return ret
@app.route("/login", methods=["GET", "POST"])
def login():
    if request.method=="GET":
        return render_template("login.html")
    # on POST, fall back to the same template so the view always returns a response
    return render_template("login.html")
@app.route("/login2",methods=["GET","POST"])
def login2():
if request.method == "GET":
return render_template("login2.html")
else:
name = request.form['name']
email = request.form['email']
button = request.form["button"]
print request.__doc__
print request.args
print request.__dict__
        print dir(request)  # list the request object's attributes
print request.args.get("email") #works with GET
print request.form["email"] #works with POST
s = "name: " + request.form["name"]
s += "<hr>"
s += "email: " + request.form["email"]
        return s
@app.route("/profile/<name>/<email>")
def profile(name="", email=""):
    # context passed to the template (named info to avoid shadowing the dict builtin)
    info = {"name": name, "email": email}
    return render_template("profile.html", d = info)
@app.route("/inc")
def inc():
if "n" not in session:
session["n"]=0
session["n"] = session["n"]+1
return render_template("counter.html", n = session["n"])
@app.route("/")
@app.route("/start")
def start():
return render_template("start.html")
@app.route("/login3", methods=["GET","POST"])
def login3():
if request.method == "GET":
return render_template("login3.html")
else:
uname = request.form["username"]
pword = request.form["password"]
if uname == "Leon" and pword == "pass":
# return "You have logged in!"
# return redirect(url_for("user"))
return redirect("/userpage")
else:
        return "You have entered an incorrect username or password <br> <br> <a href='/login3'> Click Here to go back to login page </a>"
@app.route("/userpage")
def userpage():
#TODO: add a way to log out
return render_template("userpage.html")
@app.route("/reset")
def reset():
return redirect(url_for("start"))
if __name__ == "__main__":
app.debug=True
app.secret_key = "Don't store this on github" #used for cookies, session
app.run(host='0.0.0.0',port=8000)
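# Example: visiting /profile/Leon/leon@example.com (hypothetical values)
# binds name and email from the URL segments and renders profile.html with
# d = {"name": "Leon", "email": "leon@example.com"}; each visit to /inc
# bumps the per-session counter stored in session["n"].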
|
[
"57leonardo@gmail.com"
] |
57leonardo@gmail.com
|
cde74c8664798c8237fa5329c575a705974c6f41
|
34c5a03855ab0aca39acea941be520157f7d0b74
|
/lib/ansible/modules/cloud/alicloud/ali_slb_vsg_info.py
|
72e35f09e490e814c2cd95556da2fa6bd18f6359
|
[
"Apache-2.0"
] |
permissive
|
lixue323/ansible-provider
|
1260d1bc17a2fa7bf4c0f387a33dd942059850ed
|
aae2658532afcbcdf471609fae0e2108fb57af3b
|
refs/heads/master
| 2020-08-11T21:44:37.685788
| 2019-12-13T03:11:23
| 2019-12-13T04:00:45
| 214,633,323
| 0
| 1
|
Apache-2.0
| 2019-10-12T11:12:07
| 2019-10-12T11:12:07
| null |
UTF-8
|
Python
| false
| false
| 6,239
|
py
|
#!/usr/bin/python
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_slb_vsg_info
version_added: "2.8"
short_description: Gather facts on virtual server group of Alibaba Cloud SLB.
description:
    - This module fetches virtual server group data from the Open API in Alibaba Cloud.
options:
load_balancer_id:
description:
- ID of server load balancer.
required: true
aliases: ["lb_id"]
vserver_group_ids:
description:
- A list of SLB vserver group ids.
required: false
aliases: ["group_ids", "ids"]
name_prefix:
description:
            - Use a virtual server group name prefix to filter vserver groups.
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark >= 1.9.0"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
- name: Retrieving vsgs using slb id
ali_slb_vsg_info:
lb_id: '{{item}}'
with_items: '{{slbs.ids}}'
- name: Filter vsg using name_regex
ali_slb_vsg_info:
name_prefix: 'ansible-foo'
lb_id: 'lb-cn3cn34'
'''
RETURN = '''
ids:
    description: List of ids of the fetched virtual server groups.
returned: when success
type: list
sample: ["rsp-2zehblhcv", "rsp-f22c4lhcv"]
names:
    description: List of names of the fetched virtual server groups.
returned: when success
type: list
sample: ["ansible-1", "ansible-2"]
vserver_groups:
description:
        - info about the virtual server groups that were fetched.
    returned: when success
type: complex
contains:
address:
            description: The IP address of the load balancer.
returned: always
type: string
sample: "47.94.26.126"
backend_servers:
description: The load balancer's backend servers
returned: always
type: complex
contains:
port:
description: The backend server port
returned: always
type: int
sample: 22
server_id:
description: The backend server id
returned: always
type: string
sample: "i-vqunci342"
type:
description: The backend server type, ecs or eni
returned: always
type: string
sample: "ecs"
weight:
description: The backend server weight
returned: always
type: int
sample: 100
id:
            description: The ID of the virtual server group that was created. Same as vserver_group_id.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_id:
            description: The ID of the virtual server group that was created.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_name:
            description: The name of the virtual server group that was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
name:
            description: The name of the virtual server group that was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
tags:
description: The load balancer tags
returned: always
type: complex
sample: {}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, slb_connect
HAS_FOOTMARK = False
try:
from footmark.exception import SLBResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
def main():
argument_spec = ecs_argument_spec()
argument_spec.update(dict(
load_balancer_id=dict(type='str', aliases=['lb_id'], required=True),
vserver_group_ids=dict(type='list', aliases=['group_ids', 'ids']),
name_prefix=dict(type='str')
))
module = AnsibleModule(argument_spec=argument_spec)
if HAS_FOOTMARK is False:
module.fail_json(msg="Package 'footmark' required for this module.")
vsg_ids = module.params['vserver_group_ids']
name_prefix = module.params['name_prefix']
ids = []
vsgs = []
names = []
try:
slb = slb_connect(module)
groups = slb.describe_vserver_groups(**{'load_balancer_id': module.params['load_balancer_id']})
if groups:
for group in groups:
if vsg_ids and group.id not in vsg_ids:
continue
if name_prefix and not str(group.name).startswith(name_prefix):
continue
vsgs.append(group.read())
ids.append(group.id)
names.append(group.name)
module.exit_json(changed=False, vserver_groups=vsgs, ids=ids, names=names)
except Exception as e:
        module.fail_json(msg="Unable to describe slb vserver groups, error: {0}".format(e))
if __name__ == '__main__':
main()
|
[
"guimin.hgm@alibaba-inc.com"
] |
guimin.hgm@alibaba-inc.com
|
4067eaa4a5851aa47554afb318aa9f0825522d89
|
c9490d7bb9c3add1a5e71b06c9180260ffc1fff5
|
/web_dynamic/2-hbnb.py
|
2eadee48a25ba93f32aa643310baf9dfb56b7b2c
|
[
"MIT"
] |
permissive
|
PierreBeaujuge/AirBnB_clone_v4
|
54a255023587e6e291f41410f124da8089f2a5b7
|
f93bb1f22660f4497fb942abe120a5e69815affc
|
refs/heads/master
| 2021-01-04T15:00:01.541582
| 2020-10-08T09:04:29
| 2020-10-08T09:04:29
| 240,601,631
| 0
| 1
|
MIT
| 2020-02-18T02:25:15
| 2020-02-14T21:28:36
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,351
|
py
|
#!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
import uuid
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/2-hbnb/')
def hbnb_filters(the_id=None):
"""
handles request to custom template with states, cities & amentities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
cache_id = uuid.uuid4()
return render_template('2-hbnb.html',
states=states,
amens=amens,
places=places,
users=users,
cache_id=cache_id)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
|
[
"pierre.beaujuge@gmail.com"
] |
pierre.beaujuge@gmail.com
|
31db0a2b0eec44a444ec11cbde5ae8a6dc007f0a
|
60a352cddeab022d643b61b8e8965c296cc815cc
|
/src/main.py
|
435097cb3b84386bcddbfb042cfdf0614e95089c
|
[] |
no_license
|
tessied/blackjack
|
f24b0433614528696b61c5f00c81f6186bf0e1c5
|
b588c86423b0646fac0662872a9cd46c47d72db3
|
refs/heads/main
| 2023-07-12T11:23:09.425372
| 2021-08-23T02:07:09
| 2021-08-23T02:07:09
| 398,940,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,689
|
py
|
#This is a simplified version of the Blackjack game.
#Inspired by the Udemy Course 100 Days of Code
import random
logo = """
.------. _ _ _ _ _
|A_ _ |. | | | | | | (_) | |
|( \/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __
| \ /|K /\ | | '_ \| |/ _` |/ __| |/ / |/ _` |/ __| |/ /
| \/ | / \ | | |_) | | (_| | (__| <| | (_| | (__| <
`-----| \ / | |_.__/|_|\__,_|\___|_|\_\ |\__,_|\___|_|\_\\
| \/ K| _/ |
`------' |__/
"""
#Returns a random card from the deck
def deal_card():
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
return random.choice(cards)
#Takes a list of cards as input and returns the score
def calculate_score(card_list):
score = sum(card_list)
#Checks for a blackjack - returns 0 instead of the actual score
if score == 21 and len(card_list) == 2:
return 0
#Checks for an ace - if the score is over 21, replace it with a 1
if 11 in card_list and score > 21:
card_list.remove(11)
card_list.append(1)
return sum(card_list)
#Compares the user score to the computer score
#Takes in the user score and computer score
#Returns the result of the game
def compare(user, computer):
if user == computer:
return "Draw!"
elif computer == 0:
return "Lose, opponent has blackjack!"
elif user == 0:
return "You win with a blackjack!"
elif user > 21:
return "You went over. You lose!"
elif computer > 21:
return "Computer went over. You win!"
elif user > computer:
return "You win!"
else:
return "You lose!"
#Plays a single game
def play_game():
print(logo)
computer_cards = []
user_cards = []
end_of_game = False
#Deal the user and computer two cards at the beginning of each game
for _ in range(2):
user_cards.append(deal_card())
computer_cards.append(deal_card())
#Repeatedly calculates the score until the user has finished
while not end_of_game:
computer_score = calculate_score(computer_cards)
user_score = calculate_score(user_cards)
print(f"\tYour cards: {user_cards}, current score: {user_score}")
print(f"\tcomputer's first card: {computer_cards[0]}")
#If the computer or the user has a blackjack or if the user's score is over 21, then the game ends already
if computer_score == 0 or user_score == 0 or user_score > 21:
end_of_game = True
#If the game is not over, ask the user if they want another card
else:
another = input("Type 'y' to get another card, type 'n' to pass: ")
if another == "y":
user_cards.append(deal_card())
else:
end_of_game = True
    #Repeatedly deals cards to the computer and recalculates the score until it reaches the minimum score of 17
while computer_score != 0 and computer_score < 17:
computer_cards.append(deal_card())
computer_score = calculate_score(computer_cards)
print(f"\tYour final hand: {user_cards}, final score: {user_score}")
print(f"\tComputer's final hand: {computer_cards}, final score: {computer_score}")
print("-------------------------------------------------------------")
print(compare(user_score, computer_score))
print("-------------------------------------------------------------")
#Keeps offering new games until the user declines
while input("Do you want to play a game of Blackjack? Type 'y' or 'n': ") == "y":
play_game()
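#Worked example of the ace handling in calculate_score: [11, 5, 9] sums to
#25 (> 21), so the 11 is swapped for a 1, giving [5, 9, 1] and a score of 15;
#[11, 10] sums to 21 with two cards and therefore returns 0 (blackjack).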
|
[
"tessiedong@tessies-mbp.myfiosgateway.com"
] |
tessiedong@tessies-mbp.myfiosgateway.com
|
b8d8a4bc6f9d4d07e8b654e61ebe35cb2e5b3c8f
|
d0de9a88220c85bbb0f8678630f3a35dd6718466
|
/posts/migrations/0007_post_board.py
|
c24b90fa51e784e2ff21eea4fb9b5421c2f96193
|
[] |
no_license
|
devrockstar928/django-pris
|
f690e545c56238b4c6f88586737aae8984e3327d
|
5b23a5cae445316245adecd35c6b3d18bd599f6a
|
refs/heads/master
| 2020-03-17T13:00:39.348414
| 2018-07-13T05:03:22
| 2018-07-13T05:03:22
| 133,613,458
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('boards', '0002_auto_20150323_0436'),
('posts', '0006_postlike'),
]
operations = [
migrations.AddField(
model_name='post',
name='board',
field=models.ManyToManyField(related_name=b'post_board', null=True, to='boards.Board', blank=True),
preserve_default=True,
),
]
|
[
"devrockstar928@gmail.com"
] |
devrockstar928@gmail.com
|
4c20e6b6769d1680490e49efd35daee18df732f1
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/plotly/py2/plotly/graph_objs/sankey/__init__.py
|
951083a2bed5804da50a572a9104aeb1dea14990
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 95,548
|
py
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Textfont object
Sets the font for node labels
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Textfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import textfont as v_textfont
# Initialize validators
# ---------------------
self._validators["color"] = v_textfont.ColorValidator()
self._validators["family"] = v_textfont.FamilyValidator()
self._validators["size"] = v_textfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
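# Usage sketch (hypothetical values): the class above can be constructed
# with keyword arguments, or an equivalent dict can be passed as `arg`:
#   tf = Textfont(color="black", family="Arial", size=12)
#   tf = Textfont({"color": "black", "family": "Arial", "size": 12})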
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
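# Usage sketch (hypothetical token value):
#   stream = Stream(token="my-stream-token", maxpoints=50)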
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Node(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the `node` color. It can be a single value, or an array
for specifying color for each `node`. If `node.color` is
omitted, then the default `Plotly` color palette will be cycled
through to have a variety of colors. These defaults are not
fully opaque, to allow some visibility of what is beneath the
node.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each node.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# groups
# ------
@property
def groups(self):
"""
Groups of nodes. Each group is defined by an array with the
indices of the nodes it contains. Multiple groups can be
specified.
The 'groups' property is an info array that may be specified as:
* a 2D list where:
The 'groups[i][j]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["groups"]
@groups.setter
def groups(self, val):
self["groups"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear when hovering nodes.
If `none` or `skip` are set, no information is displayed upon
hovering. But, if `none` is set, click and hover events are
still fired.
The 'hoverinfo' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'none', 'skip']
Returns
-------
Any
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.node.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                        only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.sankey.node.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
        data. Additionally, every attribute that can be specified per-
        point (the ones that are `arrayOk: true`) is available.
        Variables `value` and `label` are also available. Anything contained in tag
`<extra>` is displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# label
# -----
@property
def label(self):
"""
The shown name of the node.
The 'label' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
# labelsrc
# --------
@property
def labelsrc(self):
"""
Sets the source reference on Chart Studio Cloud for label .
The 'labelsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelsrc"]
@labelsrc.setter
def labelsrc(self, val):
self["labelsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.node.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the `line` around each
`node`.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
width
Sets the width (in px) of the `line` around
each `node`.
widthsrc
Sets the source reference on Chart Studio Cloud
for width .
Returns
-------
plotly.graph_objs.sankey.node.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# pad
# ---
@property
def pad(self):
"""
Sets the padding (in px) between the `nodes`.
The 'pad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["pad"]
@pad.setter
def pad(self, val):
self["pad"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the `nodes`.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# x
# -
@property
def x(self):
"""
The normalized horizontal position of the node.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
The normalized vertical position of the node.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the `node` color. It can be a single value, or an
array for specifying color for each `node`. If
`node.color` is omitted, then the default `Plotly`
color palette will be cycled through to have a variety
of colors. These defaults are not fully opaque, to
allow some visibility of what is beneath the node.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each node.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
groups
Groups of nodes. Each group is defined by an array with
the indices of the nodes it contains. Multiple groups
can be specified.
hoverinfo
Determines which trace information appear when hovering
nodes. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.node.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. Variables `value` and `label` are also available. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the node.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.node.Line` instance
or dict with compatible properties
pad
Sets the padding (in px) between the `nodes`.
thickness
Sets the thickness (in px) of the `nodes`.
x
The normalized horizontal position of the node.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
The normalized vertical position of the node.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
customdata=None,
customdatasrc=None,
groups=None,
hoverinfo=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
label=None,
labelsrc=None,
line=None,
pad=None,
thickness=None,
x=None,
xsrc=None,
y=None,
ysrc=None,
**kwargs
):
"""
Construct a new Node object
The nodes of the Sankey plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Node`
color
Sets the `node` color. It can be a single value, or an
array for specifying color for each `node`. If
`node.color` is omitted, then the default `Plotly`
color palette will be cycled through to have a variety
of colors. These defaults are not fully opaque, to
allow some visibility of what is beneath the node.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each node.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
groups
Groups of nodes. Each group is defined by an array with
the indices of the nodes it contains. Multiple groups
can be specified.
hoverinfo
Determines which trace information appear when hovering
nodes. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.node.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. Variables `value` and `label` are also available. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the node.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.node.Line` instance
or dict with compatible properties
pad
Sets the padding (in px) between the `nodes`.
thickness
Sets the thickness (in px) of the `nodes`.
x
The normalized horizontal position of the node.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
The normalized vertical position of the node.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
Returns
-------
Node
"""
super(Node, self).__init__("node")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Node
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Node`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import node as v_node
# Initialize validators
# ---------------------
self._validators["color"] = v_node.ColorValidator()
self._validators["colorsrc"] = v_node.ColorsrcValidator()
self._validators["customdata"] = v_node.CustomdataValidator()
self._validators["customdatasrc"] = v_node.CustomdatasrcValidator()
self._validators["groups"] = v_node.GroupsValidator()
self._validators["hoverinfo"] = v_node.HoverinfoValidator()
self._validators["hoverlabel"] = v_node.HoverlabelValidator()
self._validators["hovertemplate"] = v_node.HovertemplateValidator()
self._validators["hovertemplatesrc"] = v_node.HovertemplatesrcValidator()
self._validators["label"] = v_node.LabelValidator()
self._validators["labelsrc"] = v_node.LabelsrcValidator()
self._validators["line"] = v_node.LineValidator()
self._validators["pad"] = v_node.PadValidator()
self._validators["thickness"] = v_node.ThicknessValidator()
self._validators["x"] = v_node.XValidator()
self._validators["xsrc"] = v_node.XsrcValidator()
self._validators["y"] = v_node.YValidator()
self._validators["ysrc"] = v_node.YsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("customdata", None)
self["customdata"] = customdata if customdata is not None else _v
_v = arg.pop("customdatasrc", None)
self["customdatasrc"] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop("groups", None)
self["groups"] = groups if groups is not None else _v
_v = arg.pop("hoverinfo", None)
self["hoverinfo"] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop("hoverlabel", None)
self["hoverlabel"] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop("hovertemplate", None)
self["hovertemplate"] = hovertemplate if hovertemplate is not None else _v
_v = arg.pop("hovertemplatesrc", None)
self["hovertemplatesrc"] = (
hovertemplatesrc if hovertemplatesrc is not None else _v
)
_v = arg.pop("label", None)
self["label"] = label if label is not None else _v
_v = arg.pop("labelsrc", None)
self["labelsrc"] = labelsrc if labelsrc is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("pad", None)
self["pad"] = pad if pad is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("xsrc", None)
self["xsrc"] = xsrc if xsrc is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
_v = arg.pop("ysrc", None)
self["ysrc"] = ysrc if ysrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
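# --- A minimal usage sketch for the Node class above (hypothetical data;
# guarded so that importing this generated module stays side-effect free) ---
if __name__ == "__main__":
    _node = Node(
        label=["Coal", "Gas", "Electricity"],  # shown node names
        pad=15,  # padding between nodes, in px
        thickness=20,  # node thickness, in px
        color="steelblue",  # a single color, or one color per node
    )
    print(_node.to_plotly_json())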
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Link(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the `link` color. It can be a single value, or an array
for specifying color for each `link`. If `link.color` is
omitted, then by default, a translucent grey link will be used.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorscales
# -----------
@property
def colorscales(self):
"""
The 'colorscales' property is a tuple of instances of
Colorscale that may be specified as:
- A list or tuple of instances of plotly.graph_objs.sankey.link.Colorscale
- A list or tuple of dicts of string/value properties that
will be passed to the Colorscale constructor
Supported dict properties:
cmax
Sets the upper bound of the color domain.
cmin
Sets the lower bound of the color domain.
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required.
For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
                    colorscale in color space, use `cmin` and
                    `cmax`. Alternatively, `colorscale` may be a
                    palette name string from the following list:
                    Greys, YlGnBu, Greens, YlOrRd, Bluered, RdBu,
                    Reds, Blues, Picnic, Rainbow, Portland, Jet,
                    Hot, Blackbody, Earth, Electric, Viridis,
                    Cividis.
label
The label of the links to color based on their
concentration within a flow.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
Returns
-------
tuple[plotly.graph_objs.sankey.link.Colorscale]
"""
return self["colorscales"]
@colorscales.setter
def colorscales(self, val):
self["colorscales"] = val
# colorscaledefaults
# ------------------
@property
def colorscaledefaults(self):
"""
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults), sets the
default property values to use for elements of
sankey.link.colorscales
The 'colorscaledefaults' property is an instance of Colorscale
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Colorscale`
- A dict of string/value properties that will be passed
to the Colorscale constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.sankey.link.Colorscale
"""
return self["colorscaledefaults"]
@colorscaledefaults.setter
def colorscaledefaults(self, val):
self["colorscaledefaults"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each link.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
        Determines which trace information appears when hovering
        links. If `none` or `skip` is set, no information is displayed
        upon hovering; note that if `none` is set, click and hover
        events are still fired.
The 'hoverinfo' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'none', 'skip']
Returns
-------
Any
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
                    content within the hover label box. Has an
                    effect only if the hover label text spans two
                    or more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.sankey.link.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
        Template string used for rendering the information that appears
        in the hover box. Note that this will override `hoverinfo`.
        Variables are inserted using %{variable}, for example "y:
        %{y}". Numbers are formatted using d3-format's syntax
        %{variable:d3-format}, for example "Price: %{y:$.2f}". See
        https://github.com/d3/d3-3.x-api-
        reference/blob/master/Formatting.md#d3_format for details on
        the formatting syntax. Dates are formatted using d3-time-
        format's syntax %{variable|d3-time-format}, for example "Day:
        %{2019-01-01|%A}". See https://github.com/d3/d3-3.x-api-
        reference/blob/master/Time-Formatting.md#format for details on
        the date formatting syntax. The variables available in
        `hovertemplate` are the ones emitted as event data described at
        https://plotly.com/javascript/plotlyjs-events/#event-data.
        Additionally, all attributes that can be specified per-point
        (the ones that are `arrayOk: true`) are available, as are the
        variables `value` and `label`. Anything contained in the tag
        `<extra>` is displayed in the secondary box, for example
        "<extra>{fullData.name}</extra>". To hide the secondary box
        completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# label
# -----
@property
def label(self):
"""
The shown name of the link.
The 'label' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
# labelsrc
# --------
@property
def labelsrc(self):
"""
Sets the source reference on Chart Studio Cloud for label .
The 'labelsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelsrc"]
@labelsrc.setter
def labelsrc(self, val):
self["labelsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the `line` around each
`link`.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
width
Sets the width (in px) of the `line` around
each `link`.
widthsrc
Sets the source reference on Chart Studio Cloud
for width .
Returns
-------
plotly.graph_objs.sankey.link.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# source
# ------
@property
def source(self):
"""
An integer number `[0..nodes.length - 1]` that represents the
source node.
The 'source' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["source"]
@source.setter
def source(self, val):
self["source"] = val
# sourcesrc
# ---------
@property
def sourcesrc(self):
"""
Sets the source reference on Chart Studio Cloud for source .
The 'sourcesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sourcesrc"]
@sourcesrc.setter
def sourcesrc(self, val):
self["sourcesrc"] = val
# target
# ------
@property
def target(self):
"""
An integer number `[0..nodes.length - 1]` that represents the
target node.
The 'target' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["target"]
@target.setter
def target(self, val):
self["target"] = val
# targetsrc
# ---------
@property
def targetsrc(self):
"""
Sets the source reference on Chart Studio Cloud for target .
The 'targetsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["targetsrc"]
@targetsrc.setter
def targetsrc(self, val):
self["targetsrc"] = val
# value
# -----
@property
def value(self):
"""
        A numeric value representing the flow volume.
The 'value' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valuesrc
# --------
@property
def valuesrc(self):
"""
Sets the source reference on Chart Studio Cloud for value .
The 'valuesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuesrc"]
@valuesrc.setter
def valuesrc(self, val):
self["valuesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the `link` color. It can be a single value, or an
array for specifying color for each `link`. If
`link.color` is omitted, then by default, a translucent
grey link will be used.
colorscales
A tuple of
:class:`plotly.graph_objects.sankey.link.Colorscale`
instances or dicts with compatible properties
colorscaledefaults
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults),
sets the default property values to use for elements of
sankey.link.colorscales
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each link.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
            Determines which trace information appears when
            hovering links. If `none` or `skip` is set, no
            information is displayed upon hovering; note that if
            `none` is set, click and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.link.Hoverlabel`
instance or dict with compatible properties
hovertemplate
            Template string used for rendering the information
            that appears in the hover box. Note that this will
            override `hoverinfo`. Variables are inserted using
            %{variable}, for example "y: %{y}". Numbers are
            formatted using d3-format's syntax
            %{variable:d3-format}, for example "Price: %{y:$.2f}".
            See https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}". See
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Time-Formatting.md#format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk:
            true`) are available, as are the variables `value`
            and `label`. Anything contained in the tag `<extra>`
            is displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the link.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.link.Line` instance
or dict with compatible properties
source
An integer number `[0..nodes.length - 1]` that
represents the source node.
sourcesrc
Sets the source reference on Chart Studio Cloud for
source .
target
An integer number `[0..nodes.length - 1]` that
represents the target node.
targetsrc
Sets the source reference on Chart Studio Cloud for
target .
value
            A numeric value representing the flow volume.
valuesrc
Sets the source reference on Chart Studio Cloud for
value .
"""
def __init__(
self,
arg=None,
color=None,
colorscales=None,
colorscaledefaults=None,
colorsrc=None,
customdata=None,
customdatasrc=None,
hoverinfo=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
label=None,
labelsrc=None,
line=None,
source=None,
sourcesrc=None,
target=None,
targetsrc=None,
value=None,
valuesrc=None,
**kwargs
):
"""
Construct a new Link object
The links of the Sankey plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Link`
color
Sets the `link` color. It can be a single value, or an
array for specifying color for each `link`. If
`link.color` is omitted, then by default, a translucent
grey link will be used.
colorscales
A tuple of
:class:`plotly.graph_objects.sankey.link.Colorscale`
instances or dicts with compatible properties
colorscaledefaults
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults),
sets the default property values to use for elements of
sankey.link.colorscales
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
customdata
Assigns extra data to each link.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
            Determines which trace information appears when
            hovering links. If `none` or `skip` is set, no
            information is displayed upon hovering; note that if
            `none` is set, click and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.link.Hoverlabel`
instance or dict with compatible properties
hovertemplate
            Template string used for rendering the information
            that appears in the hover box. Note that this will
            override `hoverinfo`. Variables are inserted using
            %{variable}, for example "y: %{y}". Numbers are
            formatted using d3-format's syntax
            %{variable:d3-format}, for example "Price: %{y:$.2f}".
            See https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}". See
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Time-Formatting.md#format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk:
            true`) are available, as are the variables `value`
            and `label`. Anything contained in the tag `<extra>`
            is displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
label
The shown name of the link.
labelsrc
Sets the source reference on Chart Studio Cloud for
label .
line
:class:`plotly.graph_objects.sankey.link.Line` instance
or dict with compatible properties
source
An integer number `[0..nodes.length - 1]` that
represents the source node.
sourcesrc
Sets the source reference on Chart Studio Cloud for
source .
target
An integer number `[0..nodes.length - 1]` that
represents the target node.
targetsrc
Sets the source reference on Chart Studio Cloud for
target .
value
            A numeric value representing the flow volume.
valuesrc
Sets the source reference on Chart Studio Cloud for
value .
Returns
-------
Link
"""
super(Link, self).__init__("link")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Link
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Link`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import link as v_link
# Initialize validators
# ---------------------
self._validators["color"] = v_link.ColorValidator()
self._validators["colorscales"] = v_link.ColorscalesValidator()
self._validators["colorscaledefaults"] = v_link.ColorscaleValidator()
self._validators["colorsrc"] = v_link.ColorsrcValidator()
self._validators["customdata"] = v_link.CustomdataValidator()
self._validators["customdatasrc"] = v_link.CustomdatasrcValidator()
self._validators["hoverinfo"] = v_link.HoverinfoValidator()
self._validators["hoverlabel"] = v_link.HoverlabelValidator()
self._validators["hovertemplate"] = v_link.HovertemplateValidator()
self._validators["hovertemplatesrc"] = v_link.HovertemplatesrcValidator()
self._validators["label"] = v_link.LabelValidator()
self._validators["labelsrc"] = v_link.LabelsrcValidator()
self._validators["line"] = v_link.LineValidator()
self._validators["source"] = v_link.SourceValidator()
self._validators["sourcesrc"] = v_link.SourcesrcValidator()
self._validators["target"] = v_link.TargetValidator()
self._validators["targetsrc"] = v_link.TargetsrcValidator()
self._validators["value"] = v_link.ValueValidator()
self._validators["valuesrc"] = v_link.ValuesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorscales", None)
self["colorscales"] = colorscales if colorscales is not None else _v
_v = arg.pop("colorscaledefaults", None)
self["colorscaledefaults"] = (
colorscaledefaults if colorscaledefaults is not None else _v
)
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("customdata", None)
self["customdata"] = customdata if customdata is not None else _v
_v = arg.pop("customdatasrc", None)
self["customdatasrc"] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop("hoverinfo", None)
self["hoverinfo"] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop("hoverlabel", None)
self["hoverlabel"] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop("hovertemplate", None)
self["hovertemplate"] = hovertemplate if hovertemplate is not None else _v
_v = arg.pop("hovertemplatesrc", None)
self["hovertemplatesrc"] = (
hovertemplatesrc if hovertemplatesrc is not None else _v
)
_v = arg.pop("label", None)
self["label"] = label if label is not None else _v
_v = arg.pop("labelsrc", None)
self["labelsrc"] = labelsrc if labelsrc is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("source", None)
self["source"] = source if source is not None else _v
_v = arg.pop("sourcesrc", None)
self["sourcesrc"] = sourcesrc if sourcesrc is not None else _v
_v = arg.pop("target", None)
self["target"] = target if target is not None else _v
_v = arg.pop("targetsrc", None)
self["targetsrc"] = targetsrc if targetsrc is not None else _v
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
_v = arg.pop("valuesrc", None)
self["valuesrc"] = valuesrc if valuesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
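# --- A minimal usage sketch for the Link class above (hypothetical indices
# and values; guarded so importing the generated module has no side effects) ---
if __name__ == "__main__":
    _link = Link(
        source=[0, 0, 1],  # indices into node.label: where each flow starts
        target=[2, 1, 2],  # indices into node.label: where each flow ends
        value=[8.0, 2.0, 4.0],  # flow volume carried by each link
    )
    print(_link.to_plotly_json())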
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
        Sets the horizontal alignment of the text content within the
        hover label box. Has an effect only if the hover label text
        spans two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.sankey.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for namelength
.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the
            hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Hoverlabel`
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the
            hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import hoverlabel as v_hoverlabel
# Initialize validators
# ---------------------
self._validators["align"] = v_hoverlabel.AlignValidator()
self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
self._validators["font"] = v_hoverlabel.FontValidator()
self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("alignsrc", None)
self["alignsrc"] = alignsrc if alignsrc is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bgcolorsrc", None)
self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("bordercolorsrc", None)
self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("namelength", None)
self["namelength"] = namelength if namelength is not None else _v
_v = arg.pop("namelengthsrc", None)
self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
        the grid for this sankey trace.
        The 'column' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
        grid for this sankey trace.
        The 'row' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this sankey trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this sankey trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
            column in the grid for this sankey trace.
row
If there is a layout grid, use the domain for this row
            in the grid for this sankey trace.
x
Sets the horizontal domain of this sankey trace (in
plot fraction).
y
Sets the vertical domain of this sankey trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Domain`
column
If there is a layout grid, use the domain for this
            column in the grid for this sankey trace.
row
If there is a layout grid, use the domain for this row
            in the grid for this sankey trace.
x
Sets the horizontal domain of this sankey trace (in
plot fraction).
y
Sets the vertical domain of this sankey trace (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Domain`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey import domain as v_domain
# Initialize validators
# ---------------------
self._validators["column"] = v_domain.ColumnValidator()
self._validators["row"] = v_domain.RowValidator()
self._validators["x"] = v_domain.XValidator()
self._validators["y"] = v_domain.YValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
self["column"] = column if column is not None else _v
_v = arg.pop("row", None)
self["row"] = row if row is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
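# --- A minimal usage sketch for the Domain class above (hypothetical plot
# fractions; guarded so importing the generated module has no side effects) ---
if __name__ == "__main__":
    # Place the trace in the left half of the figure, at full height.
    _dom = Domain(x=[0.0, 0.5], y=[0.0, 1.0])
    print(_dom.to_plotly_json())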
__all__ = [
"Domain",
"Hoverlabel",
"Link",
"Node",
"Stream",
"Textfont",
"hoverlabel",
"link",
"node",
]
from plotly.graph_objs.sankey import node
from plotly.graph_objs.sankey import link
from plotly.graph_objs.sankey import hoverlabel
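# --- End-to-end sketch tying this package together (uses the public
# plotly.graph_objects API; the figure data below is made up for
# illustration) ---
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Sankey(
            node=dict(label=["Coal", "Gas", "Electricity"], pad=15, thickness=20),
            link=dict(source=[0, 1], target=[2, 2], value=[8, 4],
                      hovertemplate="%{value} TWh<extra></extra>"),
            domain=dict(x=[0, 1], y=[0, 1]),  # occupy the full plot area
        )
    )
    fig.show()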
|
[
"robot-piglet@yandex-team.com"
] |
robot-piglet@yandex-team.com
|
f1937d73d5ca59f1d3d284eae6aad9c8138f6512
|
c1287fe2f9292a77bf544070f665137d267cc814
|
/task3/log_generator.py
|
431860fdba2f463fe75be42402038bbf7407a6aa
|
[] |
no_license
|
ovgolovin/cli_tasks
|
08bf9ccc972c075259f56662be08aea9ae4c8d95
|
18a791f53480caae769e73d91e55d27cd69bac82
|
refs/heads/master
| 2016-09-10T13:53:05.664801
| 2013-07-03T12:50:15
| 2013-07-03T12:50:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import division
import random
from datetime import datetime, timedelta

# Mean gap between consecutive log records, in milliseconds.
period_expectation = 10000

def generate_url_and_code():
    """Return a random (url, http_status) pair for a fake request."""
    def resume():
        ps = ['id={}'.format(random.randrange(0, 100)),
              'rss={}'.format(random.choice((0, 1)))]
        random.shuffle(ps)
        return '/resume?{}'.format('&'.join(ps)), '200'
    def vacancyerror():
        return '/vacancyerror', '500'
    def user():
        return '/user', '200'
    return random.choice((resume, vacancyerror, user))()

with open('log.txt', 'w') as f:
    # Start one minute before noon and emit roughly an hour of records.
    ts = datetime(year=2013, month=1, day=20, hour=12, minute=0) + timedelta(minutes=-1)
    ts_end = ts + timedelta(hours=1, minutes=2)
    while True:
        # Advance the clock by a normally distributed gap around the mean.
        ts = ts + timedelta(milliseconds=random.normalvariate(period_expectation, 2000))
        url, code = generate_url_and_code()
        out = '\t'.join([ts.strftime('%Y-%m-%d\t%H:%M:%S') + ',' + '{:03.0f}'.format(ts.microsecond / 1000),
                         random.choice(('info', 'warn', 'error')),
                         random.choice(('GET', 'POST')),
                         '{}'.format(random.randrange(10000, 15000)),
                         url,
                         code,
                         '{:0.2f}ms'.format(random.normalvariate(400, 60))
                         ])
        f.write('{}\n'.format(out))
        if ts >= ts_end:
            break
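# A representative output line (fields are tab-separated; the concrete values
# below are illustrative, not taken from a real run):
#   2013-01-20	12:00:08,456	info	GET	12345	/resume?id=42&rss=1	200	401.23ms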
|
[
"ovgolovin@gmail.com"
] |
ovgolovin@gmail.com
|
36164f7ee97dba594be61b1f5bc709171535d893
|
e43eed4a7af5dad43f6377cb270949dab16d6ab7
|
/Python/new_prop.py
|
92cf119b26007c02115c450edf7fb442e2d032b5
|
[] |
no_license
|
hossamabdullah/Hacker_Rank
|
d49df52c6182fc75365d3e9fb2b657d8553cb9ad
|
75f0fb5b3a5f0db5891d684905c576bc1d133275
|
refs/heads/master
| 2022-09-21T05:47:48.230184
| 2022-09-04T12:27:09
| 2022-09-04T12:27:09
| 139,197,041
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
import os
import sys
from subprocess import call

# Full problem URL, passed as the first command-line argument.
url = sys.argv[1]
# Strip the fixed-length URL prefix (38 characters, which matches e.g.
# 'https://www.hackerrank.com/challenges/') and keep the problem slug up
# to the next '/'.
start_index = 38
slug = url[start_index:]
slug = slug[:slug.find('/')]
print(slug)

# Create a directory named after the problem, containing a README that
# links back to the problem page and an empty solution stub.
if not os.path.exists(slug):
    os.makedirs(slug)
with open(slug + '/README.md', 'w') as f:
    f.write('this is the link for the problem \n')
    f.write(url)
with open(slug + '/' + slug + '.py', 'w') as f:
    f.write('')

# Stage and commit the new directory.
call(["git", "add", "."])
call(["git", "commit", "-m", "initial directory for " + slug])
|
[
"hossamabdalh@gmail.com"
] |
hossamabdalh@gmail.com
|
eddf9f5f7a2a6c7585655da5f7ccb69a9ca882d2
|
1ee920e0f7a05b0b50776ed7874ba2e76bbba7de
|
/简简/机器学习初步/part1_course4_Minist2TFRecord.py
|
760c4c72a3466271acafbcf951ff571c0f48f9b8
|
[] |
no_license
|
wangqingbaidu/LearningML
|
cb4066ed616d3914b85fa559bc1cba1ed18954bc
|
9c970f9e6c4052fef444bcf518cf5b1b7c6adfdc
|
refs/heads/master
| 2020-07-31T10:13:06.377235
| 2019-11-15T02:03:39
| 2019-11-15T02:03:39
| 210,571,009
| 0
| 4
| null | 2019-10-10T06:10:14
| 2019-09-24T10:03:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,671
|
py
|
# -*- coding: UTF-8 -*-
# @Time    : 2019/10/20 19:41
# @Author  : Janeasefor
# @Site    :
# @File    : test2.py
# @Software: PyCharm
import tensorflow as tf
import os
import traceback
from utils import process_image, ImageCoder

# Suppress unnecessary warnings on older TensorFlow versions.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# If the input data is not a list (e.g. a scalar), it has to be wrapped in a
# list first.
def int64_feature(value):
    # Conversion for int-typed data.
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

def bytes_feature(value):
    # Conversion for byte-typed data; typically used for raw binary streams
    # such as audio or video.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def float_feature(value):
    # Conversion for float-typed data.
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))

# Location of the handwritten-digit images; file name format: label.index.jpg.
# file_dir = sys.argv[1]
file_dir = 'C:/Users/ysl-pc/Desktop/机器学习入门/part1_course4/hand_writing_storage'
# Output location for the TFRecord; created if it does not exist.
# output_dir = sys.argv[2]
output_dir = 'C:/Users/ysl-pc/Desktop/机器学习入门/part1_course4/TF_record'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
# The feature map defines two fields: `label` holds the sample's label and
# `data` holds the sample itself.
feature = {'label': None, 'data': None}
# Create the image coder.
coder = ImageCoder()
if os.path.exists(file_dir):
    # Create a TFRecord writer.
    writer = tf.python_io.TFRecordWriter(os.path.join(output_dir, 'mnist_byte.tfrecord'))
    for file_name in os.listdir(file_dir):
        # Skip files in the directory that do not end with `.jpg`.
        if file_name.endswith('.jpg'):
            try:
                label, index, _ = file_name.split('.')
                # Convert the loaded image to grayscale.
                image_encoded, _, _ = process_image(os.path.join(file_dir, file_name), coder)
                # Build the feature map for this sample.
                feature['label'] = int64_feature(int(label))
                feature['data'] = bytes_feature(image_encoded)
                # Serialize the example and write it to the TFRecord.
                example = tf.train.Example(features=tf.train.Features(feature=feature))
                writer.write(example.SerializeToString())
            except Exception:
                traceback.print_exc()
                print('Error while serializing %s.' % file_name)
    # Close the writer so all records are flushed to disk.
    writer.close()
else:
    print('File dir %s not exist!' % file_dir)
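# Minimal read-back check for the file written above (a sketch using the same
# TF 1.x API as the writer; `output_dir`, `os` and `tf` are defined earlier):
record_path = os.path.join(output_dir, 'mnist_byte.tfrecord')
if os.path.exists(record_path):
    for record in tf.python_io.tf_record_iterator(record_path):
        parsed = tf.train.Example()
        parsed.ParseFromString(record)
        label = parsed.features.feature['label'].int64_list.value[0]
        data = parsed.features.feature['data'].bytes_list.value[0]
        print('label=%d, %d encoded bytes' % (label, len(data)))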
|
[
"noreply@github.com"
] |
wangqingbaidu.noreply@github.com
|
a71575c3f5fb41e8566027db9fecf817bb9fcd5e
|
7c63011cc895542cdf8aead2d90517985c1f9bed
|
/v2.py
|
91dd60eabe526c3256d49ff5e41f420aeb3508bc
|
[] |
no_license
|
npinnaka/py_example
|
e1af6d2e75e97591ee0fe3d74b25ea1ec5630df8
|
b7f75648e3eb442e88620ce8d4f28bcd870ace76
|
refs/heads/master
| 2020-08-28T14:39:13.289810
| 2019-10-26T15:26:13
| 2019-10-26T15:26:13
| 217,728,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.window import Window
from pyspark.sql import Row
from pyspark.sql.types import *

# Works both standalone and in the pyspark shell: getOrCreate() reuses an
# already-running session if one exists.
spark = SparkSession.builder.appName("growth").getOrCreate()

def f(x):
    """Walk one (Name, Subject) group in row order, carrying the previous
    marks and growth increase forward to derive Growth, Growth_Increase
    and ExpectedScore for each row."""
    gi = 0
    previous_row = 0
    previous_gi = float(0)
    output = []
    for r in x[1]:
        d = r.asDict()
        marks = int(d["Marks"])
        if previous_row == 0:
            d["Growth"] = 0
        else:
            c = int(previous_row)
            # Growth relative to the previous marks; the parentheses matter,
            # since `marks - c / c` would just compute `marks - 1`.
            d["Growth"] = int((marks - c) / c)
        if previous_row <= 0 or previous_gi <= 0:
            d["ExpectedScore"] = float(marks)
        else:
            d["ExpectedScore"] = round(float(marks + round((marks * previous_gi) / 100, 2)), 2)
        gi = d["Growth"] - gi
        d["Growth_Increase"] = gi
        # Spreadsheet original: =IF(F2<0,D3,D3+((D3*F2)/100))
        previous_row = marks
        previous_gi = d["Growth_Increase"]
        output.append(Row(**d))
    return output

df = spark.read.option("header", "true").csv("indatas.csv")
# Group rows by Name+Subject, materialize each group as a list, apply f,
# then flatten back to a single RDD of Rows.
outrdd = df.rdd.groupBy(lambda r: r[0] + r[1]).map(lambda av: (av[0], list(av[1]))).map(f).flatMap(lambda x: x)
schema = StructType([StructField("Name", StringType(), True),
                     StructField("Subject", StringType(), True),
                     StructField("Dateon", StringType(), True),
                     StructField("Marks", StringType(), True),
                     StructField("Growth", IntegerType(), True),
                     StructField("Growth_Increase", IntegerType(), True),
                     StructField("ExpectedScore", DoubleType(), True)])
ds = spark.createDataFrame(outrdd, schema)
ds.show()
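# A DataFrame-only sketch of the "previous row" part of the logic using the
# window functions imported above (column names taken from the schema; this
# covers only the growth ratio, not the full Growth_Increase recurrence,
# which is inherently sequential):
w = Window.partitionBy("Name", "Subject").orderBy("Dateon")
growth_df = (df.withColumn("PrevMarks", F.lag(F.col("Marks").cast("double")).over(w))
               .withColumn("GrowthRatio",
                           (F.col("Marks").cast("double") - F.col("PrevMarks")) / F.col("PrevMarks")))
growth_df.show()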
|
[
"npinnaka@yahoo.com"
] |
npinnaka@yahoo.com
|