blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6acd9d44dc1191828b5807335b648d30c0e9194d | 0eda43d797abfc69ad28000b3c3599af44049bdf | /setup.py | 21f2ea356d1b8e4b3e0b98a7bd61d346e529cf0b | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/BIOMD0000000048 | d8d23b0491ac80e27692b6e115b9884ee46397d6 | 6d17577fdde45ed5c0ec8457eacb860458e30215 | refs/heads/master | 2021-01-18T14:19:32.446581 | 2014-10-16T05:18:50 | 2014-10-16T05:18:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from setuptools import setup, find_packages
setup(name='BIOMD0000000048',
version=20140916,
description='BIOMD0000000048 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000048',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | [
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
92342462dbbae240ef7cda2ffdb9248d3c5c9ee5 | d9ebec1b75f31881ae7e05139a9ad7004dd92227 | /api/recommender/models.py | 55993431ad80a72aeec090a0f85ea6cc58322235 | [] | no_license | Drobo07/eBackend | 83efa41aac6dddc483b8a4061bbcb00fee9208b7 | cf090d5d4b0c4d5bb8c50c511dbc0f3d90e1aca4 | refs/heads/master | 2020-12-21T15:08:18.811907 | 2019-05-02T17:18:49 | 2019-05-02T17:18:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | from django.db import models
from django.contrib.postgres.fields import ArrayField
from datetime import datetime
from general.models import Publications
# Create your models here.
class BarcRequest(models.Model):
title = models.CharField(max_length=128,null=False)
abstract = models.TextField(max_length=1024,null=False)
created = models.DateTimeField(auto_now_add=True)
result_token = models.CharField(max_length=32)
result_generated = models.BooleanField(default=False)
def __str__(self):
if(len(self.title)>32):
return self.title[:30]+"..."
return self.title
def get_result_token(self):
ind = self.pk
token = "barc/"+self.created.strftime("/%Y/%m/%d/%H/%M/%S")
return token
| [
"dipeshkrj14@gmail.com"
] | dipeshkrj14@gmail.com |
4b246fb650a74fd88ecc1f77ebd88120fb7af169 | 53cc8792bad1090243ac2e58c05fc9d85a08a7c0 | /motorController.py | dbfa6e2c2e6c2010d0f58ea3d0031076879000db | [] | no_license | intruedeep/intruedeep-firmware | 179de0fca49967690e0e1f229db30733a9660d4f | a8dd0d7abad41b22ce697272288afde86906b63d | refs/heads/master | 2021-01-10T10:21:38.375779 | 2016-04-28T16:07:41 | 2016-04-28T16:07:41 | 54,795,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,475 | py | import RPi.GPIO as GPIO
import smbus
import sys
import motor_control
import random;
from time import sleep
bus = smbus.SMBus(1)
address = 0x30
turnTime = .00005;
fireTIme = .5;
Motor1A = 37
FireGPIO = 11
#If the gun is within offset ticks of its desination location, just stop there.
offset = 5;
homeOffset = 5;
downPos = 1.22
middlePos = downPos + .15
gravityOffset = 0;
homePos = 16;
yOffset = 0;
servo = 35;
frequency = 200;
GPIO.setmode(GPIO.BOARD)
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(servo, GPIO.OUT);
pwm = GPIO.PWM(servo, frequency);
def Fire():
GPIO.output(FireGPIO, True);
sleep(.5);
GPIO.output(FireGPIO, False);
def getBearing1():
while(1):
try:
bear = bus.read_byte_data(address, 64)
return bear
except:
print "had a bus error on 64"
def getBearing2():
while(1):
try:
bear = bus.read_byte_data(address, 65)
return bear
except:
print "had a bus error on 65"
def turnMotor(rotdir, distance, pos, prevPos):
if(rotdir == 'cw'):
#The lower duty cycle is more precise, but slow. So switch to the lower cycle when you are within a close distance of the taget
if(distance > 200):
dutyCycle = .55
elif(pos == prevPos):
seed = random.randint(0, 2);
if(seed % 2 == 0):
dutyCycle = .57;
else:
dutyCycle = .59;
else:
dutyCycle = .51;
else:
if(distance > 200):
dutyCycle = .39
elif(pos == prevPos):
dutyCycle = .37;
seed = random.randint(0, 2);
if(seed % 2 == 0):
dutyCycle = .37;
else:
dutyCycle = .35;
else:
dutyCycle = .43
p = GPIO.PWM(Motor1A, 3.3333);
p.start(dutyCycle);
sleep(turnTime);
p.stop
def Postition():
msb = getBearing1();
lsb = getBearing2();
if(msb * 255 + lsb > 64000):
return msb * 255 + lsb - 65280
return (msb * 255 + lsb)
def findDestinationTicks(index):
TicksPerDegree = 3449.5 / 360;
degrees = [1.3631164591600067, 1.40431393310737, 1.4456434205564785, 1.486902441626652, 1.5278648068465346, 1.568281176545824, 1.6078803699140154, 1.646371526719773, 1.6834472021318871, 1.718787440866362, 1.7520648308221207, 1.7829504795736577, 1.8111207921908659, 1.8362648602662743, 1.8580922055217846, 1.8763405639418504, 1.890783355280717, 1.9012364646142086, 1.9075639722393514, 1.9096825077443618, 1.9096825077443618, 1.9075639722393514, 1.9012364646142086, 1.890783355280717, 1.8763405639418504, 1.8580922055217846, 1.8362648602662743, 1.8111207921908659, 1.7829504795736577, 1.7520648308221207, 1.718787440866362, 1.6834472021318871, 1.646371526719773, 1.607880369913562, 1.568281176545824, 1.5278648068465346, 1.4869024416264067, 1.4456434205564785, 1.4043139331076293, 1.3631164591600067]
totalDegrees = 0;
for i in range(0, index):
totalDegrees += degrees[i];
return(totalDegrees * TicksPerDegree);
def goToDestination(destinationPos):
pos = Postition();
prevPos = 0;
while(1):
if(destinationPos > pos + offset):
turnMotor("cw", destinationPos - pos, pos, prevPos);
#sleep to ensure proper encoder reading
if(pos - destinationPos < 50):
sleep(.5);
prevPos = pos;
pos = Postition();
print "pos = " + str(pos);
elif(destinationPos + offset < pos):
turnMotor("cww", pos - destinationPos, pos, prevPos);
if(destinationPos - pos < 50):
sleep(.5);
prevPos = pos;
pos = Postition();
print "pos = " + str(pos);
else:
break;
def moveServo(y):
angles = [1.3631164591600067, 1.40431393310737, 1.4456434205564785, 1.486902441626652, 1.5278648068465346, 1.568281176545824, 1.6078803699140154, 1.646371526719773, 1.6834472021318871, 1.718787440866362, 1.7520648308221207, 1.7829504795736577, 1.8111207921908659, 1.8362648602662743, 1.8580922055217846, 1.8763405639418504, 1.890783355280717, 1.9012364646142086, 1.9075639722393514, 1.9096825077443618, 1.9096825077443618, 1.9075639722393514, 1.9012364646142086, 1.890783355280717, 1.8763405639418504, 1.8580922055217846, 1.8362648602662743, 1.8111207921908659, 1.7829504795736577, 1.7520648308221207, 1.718787440866362, 1.6834472021318871, 1.646371526719773, 1.607880369913562, 1.568281176545824, 1.5278648068465346, 1.4869024416264067, 1.4456434205564785, 1.4043139331076293, 1.3631164591600067]
print(y)
angle = sum(angles[0:int(y)+1]);
print "Angle = " + str(angle);
Pos = angle * (.3 / 68) + downPos;
print "Pos = " + str(Pos);
# Pos = downPos + ((float(y) + gravityOffset) * .0075)
msPerCycle = 1000 / frequency;
dutyCycle = Pos * 100 / msPerCycle;
pwm.start(dutyCycle);
sleep(2);
# pwm.stop()
def main(x, y):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(servo, GPIO.OUT);
pwm = GPIO.PWM(servo, frequency);
startingPos = Postition();
print "starting = " + str(startingPos);
targetPos = findDestinationTicks(int(x));
destinationPos = startingPos + targetPos
print "Goal destination = " + str(destinationPos);
goToDestination(destinationPos);
currentPos = Postition();
print "Reached destination = " + str(currentPos);
moveServo(int(y) + yOffset);
motor_control.fire()
sleep(1);
offset = homeOffset;
endingPos = Postition();
print "Actually fired at " + str(endingPos);
#Return home
goToDestination(startingPos);
endingPos = Postition();
print "Returned Home = " + str(endingPos);
endingPos = Postition();
#Go to a neutral position;
moveServo(homePos);
GPIO.cleanup()
return 0;
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2]);
| [
"kdunn13@vols.utk.edu"
] | kdunn13@vols.utk.edu |
a3408f3cacbced284115c90a5f39637e7b80dc5c | 269d6d6a0c2975030232c354a1ff65303de96ec1 | /modules/dune_cvn.py | a99aee5d645c42d5689c0b4e53ba27a4a7320cbf | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | DUNE/dune-cvn | 1cf44d5a7ab9ca663121a99eedb0285d122298d9 | 8402a529447d8601a8c16e9954e75fd0dfc2a9d7 | refs/heads/master | 2023-03-15T05:16:15.502690 | 2021-03-12T16:01:15 | 2021-03-12T16:01:15 | 258,130,863 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,224 | py | '''
DUNE CVN model
Inspired by https://github.com/titu1994/keras-squeeze-excite-network/blob/master/se_resnet.py
References:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
- [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507)
'''
__version__ = '1.0'
__author__ = 'Saul Alonso-Monsalve, Leigh Howard Whitehead'
__email__ = 'saul.alonso.monsalve@cern.ch, leigh.howard.whitehead@cern.ch'
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Reshape, Activation, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, GlobalAveragePooling2D, Conv2D
from tensorflow.keras.layers import add, concatenate, multiply, Permute
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
def DUNECVNModel(width=1,
weight_decay=1e-4,
weights=None):
return SEResNetB(depth=[3, 4, 6, 3],
width=width,
weight_decay=weight_decay,
weights=weights)
def SEResNetB(initial_conv_filters=64,
depth=[3, 4, 6, 3],
filters=[64, 128, 256, 512],
width=1,
weight_decay=1e-4,
weights=None,
input_names=['view0','view1','view2'],
input_shapes=[[500,500,1],[500,500,1],[500,500,1]],
output_names=['is_antineutrino','flavour','interaction',\
'protons','pions','pizeros','neutrons'],
output_neurons=[1,4,4,4,4,4,4]):
''' Instantiate the Squeeze and Excite ResNet architecture with branches.
Args:
initial_conv_filters: number of features for the initial convolution.
depth: number or layers in the each block, defined as a list.
filter: number of filters per block, defined as a list.
width: width multiplier for the network (for Wide ResNets).
weight_decay: weight decay (l2 norm).
weights: path of HDF5 file with model weights.
input_names: name of each input, defined as a list.
input_shapes: shape of each input, defined as a list.
output_names: name of each output, defined as a list.
output_neurons: number of neurons of each output, defined as a list.
Returns: a tf.keras model instance.
'''
assert len(depth) == len(filters), 'The length of filter increment list must match the length ' \
'of the depth list.'
assert len(input_names) == len(input_shapes), 'The length of input_names must match the length ' \
'of input_shapes.'
assert len(output_names) == len(output_neurons), 'The length of output_names must match the length ' \
'of output_neurons.'
# inputs
inputs = [None]*len(input_names)
for i in range(len(inputs)):
inputs[i] = Input(shape=input_shapes[i], name=input_names[i])
# generate architecture
x = _create_se_resnet_with_branches(inputs, initial_conv_filters,
filters, depth, width, weight_decay)
# outputs
outputs = [None]*len(output_names)
for i in range(len(outputs)):
activation='sigmoid' if output_neurons[i]==1 else 'softmax'
outputs[i] = Dense(output_neurons[i], use_bias=False, kernel_regularizer=l2(weight_decay),
activation=activation, name=output_names[i])(x)
# create model
model = Model(inputs=inputs, outputs=outputs, name='dunecvn')
# load weights
if weights:
model.load_weights(weights, by_name=True)
return model
def _resnet_block(input, filters, k=1, strides=(1, 1)):
''' Adds a pre-activation resnet block without bottleneck layers.
Args:
input: input tensor.
filters: number of output filters.
k: width factor.
strides: strides of the convolution layer.
Returns: a tf tensor.
'''
init = input
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = BatchNormalization(axis=channel_axis)(input)
x = Activation('relu')(x)
if strides != (1, 1) or init.shape[channel_axis] != filters * k:
init = Conv2D(filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
use_bias=False, strides=strides)(x)
x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
use_bias=False, strides=strides)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
use_bias=False)(x)
# squeeze and excite block
x = squeeze_excite_block(x)
m = add([x, init])
return m
def squeeze_excite_block(input, ratio=16):
''' Create a squeeze-excite block.
Args:
input: input tensor.
k: width factor.
Returns: a tf tensor.
'''
init = input
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
filters = init.shape[channel_axis]
se_shape = (1, 1, filters)
# se block
se = GlobalAveragePooling2D()(init)
se = Reshape(se_shape)(se)
se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
if K.image_data_format() == 'channels_first':
se = Permute((3, 1, 2))(se)
x = multiply([init, se])
return x
def _create_se_resnet_with_branches(img_input, initial_conv_filters, filters,
depth, width, weight_decay):
'''Creates the SE-ResNet architecture with specified parameters.
Args:
initial_conv_filters: number of features for the initial convolution.
filters: number of filters per block, defined as a list.
depth: number or layers in the each block, defined as a list.
width: width multiplier for network (for Wide ResNet).
weight_decay: weight_decay (l2 norm).
Returns: a tf.keras Model.
'''
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
N = list(depth)
# branches
branches = []
for i in range(len(img_input)):
# block 1 (initial conv block)
branch = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False, strides=(2, 2),
kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(img_input[i])
branch = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(branch)
# block 2 (projection block)
for i in range(N[0]):
branch = _resnet_block(branch, filters[0], width)
branches.append(branch)
# concatenate branches
x = concatenate(branches)
# block 3 - N
for k in range(1, len(N)):
x = _resnet_block(x, filters[k], width)
for i in range(N[k] - 1):
x = _resnet_block(x, filters[k], width)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
return x
| [
"saul.alonso.monsalve@cern.ch"
] | saul.alonso.monsalve@cern.ch |
f7893075ad7f7f33d47f38004cf784a9fc9afb2d | ebe11743bb33a1bfe36cddf92551a1e08b3e565d | /advanced/Common.py | d01638f48bbc3fb6f7e1f69744c92fc1ace91f0e | [] | no_license | nick-cheatwood7/python-mondrian | 671ca3aa0734fa283906181f81d1b29be3dccb1b | e5f98b6009eb41eacc90f5906218bb3f3e59452e | refs/heads/main | 2023-03-13T10:30:36.799891 | 2021-02-22T07:55:29 | 2021-02-22T07:55:29 | 339,519,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py |
# Created 02/22/2021 by Nick Cheatwood
# Contains any numbers or functions generic enough to warrant being outside of main file
import random
# Specify canvas width/height
canvasWidth = 1024
canvasHeight = 768
# Specify splits
# 'Generate a random integer between 120 and the width of the region * 1.5'
# If the random integer is less than the width of the region then split the region
splitLow = 120 # Ensures that we never split a region less than 120px wide
splitPenalty = 1.5 # Provides a random chance that a larger region will not be split into smaller regions
# Generate a random color
def getRandomColor():
# Available color choices
colorChoices = [ 'white',
'#6E5AE0',
'#E09287',
'#8170E0',
'#E0CF5A',
'#65E0A7'
]
# Pick a random value between 0 and the length of the color choices array
randColorVal = random.randint(0, len(colorChoices) + 1)
if(randColorVal >= len(colorChoices)):
# randColorVal depends on a range, so end range is length of array + 1 to get proper range
randIndex = randColorVal - 2 # out of index, bring down 2 to get to proper end array index
else:
randIndex = randColorVal
return colorChoices[randIndex] # return a random color
def getRandomBorderColor():
# Avaliable color options
colors = [
'black',
'white',
'hot pink',
'grey',
'blue'
]
# range, so add 1 to length of colors array
randVal = random.randint(0, len(colors) + 1)
# make sure randomized index is in range
randIndex = randVal - 2 if randVal >= len(colors) else randVal
return colors[randIndex] | [
"nick.cheatwood@gmail.com"
] | nick.cheatwood@gmail.com |
d5b6777d7162805424e48a672f5d020c4bd445be | d7d22ea566bd8e97f7bfe956118ad3be4edb9d2f | /game/entity/actor.py | 355a0a98d7f1aa4005adc119a411a9821d482da1 | [] | no_license | philax/gourd_of_the_lings | c02ef1db5467306c7994b20280c7c140ea65280f | 8bdf4c0c410e39e4ac531906aacbb98d5122c632 | refs/heads/master | 2021-01-10T05:35:45.802300 | 2015-12-17T22:35:22 | 2015-12-17T22:35:22 | 48,065,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | class Actor(object):
def __init__(self, name, base_hp, base_dmg, inventory=[]):
self.base_hp = base_hp
self.hp = base_hp
self.name = name
# if(stats == None):
# self.stats['HP'] = 10
# self.stats['STR'] = 5
# self.stats['AGI'] = 5
# self.stats['DEX'] = 5
def gain_hp(self, heal):
self.hp += heal
def lose_hp(self, dmg):
self.hp -= dmg
self.is_alive()
def get_hp(self):
print "%s has %s HP."% (self.name, self.hp)
def is_alive(self, is_quiet=False):
# print "DEBUG: Actor death check: %s has %s hp!"% (self.name, str(self.hp))
if self.hp > 0:
if is_quiet == False:
self.get_hp()
return True
elif self.hp <= 0:
if is_quiet == False: #hackey
print "%s has been defeated!"% (self.name)
return False | [
"plaks@turbine.com"
] | plaks@turbine.com |
11065362a8ac77972c519aadeae585300bb5085d | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_25/models/active_directory_get_response.py | ee529854e041a3ff612ccf174315845d4e2c49ef | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 5,613 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class ActiveDirectoryGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[ActiveDirectory]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.ActiveDirectory]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[ActiveDirectory]): A list of Active Directory computer account configuration objects.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ActiveDirectoryGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ActiveDirectoryGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | noreply@github.com |
4a1c309a93de9647a0f1adc90e88ad9c8624b3be | 2b8c88dfee5c5a784357515eafe8cd5f997c8774 | /leetcode/dynamic_programming/code-84.py | 1b9e7013a5652e79e6603e09d069daf7eb6aa134 | [] | no_license | archenRen/learnpy | e060f3aa2f77c35fc1b12345720af6c8b528da57 | 934ef76b97297f746a722a48c76672c7bc744cd9 | refs/heads/master | 2022-04-28T20:25:59.114036 | 2020-05-03T02:16:03 | 2020-05-03T02:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py |
# This is a TLE solution.
def largestRectangleArea2(heights: 'List[int]') -> int:
n = len(heights)
max_val = 0
for i in range(n):
min_val = heights[i]
max_val = max(max_val, min_val)
for j in range(i-1, -1, -1):
min_val = min(heights[j], min_val)
max_val = max(max_val, min_val * (i - j + 1))
return max_val
def largestRectangleArea(heights: 'List[int]') -> int:
# The stack maintain the indexes of buildings with ascending height.
n = len(heights)
heights.append(0)
stack = []
ans = 0
i = 0
while i <= n:
if not stack or heights[i] >= heights[stack[-1]]:
stack.append(i)
else:
tp = stack.pop()
if stack:
ans = max(ans, heights[tp] * (i - stack[-1] - 1))
else:
ans = max(ans, heights[tp] * i)
i -= 1
i += 1
return ans
# print(largestRectangleArea([2, 1, 5, 6, 2, 3])) # expect 10 (2*5)
# print(largestRectangleArea([2, 1, 3, 6, 2, 3]))# expect 8 (4*2)
# print(largestRectangleArea([2,3]))
# print(largestRectangleArea([3]))
print(largestRectangleArea(list(range(10))))
| [
"wangdi03@ppdai.com"
] | wangdi03@ppdai.com |
341107c12104363f5af83709a82a18422f87fb29 | 81246c8049ebf8d58e9614f4f062ec4dc0a2bd8b | /venv/Scripts/django-admin.py | 62609559dbfa35dd7608610ad723d1334c72616d | [] | no_license | nikakuzina/django | cf68e43f9779c5ee19ebfc95c4173c31338c0bec | afb2c4fe6f6e72ecad9653e72758350989e794e7 | refs/heads/main | 2023-01-31T15:07:37.111973 | 2020-12-07T22:20:18 | 2020-12-07T22:20:18 | 319,457,660 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #!C:\Users\Admin\PycharmProjects\djangonika\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"stasciobanu777@gmail.com"
] | stasciobanu777@gmail.com |
2d8ac059bbfc47157c68170ee0adcfcc597a30df | d6c7a20e0286d389ba17aef2b22931bd5d294dd0 | /postgres_demo.py | a722c97c42594c3b784794e756dfd057802fee71 | [] | no_license | bashooff/databases_demo | d1ebfde67f418324f45cf11922ce4395f96f9028 | 281cd00254451519e989bd076ed70bf4fe3cacde | refs/heads/main | 2023-04-05T22:02:20.629234 | 2021-05-05T08:51:35 | 2021-05-05T08:51:35 | 364,515,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | import psycopg2
def create_table():
# Connect to a database
conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
# Create a cursor object
cur = conn.cursor()
# Write SQL query
cur.execute("CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)")
# Commit changes
conn.commit()
# Close connection
conn.close()
def insert(item,quantity,price):
conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
cur = conn.cursor()
#cur.execute("INSERT INTO store VALUES('%s', '%s', '%s')" % (item, quantity, price))
cur.execute("INSERT INTO store VALUES(%s, %s, %s)", (item, quantity, price))
conn.commit()
conn.close()
def view():
conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
cur = conn.cursor()
cur.execute("SELECT * FROM store")
rows = cur.fetchall()
conn.close()
return rows
def delete(item):
conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
cur = conn.cursor()
cur.execute("DELETE FROM store WHERE item=%s", (item,))
conn.commit()
conn.close()
def update(quantity, price, item):
conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
cur = conn.cursor()
cur.execute("UPDATE store SET quantity=%s, price=%s WHERE item=%s", (quantity, price, item))
conn.commit()
conn.close()
create_table()
#insert("Orange", 10, 15)
#delete("Orange")
update(20, 9, "Apple")
print(view()) | [
"bashooff@hotmail.com"
] | bashooff@hotmail.com |
0bd4bb83349048366f08e67ed891cd7688d8efe5 | 9de52a33dfa175b3f4994658ad6a2261f045c8b0 | /难点积累/通过颜色特征直方图与滑动窗口比较图片相似性.py | c5f30321927b646fe0c3e45f26744c413506b9df | [] | no_license | infcnwangjie/opencv | 64140cebc64cacc9fe078bb266ee6569ba62bc0f | ea18ef4e9c514f703ed8fdd83b0f5d74069e1d90 | refs/heads/master | 2022-12-14T23:01:21.642076 | 2020-11-04T08:01:54 | 2020-11-04T08:01:54 | 133,104,182 | 0 | 0 | null | 2022-12-08T11:40:53 | 2018-05-12T01:38:08 | Python | UTF-8 | Python | false | false | 2,060 | py | # -*- coding: utf-8 -*-
import cv2
#https://baijiahao.baidu.com/s?id=1615404760897105428&wfr=spider&for=pc
def color_similar_ratio(image1, image2):
if image1 is None or image2 is None:
return 0
img1 = cv2.cvtColor(image1, cv2.COLOR_BGR2HSV)
img2 = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)
hist1 = cv2.calcHist([img1], [0, 1], None, [180, 256], [0, 180, 0, 255.0])
cv2.normalize(hist1, hist1, 0, 255, cv2.NORM_MINMAX) # 规划到0-255之间
# cv2.imshow("hist1",hist1)
hist2 = cv2.calcHist([img2], [0, 1], None, [180, 256], [0, 180, 0, 255.0])
cv2.normalize(hist2, hist2, 0, 255, cv2.NORM_MINMAX) # 规划到0-255之间
degree = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL) # HISTCMP_BHATTACHARYYA HISTCMP_CORREL
print(degree)
# if degree > 0.56:
# backproject = cv2.calcBackProject([img2], [0, 1], hist1, [0, 180, 0, 255.0], 1)
# cv2.imshow("backproject", backproject)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return degree
def slide():
img = cv2.imread("D:/2020-04-10-15-26-22test.bmp")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rows, cols = gray.shape
for row in range(0,rows):
for col in range(502, 612):
# print("-" * 1000)
yield (col, row, img[row:row + 80, col:col + 80])
# for col in range(2619, 2743):
# print("-" * 1000)
# yield (col, row, img[row:row + 80, col:col + 80])
def my_testslide():
roi_red_img=cv2.imread("D:/roi_red.png")
for col,row,img in slide():
# print("+"*100)
# print("rows:{},cols:{}".format(row,col))
roi_red_img=cv2.resize(roi_red_img,(80,80))
similar=color_similar_ratio(roi_red_img,img)
# print("similar:{}".format(similar))
if similar>0.85:
print("find red landmark")
cv2.namedWindow("roi", 0)
cv2.imshow("roi", roi_red_img)
cv2.namedWindow("target")
cv2.imshow("target",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
# image1 = cv2.imread("D:/roi1.png")
# image2 = cv2.imread("D:/target_gy.png")
# i = color_similar_ratio(image1, image2)
# print("color,相似度为:{}".format(i))
my_testslide()
| [
"wangjie_rj@163.com"
] | wangjie_rj@163.com |
ae8c74c7b7f7206875a2b5b26902c89d55155d71 | d6362b0d78433efb7b6f0a59ed427bf58d5bad47 | /python_basics/multiprocessing/other_tests/pool_map.py | ec8073e05d2559b0a26b9e75a30f10d1c6c6b3b5 | [] | no_license | davidbp/python_tutorials | 27c78791ad1ea041d543a8cd5b48a6f58bca700d | 5a7bc14a967f18d6820b39b152fc8219c95d3f75 | refs/heads/master | 2023-07-13T11:26:21.374596 | 2023-07-01T21:19:55 | 2023-07-01T21:19:55 | 104,083,908 | 14 | 4 | null | 2022-06-21T22:37:29 | 2017-09-19T14:13:34 | Jupyter Notebook | UTF-8 | Python | false | false | 526 | py |
from random import random
from time import sleep
from multiprocessing.pool import Pool
def multi_run_wrapper(args):
return task(*args)
def task(identifier, id):
value = random()
print(f'Task {identifier} executing with {id}', flush=True)
sleep(1)
return value
# protect the entry point
if __name__ == '__main__':
n_examples = 1000
chunksize = 100
with Pool(10) as pool:
pool.map(multi_run_wrapper, ((x,y) for x,y in zip(range(n_examples),range(n_examples))) , chunksize=chunksize)
| [
"davidbuchaca@gmail.com"
] | davidbuchaca@gmail.com |
bd5146467d282b22af61c534fd0e9f2dd2af0c1e | eeee85ce45554dc328776fa5b6f24c894c86a098 | /bikeshed/app/admin.py | 034d9f6b95426c879ccfcfc4d05eaba3613feeea | [] | no_license | varavan/bikeshed-test | 646b0fc4dcd452dd398c46fe75ad2154009f853e | 39b93c340362e02ebf06290cd2c0b214696579c8 | refs/heads/master | 2021-01-11T18:54:58.106643 | 2017-01-22T18:12:48 | 2017-01-22T18:12:48 | 79,654,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | # coding=utf-8
from django.contrib import admin
from .models import Brand
# Expose Brand in the Django admin with the default ModelAdmin options.
admin.site.register(Brand)
| [
"ivan.ruiz.delatorre@gmail.com"
] | ivan.ruiz.delatorre@gmail.com |
8e19d867074a1f86ccc691db65d40f99ea7ffb2b | c989985588c9c3132cb3df4710a59fa8df7c7f2d | /user.py | 3bf573c28689ca53aea39411c68c0e0d0a081ec0 | [] | no_license | hsolmaz/amazonbrowsertest | 3100886d08ba88f16efe33ef71db9dd4bf1b4566 | 54f0010ffcbcda89b2e85a5ab6f1d2724303a52e | refs/heads/master | 2020-03-21T00:39:38.131878 | 2018-06-21T11:16:55 | 2018-06-21T11:16:55 | 137,903,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import random
import string
import time
char_set2 = string.ascii_lowercase + string.digits
random_number = char_set2.split('0')[1]
char_set = char_set2.split('0')[0]
random_numbers = ''.join(random.sample(random_number*1, 1))
i = int(random_numbers)
username = ''.join(random.sample(char_set*6, 6))
lastname = ''.join(random.sample(char_set*6, 6))
password = ''.join(random.sample(char_set*8, 8))
maildomain = ''.join(random.sample(char_set*6, 6))
usermail = username+'@'+maildomain+'.com'
category = [u'yamaç paraşütü', 'balon turu', u'dalıl', 'kitesurf',
'yelkencilik', 'windsurf', 'paintball', 'atv safari', u'okçuluk', 'jeep safari']
driver = webdriver.Chrome()
driver.get("https://alt.test/")
""" driver.find_element_by_xpath('//*[@id="main-nav"]/li[5]/a').click()
driver.find_element_by_name(
'fos_user_registration_form[name]').send_keys(username)
driver.find_element_by_name(
'fos_user_registration_form[surname]').send_keys(lastname)
driver.find_element_by_name(
'fos_user_registration_form[email]').send_keys(usermail)
driver.find_element_by_name(
'fos_user_registration_form[plainPassword][first]').send_keys(password)
driver.find_element_by_name(
'fos_user_registration_form[plainPassword][second]').send_keys(password)
driver.find_element_by_xpath(
'//*[@id="user-actions"]/div/div/form/div/div[1]/div[9]/button').click() """
try:
driver.find_element_by_xpath(
'//*[@id="index-top-search"]/div/div[2]/div/input[1]').send_keys(category[i]+Keys.ENTER)
driver.find_element_by_xpath(
'//*[@id="index-top-search"]/div/div[4]/button').click()
try:
driver.execute_script(
"document.getElementsByClassName('select2 select2-container select2-container--default select2-container--focus')[0].click()")
time.sleep(2)
driver.find_element_by_xpath(
'//*[@id="select-order"]/div[2]/ul/li[3]').click()
time.sleep(5)
str = driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div[1]/div/div[2]/div[3]/div[2]/div').text
sayilar = [int(s) for s in str.split() if s.isdigit()]
str = driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div[2]/div/div[2]/div[3]/div[2]/div').text
sayilars = [int(s) for s in str.split() if s.isdigit()]
if sayilar < sayilars:
print u'Sıralama büyükten küçüğe çalışmaktadır'
except Exception as e:
print(e)
print u'tek ürün mevcut'
try:
driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div['+i+']/div/div[2]/div[1]/a').click()
except:
print u'tek ürün mevcut ilk ürün açıldı'
driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div[1]/div/div[2]/div[1]/a').click()
time.sleep(10)
driver.close()
except Exception as e:
driver.close()
| [
"huseyin@hayalmahsulleri.com.tr"
] | huseyin@hayalmahsulleri.com.tr |
ffd4ff39507434f06cbbc5a0767aeadf66cdf5a4 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/idea/party/issue/line_friend/group_lot_guy_lombok_kind/door/oauth.py | 555d4e970019c6d7f81128a63b321c2efb7bdedb | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,672 | py | const request = require('request')
const uuidv4 = require('uuid/v4')
const { LimitReachedError } = require('./errors')
// Service limits for the Azure (Microsoft) Translator Text API v3.
const ITEMS_IN_REQUEST_LIMIT = 25      // max number of texts per /translate call
const REQUEST_CHAR_LIMIT = 5000        // max characters per request
const CHAR_PER_HOUR_LIMIT = 2000000    // per-hour character quota
// const subscriptionKey = process.env.TRANSLATOR_TEXT_KEY;
// if (!subscriptionKey) {
//   throw new Error('Environment variable for your subscription key is not set.')
// }
// SECURITY NOTE(review): the subscription key is hard-coded below instead of
// being read from the environment (see the disabled block above). It should
// be removed from source control and rotated.
const subscriptionKey = 'a674785ff843a278a87995ef4ee1659b'
// Constructor holds no state; all behaviour is attached to the prototype below.
function MicrosoftTranslator () {}
/**
 * Translate an array of strings into `targetLang` via the Microsoft
 * Translator Text API v3.
 *
 * @param {string[]} strings    texts to translate (service caps above apply)
 * @param {string}   targetLang target language code, e.g. 'ru'
 * @returns {Promise<string[]>} translations in input order. Rejects with a
 *   LimitReachedError on size/quota errors (codes 400077 / 403001) and a
 *   plain Error for any other API error.
 */
MicrosoftTranslator.prototype.translate = function (strings, targetLang) {
  console.log(`Microsoft: translating ${strings.length} strings to ${targetLang}...`)
  let options = {
    method: 'POST',
    baseUrl: 'https://api.cognitive.microsofttranslator.com/',
    url: 'translate',
    qs: {
      'api-version': '3.0',
      to: targetLang
    },
    headers: {
      // BUG FIX: the key must be sent in the documented
      // 'Ocp-Apim-Subscription-Key' header; the previous header name was
      // corrupted, so every request would have been rejected as unauthorized.
      'Ocp-Apim-Subscription-Key': subscriptionKey,
      'Content-type': 'application/json',
      'X-ClientTraceId': uuidv4().toString()
    },
    // v3 expects a JSON array of { text: ... } objects.
    body: strings.map(str => ({ text: str })),
    json: true
  }
  return new Promise((resolve, reject) => {
    request(options, (err, res, body) => {
      if (err) {
        // Transport-level failure (DNS, socket, ...).
        reject(err)
        return
      }
      if (body.error) {
        console.log('body', body)
        if (body.error.code === 400077) {
          // Request exceeded the maximum allowed size.
          reject(new LimitReachedError('Microsoft', 'Maximum request size'))
        } else if (body.error.code === 403001) {
          // Hourly character quota exhausted.
          reject(new LimitReachedError('Microsoft', 'Quota per hour'))
        } else {
          reject(new Error(body.error.message))
        }
      } else {
        // One response item per input string; flatten their translations.
        let translations = body
          .reduce((accum, item) => accum.concat(item.translations), [])
          .map(i => i.text)
        resolve(translations)
      }
    })
  }).then(translations => {
    console.log(`Microsoft: Translation succeed. Got ${translations.length} translations.`)
    return translations
  })
}
// Limit accessors used by callers to size their batches before calling
// translate(); values mirror the service caps declared at the top of the file.
MicrosoftTranslator.prototype.getRequestLimit = function () {
  return REQUEST_CHAR_LIMIT
}
MicrosoftTranslator.prototype.getRequestItemsCountLimit = function () {
  return ITEMS_IN_REQUEST_LIMIT
}
MicrosoftTranslator.prototype.getMaxLimit = function () {
  return CHAR_PER_HOUR_LIMIT
}
module.exports = MicrosoftTranslator
// new MicrosoftTranslator()
// .translate([(new Array(5001)).join('a'), 'b'], 'ru')
// .then(translations => console.log('Result', translations))
// .catch(err => console.error(err))
/*
* Limits: https://docs.microsoft.com/en-us/azure/cognitive-services/translator/request-limits
* https://docs.microsoft.com/en-us/azure/cognitive-services/translator/reference/v3-0-translate?tabs=curl
* */
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
24bb1c93e94a9f3ea07ca4b69095ba78a63c236d | 569c958cf7a13f61ebe1417caee671be5ba4adb4 | /LFUcache-Python/LFUCache.py | a1c98614d68c3c1bb3abb707f57c2efb94946ea7 | [] | no_license | Dechuan0629/LeetCodePractice | 932c079aff4cc1ef01d3a57b2a3d70389c1c81e3 | 8e801a667617bc76854f02dbe2fcd068d448fa39 | refs/heads/master | 2022-12-26T02:37:00.247683 | 2022-12-15T09:29:17 | 2022-12-15T09:29:17 | 246,292,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | import time
class LFUCache:
    """Least-Frequently-Used cache of fixed capacity.

    Every entry is stored as ``str(key) -> [value, use_count, last_touch_us]``.
    When the cache is full, the entry with the smallest ``use_count`` is
    evicted; ties are broken by the oldest microsecond timestamp (LRU).
    """

    def __init__(self, capacity: int):
        self.capacity = capacity    # maximum number of entries
        self.current_capacity = 0   # entries currently stored
        self.cache = {}             # str(key) -> [value, use_count, timestamp]

    def get(self, key: int) -> int:
        """Return the cached value, bumping its usage stats, or -1 if absent."""
        entry = self.cache.get(str(key))
        if entry is None:
            return -1
        entry[1] += 1
        entry[2] = round(time.time() * 1000000)  # microsecond touch time
        return entry[0]

    def put(self, key: int, value: int) -> None:
        """Insert or update ``key``; evicts the LFU entry when full."""
        name = str(key)
        stamp = round(time.time() * 1000000)
        if name in self.cache:
            # Updating an existing key counts as a use, just like get().
            record = self.cache[name]
            record[0] = value
            record[1] += 1
            record[2] = stamp
            return
        if self.current_capacity < self.capacity:
            self.current_capacity += 1
        else:
            if not self.cache:
                # Zero-capacity cache: nothing can ever be stored
                # (mirrors the original's ValueError -> -1 path).
                return -1
            # Evict the entry with the lowest (use_count, timestamp) pair.
            victim = min(self.cache, key=lambda k: self.cache[k][1:])
            del self.cache[victim]
        self.cache[name] = [value, 0, stamp]
def main():
    """Interactive console driver for LFUCache.

    Prompts for a capacity, performs a few warm-up operations, then loops
    reading 'put'/'get' commands until any other input is entered.
    """
    cache = LFUCache(int(input('input the cache capacity:')))
    # Warm-up: a repeated put plus a get on the same key.
    cache.put(2, 1)
    cache.put(2, 2)
    cache.get(2)
    while True:
        op = input('input operator:')
        if op == 'put':
            raw = input('input key,value:')
            key, value = (int(part) for part in raw.split(','))
            cache.put(key, value)
        elif op == 'get':
            print(cache.get(int(input('input key:'))))
        else:
            break
# Run the interactive demo only when executed as a script.
if __name__ == '__main__':
    main() | [
"610459802@qq.com"
] | 610459802@qq.com |
f158c6821e350e490fa25d9eda4fc880f01fe9d0 | 6e2dc82bcfbc420ce6fd8e890f9f254e8e594902 | /www/cursivedata/migrations/0005_auto__add_field_pipeline_anim_loop.py | 92063d7e04206acf824ce406bdbd7eabe0b2d325 | [
"CC-BY-4.0"
] | permissive | mattvenn/cursivedata | 8ea86bde4a58a5678b1116953d17f0ae3600daf6 | 43e43263bef6f01698166d87bcff00b246957277 | refs/heads/master | 2021-01-17T08:06:38.715586 | 2016-07-22T16:04:10 | 2016-07-22T16:04:10 | 5,599,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,340 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the boolean ``anim_loop`` field to Pipeline.

    Auto-generated by South's ``schemamigration`` command; the ``models`` dict
    below is a frozen snapshot of the app's ORM state at generation time and
    should not be edited by hand.
    """
    def forwards(self, orm):
        """Apply the migration: create the new column with its default."""
        # Adding field 'Pipeline.anim_loop'
        db.add_column('cursivedata_pipeline', 'anim_loop',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the column again."""
        # Deleting field 'Pipeline.anim_loop'
        db.delete_column('cursivedata_pipeline', 'anim_loop')
    # Frozen ORM snapshot (auto-generated) -- South uses this to build `orm`.
    models = {
        'cursivedata.cosmsource': {
            'Meta': {'object_name': 'COSMSource'},
            'add_feed_id': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'add_feed_title': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'add_location': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'api_key': ('django.db.models.fields.CharField', [], {'default': "'WsH6oBOmVbflt5ytsSYHYVGQzCaSAKw0Ti92WHZzajZHWT0g'", 'max_length': '400'}),
            'cosm_trigger_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'cosm_url': ('django.db.models.fields.CharField', [], {'default': "'http://api.cosm.com/v2/triggers/'", 'max_length': '200'}),
            'feed_id': ('django.db.models.fields.CharField', [], {'default': "'96779'", 'max_length': '400'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Unknown Source'", 'max_length': '100'}),
            'pipelines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cursivedata.Pipeline']", 'symmetrical': 'False', 'blank': 'True'}),
            'stream_id': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '400'}),
            'use_stream_id': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'cursivedata.datapoint': {
            'Meta': {'object_name': 'DataPoint'},
            'current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'datastore': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.DataStore']"}),
            'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'cursivedata.datastore': {
            'Meta': {'object_name': 'DataStore'},
            'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'cursivedata.endpoint': {
            'Meta': {'object_name': 'Endpoint'},
            'device': ('django.db.models.fields.CharField', [], {'default': "'web'", 'max_length': '200'}),
            'full_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'full_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'generate_gcode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'height': ('django.db.models.fields.FloatField', [], {'default': '200', 'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_height': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'img_width': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'last_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
            'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'robot_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'side_margin': ('django.db.models.fields.FloatField', [], {'default': '10', 'max_length': '200'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
            'status_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'top_margin': ('django.db.models.fields.FloatField', [], {'default': '10', 'max_length': '200'}),
            'width': ('django.db.models.fields.FloatField', [], {'default': '200', 'max_length': '200'})
        },
        'cursivedata.gcodeoutput': {
            'Meta': {'object_name': 'GCodeOutput'},
            'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'served': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'cursivedata.generator': {
            'Meta': {'object_name': 'Generator'},
            'description': ('django.db.models.fields.CharField', [], {'default': "'Unknown'", 'max_length': '2000'}),
            'file_path': ('django.db.models.fields.CharField', [], {'default': "'./generators'", 'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'default': "'No Image'", 'max_length': '200'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'module_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'cursivedata.generatorstate': {
            'Meta': {'object_name': 'GeneratorState'},
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'params': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'state': ('jsonfield.fields.JSONField', [], {'default': '{}'})
        },
        'cursivedata.parameter': {
            'Meta': {'object_name': 'Parameter'},
            'data_type': ('django.db.models.fields.CharField', [], {'default': "'float'", 'max_length': '20'}),
            'default': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '200', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'default': "'Some parameter'", 'max_length': '1000', 'blank': 'True'}),
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'cursivedata.pipeline': {
            'Meta': {'object_name': 'Pipeline'},
            'anim_autoplay': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'anim_loop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'anim_speed': ('django.db.models.fields.IntegerField', [], {'default': '1000'}),
            'auto_begin_days': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'data_store': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cursivedata.DataStore']", 'unique': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000', 'blank': 'True'}),
            'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']"}),
            'full_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'full_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_height': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'img_width': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'last_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'next_auto_begin_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'print_top_left_x': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'print_top_left_y': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'print_width': ('django.db.models.fields.FloatField', [], {'default': '500'}),
            'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cursivedata.COSMSource']", 'symmetrical': 'False', 'blank': 'True'}),
            'state': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cursivedata.GeneratorState']", 'unique': 'True'})
        },
        'cursivedata.storedoutput': {
            'Meta': {'object_name': 'StoredOutput'},
            'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']", 'null': 'True', 'blank': 'True'}),
            'filename': ('django.db.models.fields.CharField', [], {'default': "'output/none'", 'max_length': '200'}),
            'filetype': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '10'}),
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'pipeline': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Pipeline']", 'null': 'True', 'blank': 'True'}),
            'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'complete'", 'max_length': '10'})
        }
    }
    complete_apps = ['cursivedata'] | [
"matt@mattvenn.net"
] | matt@mattvenn.net |
4ab262e191693747daa93149c342a0572400a678 | 0255c635cd5aba99c7dafb3f34757fc4761ecbb7 | /annotation_pipeline/mturk_backend/hit_results.py | b7381b2b864259150bd21a56e92d852280a9fcc7 | [
"Apache-2.0",
"MIT"
] | permissive | tomerwolgithub/Break | 021a6ecfd780fbcd0556dbdc8dcd6a2f2fe16115 | e7106929b9b7cca069e5d33c894d0eec10ef538f | refs/heads/master | 2022-12-17T14:31:34.528576 | 2021-10-30T08:56:09 | 2021-10-30T08:56:09 | 233,827,307 | 46 | 13 | MIT | 2022-12-08T03:40:20 | 2020-01-14T11:35:00 | JavaScript | UTF-8 | Python | false | false | 1,537 | py | """HIT results data structure"""
import json
class HITResults:
    """Base record for a single MTurk assignment submission."""

    def __init__(self, hit_id, assignment_id, worker_id, submit_time):
        self.hit_id = hit_id
        self.assignment_id = assignment_id
        self.worker_id = worker_id
        # submit_time arrives as a datetime; keep a plain string for JSON output.
        self.submit_time = submit_time.strftime('%Y-%m-%d %H:%M:%S.%f')
        self.accepted = None  # None = undecided, True = accepted, False = rejected
        self.type = None      # overridden by subclasses ('gen' / 'val')

    def accept(self):
        """Mark the assignment as accepted."""
        self.accepted = True

    def reject(self):
        """Mark the assignment as rejected."""
        self.accepted = False

    def to_json(self):
        """Serialise the full instance state (its ``__dict__``) to JSON."""
        return json.dumps(self, default=lambda o: o.__dict__)
class GenerationResults(HITResults):
    """Results of a decomposition-generation HIT."""

    def __init__(self, hit_id, assignment_id, worker_id, submit_time, decomposition):
        super().__init__(hit_id, assignment_id, worker_id, submit_time)
        self.type = 'gen'
        self.decomposition = decomposition
        # Manual-validation outcome; both stay None until validate() is called.
        self.manually_validated = None
        self.valid_annotation = None

    def validate(self, manual_validation_result):
        """Record the outcome of a manual validation pass."""
        self.valid_annotation = manual_validation_result
        self.manually_validated = True
class ValidationResults(HITResults):
    """Results of a validation HIT over an annotation and a bank item."""

    def __init__(self, hit_id, assignment_id, worker_id, submit_time,
                 annotation_validation, bank_validation):
        super().__init__(hit_id, assignment_id, worker_id, submit_time)
        self.type = 'val'
        self.annotation_validation = annotation_validation
        self.bank_validation = bank_validation
"noreply@github.com"
] | noreply@github.com |
610ac8671393a3cc93c8ac2f5fb7cbe982e9e849 | 96090102d5e87f1771ba5a90f7b676f4ccb0afa6 | /src/profiles/forms.py | ef4d8adbf95e2f2acf6f725493fe0bef6afcef2b | [] | no_license | rahulsayon/SocialWedding | b4b37ad69b89236784c6fb983ab27b4cd2e4266e | ab96b6a5d381936463065e75f74d0c8ffd3b1907 | refs/heads/master | 2022-12-18T15:08:39.380348 | 2020-09-27T14:49:40 | 2020-09-27T14:49:40 | 299,053,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from django import forms
from . models import Profile
class ProfileModalForm(forms.ModelForm):
    """Modal form for editing the basic, user-facing fields of a Profile."""

    class Meta:
        model = Profile
        fields = ['first_name', 'last_name', 'bio', 'avatar']
"rahulsayon95@gmail.com"
] | rahulsayon95@gmail.com |
7094d4bbe7a500eb46faa9fac35c316ada1389af | 77fc5af96da1d461c86c7f9668b64b99ca04a1b6 | /codes/horner.py | 4458f960d38c57f60ba6940082b190afccdbd331 | [] | no_license | rene-d/edupython | 5b6bc8ddb5eb8ec896ee70fb961d4e689af1075a | 1261d0c7aae17bb2d4ff3370860768b73ba4172d | refs/heads/master | 2020-11-24T10:07:18.504472 | 2019-12-21T21:03:08 | 2019-12-21T21:03:08 | 228,099,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # Méthode de Hörner
# https://edupython.tuxfamily.org/sources/view.php?code=horner
# Créé par IANTE, le 12/07/2011
from lycee import *
# Horner's scheme (synthetic division): given polynomial P (coefficients in
# increasing powers) and a known root r, compute Q with P(x) = (x - r) * Q(x).
# liste_demande / demande / affiche_poly come from the EduPython `lycee` helpers.
P=liste_demande('entrez les coefficients de P(x) par ordre des puissances croissantes')
r=demande('Entrez une racine évidente')
Q=[0]*(len(P)-1)  # quotient polynomial, one degree lower than P
v=0  # running Horner accumulator
for d in range(len(P)-2,-1,-1):
    # Standard Horner step, from the highest coefficient downwards.
    v=P[d+1]+r*v
    Q[d]=v
print (affiche_poly(P)+'=('+affiche_poly([-r,1])+')('+affiche_poly(Q)+')')
| [
"rene.devichi@gmail.com"
] | rene.devichi@gmail.com |
8dfafbc28a614569c623e5f7c6693a7448952581 | b48a447030cd1afd7c38f765eb21448ff87c7b2f | /app/app/test.py | c767945514566650545e409e3378eeeac378c435 | [] | no_license | joshuagato/django_rest_framework_docker_setup | 87bba4292d708bc33340c07eec08bf2b00917bb6 | ea5f69654616993de91f4993a216a195b1f64657 | refs/heads/master | 2022-12-13T20:15:59.224836 | 2020-09-19T16:36:14 | 2020-09-19T16:36:14 | 295,513,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from django.test import TestCase
from .calc import add, subtract
class CalcTests(TestCase):
    """Unit tests for the arithmetic helpers in calc.py."""

    def test_add_numbers(self):
        """add() returns the sum of its two arguments."""
        total = add(4, 6)
        self.assertEqual(total, 10)

    def test_subtract_numbers(self):
        """subtract() returns the expected difference.

        NOTE(review): expecting subtract(2, 7) == 5 implies second-argument-
        minus-first ordering -- confirm against calc.subtract.
        """
        difference = subtract(2, 7)
        self.assertEqual(difference, 5)
| [
"joshuagato37@gmail.com"
] | joshuagato37@gmail.com |
39d5e277eb935eee8876c1af0b0557edcf5f6b91 | 146012dda21ab72badad6daa8f98e6b26fedb128 | /13day/9-名片系统.py | c647a413c352cb726036cb58e94329648c26b284 | [] | no_license | fengshuai1/1805 | 41786c3561beca580ba82d9e9d4347571e38e198 | 8dc3e6605cc1d6f91685ae45bfebfc062f0aa489 | refs/heads/master | 2020-03-19T07:41:40.608389 | 2018-06-28T01:45:43 | 2018-06-28T01:45:43 | 136,140,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,124 | py | list = []#存放名字
# Console business-card manager: an endless menu loop offering add / find /
# edit / delete / list operations on in-memory cards.
# NOTE(review): the module-level name `list` (bound just above this block)
# shadows the builtin `list`; renaming it would be safer but it is kept as-is.
print("名片管理系统".center(50,"*"))
while True:
    # Main menu (1 add, 2 find, 3 edit, 4 delete, 5 list all).
    print("1:添加名片".center(50," "))
    print("2:查找名片".center(50," "))
    print("3:修改名片".center(50," "))
    print("4:删除名片".center(50," "))
    print("5:打印名片".center(50," "))
    num = int(input("请选择功能"))
    if num == 1:
        d = {}  # new empty card record
        while True:
            # Validate each field; re-prompt until all three are acceptable.
            name = input("请输入要添加的名字")
            if len(name) > 4:
                print("太长,请重新输入")
                continue
            job = input("请输入要添加的职位")
            if len(job) > 4:
                print("太长,请重新输入")
                continue
            phone = input("请输入手机号")
            if len(phone) != 11 or phone.startswith("1") == False:
                print("手机号输入有误,请重新输入")
                continue
            d["name"] = name
            d["job"] = job
            d["phone"] = phone
            # append the finished card to the list
            list.append(d)
            print("添加成功")
            break
    elif num == 2:
        name = input("请输入要查找的姓名")
        flag = False  # assume the person is absent until proven otherwise
        for i in list:
            if name == i["name"]:
                print("姓名:%s\n职位:%s\n电话:%s"%(i["name"],i["job"],i["phone"]))
                flag = True  # found
                break
        if flag == False:
            print("查无此人")
    elif num == 3:
        # The card must be located before it can be edited.
        name = input("请输入你要改的人的姓名")
        flag = False
        for i in list:
            if name == i["name"]:
                print("1:修改名字")
                print("2:修改职位")
                print("3:修改电话")
                num_1 = int(input("请选择功能"))
                if num_1 == 1:
                    new_name = input("请输入新的名字")
                    i["name"] = new_name
                elif num_1 == 2:
                    new_job = input("请输入新的职位")
                    i["job"] = new_job
                elif num_1 == 3:
                    new_phone = input("请输入新的电话")
                    i["phone"] = new_phone
                flag = True
                break
        if flag == False:
            print("查无此人")
    elif num == 4:
        name = input("请输入你要删除的名字")
        flag = False
        for position,i in enumerate(list):  # enumerate so the index is available for pop()
            if name == i["name"]:
                flag = True  # found
                print("1:确认删除")
                print("2:取消删除")
                num_2 = int(input("请选择序号"))
                if num_2 == 1:
                    list.pop(position)  # delete in place
                    break
        if flag == False:
            print("查无此人")
    elif num == 5:  # print every card
        print("名字\t职位\t电话")
        for i in list:
            print(" "+i["name"]+"\t "+i["job"]+"\t "+i["phone"])
| [
"1329008013@qq.com"
] | 1329008013@qq.com |
a4a14ef74a6feb9dfff1fce11de617aeefe4c012 | 8f4cb6b34e4a13b0d71756987aa07d22d1e5c399 | /solutions/uri/1029/1029.py | aa872f44a910ef2e08cbb1dcaf8f16351f4eafd5 | [
"MIT"
] | permissive | kaneki-ken01/playground | e688537439d4ef937cfeb3a0be54159c5d47d51b | 1900da4a7b352b1228659631068ff365456408e1 | refs/heads/main | 2023-08-16T21:00:05.823664 | 2021-10-04T19:09:08 | 2021-10-04T19:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | fibs = [0] * 40
calls = [0] * 40  # calls[n] = number of recursive calls needed to compute fib(n)
def fibonacci(n):
    """Return fib(n), memoising values in ``fibs`` and call counts in ``calls``.

    Matches URI 1029's cost model: each non-base evaluation costs the two
    child costs plus the two calls it makes itself.
    """
    global fibs, calls
    if n <= 1:
        # Base cases are their own value and cost no further calls.
        fibs[n] = n
        calls[n] = 0
        return fibs[n]
    if fibs[n] != 0:  # already computed (fib(n) > 0 for every n >= 2)
        return fibs[n]
    fibs[n] = fibonacci(n - 1) + fibonacci(n - 2)
    calls[n] = calls[n - 1] + calls[n - 2] + 2
    return fibs[n]
# Read the number of test cases, then answer each query on its own line.
case_count = int(input())
for _ in range(case_count):
    n = int(input())
    result = fibonacci(n)
    print(f'fib({n}) = {calls[n]} calls = {result}')
| [
"deniscostadsc@gmail.com"
] | deniscostadsc@gmail.com |
c441941156bd0808bc93eb34a0c6ef9a076dbaee | 06164402e4a9c46a03d579175e588519dbd4048d | /experiments/experiments_gdsc/cross_validation/vb_nmf/linesearch_xval_vb.py | 013c70a9a0481ff098be2e4b97b6fb3098dc6e91 | [
"Apache-2.0"
] | permissive | XuanHeIIIS/BNMTF | 19547e36466ecee8d45fb0002d305ee6b7ba6c23 | 34df0c3cebc5e67a5e39762b9305b75d73a2a0e0 | refs/heads/master | 2020-03-27T12:47:58.375964 | 2018-06-10T10:22:19 | 2018-06-10T10:22:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | """
Run the cross validation with line search for model selection using VB-NMF on
the Sanger dataset.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../../"
sys.path.append(project_location)
import numpy, random
from BNMTF.code.models.bnmf_vb_optimised import bnmf_vb_optimised
from BNMTF.code.cross_validation.line_search_cross_validation import LineSearchCrossValidation
from BNMTF.data_drug_sensitivity.gdsc.load_data import load_gdsc
# Settings
standardised = False           # use the raw (non-standardised) Sanger matrix
iterations = 1000              # VB iterations per model fit
init_UV = 'random'             # initialisation scheme for the U, V factor matrices
K_range = [15,20,25,30]        # candidate ranks explored by the line search
no_folds = 10                  # cross-validation folds
restarts = 1                   # restarts per (K, fold) combination
quality_metric = 'AIC'         # criterion used to pick the best K
output_file = "./results.txt"  # passed to LineSearchCrossValidation as file_performance
alpha, beta = 1., 1.           # prior hyperparameters -- presumably the noise prior; see bnmf_vb_optimised
lambdaU = 1./10.               # prior parameters for the U and V factors
lambdaV = 1./10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
# Load in the Sanger dataset (only the min-shifted matrix X_min and mask M are used)
(_,X_min,M,_,_,_,_) = load_gdsc(standardised=standardised,sep=',')
# Run the cross-validation framework
#random.seed(42)
#numpy.random.seed(9000)
nested_crossval = LineSearchCrossValidation(
    classifier=bnmf_vb_optimised,
    R=X_min,
    M=M,
    values_K=K_range,
    folds=no_folds,
    priors=priors,
    init_UV=init_UV,
    iterations=iterations,
    restarts=restarts,
    quality_metric=quality_metric,
    file_performance=output_file
)
nested_crossval.run()
"""
all_MSE = [2.2242309355503416, 2.3108126630384804, 2.4095896447817631, 2.2188694213830114, 2.4185938516134278, 2.1808748510586002, 2.2503432196374651, 2.2305023229025145, 2.3595465204422488, 2.2186318302878667]
all_R2 = [0.8123419361488506, 0.8011409466575017, 0.7943028271877304, 0.8125046212085996, 0.7934881370166628, 0.8111969927756486, 0.8058878338360765, 0.811089129626958, 0.798953276136085, 0.8151865445946502]
Average MSE: 2.2821995260695718 +- 0.0066998949966021598
Average R^2: 0.80560922451887629 +- 5.8495363723835686e-05
""" | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
6dd96a9c6db1009bb2305040395a722405b07ba1 | e877a3761f4f6ceefc361eee30844e82ca4155b1 | /testing/models.py | 2a1cae72ade8df75fec16b47e780c9480994b4d6 | [] | no_license | AlexSilva18/StatikA | a24b2d54b63b7038188310fe56adddb3dbba2a8b | 7b5791ff1055d7e4fa25fc249930d5beb2b58a1e | refs/heads/master | 2021-01-01T15:54:25.604198 | 2017-08-10T18:25:54 | 2017-08-10T18:25:54 | 97,729,746 | 0 | 1 | null | 2017-07-27T16:10:16 | 2017-07-19T15:02:14 | HTML | UTF-8 | Python | false | false | 325 | py | from django.db import models
class Testing(models.Model):
    """A trivial test record: a title plus an optional free-text description."""

    title = models.CharField(max_length=255)
    test_description = models.TextField(blank=True, default='')

    def __str__(self):
        return f'{self.title}: {self.test_description}'

    def get_absolute_url(self):
        # NOTE(review): placeholder -- prints "Hello" and returns None instead
        # of a URL; a real implementation should build one with reverse().
        print("Hello")
        return None
| [
"alex.rsilva18@gmail.com"
] | alex.rsilva18@gmail.com |
4eff0fd515d88525ed27929659bd4f5416c8a937 | 0181ec7a90e4e5955c88751f7ef7ab0dbbb21203 | /parser.py | cc5701698b239f2659b835cfd3032656d4c417a0 | [] | no_license | rizel/gyle | fa7d490fc587bddc0688bd02ff2cd25e4e4504b3 | e32ad8c7ba8b5c011f25c837f0bb378d9db110bf | refs/heads/master | 2023-06-13T08:11:22.627707 | 2021-07-11T22:56:33 | 2021-07-11T22:56:33 | 383,124,195 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | import json
from pprint import pprint
from configuration import OUTPUT_FILE, INTERESTING_JOB_TITLES
from global_variables import *
def parse_informational_data():
    """Parse the scraped employee dump in OUTPUT_FILE and update the
    module-level aggregates: country / language / job-title tallies, the
    interesting-facts list, selected-people buckets, and the
    processed/skipped counters.
    """
    global skipped_employees, processed_employees
    global argyle_countries, argyle_languages, argyle_interesting_facts
    with open(OUTPUT_FILE, 'r') as file:
        # The file holds a JSON-encoded string whose value is itself JSON,
        # so it has to be decoded twice.
        json_content = json.loads(json.loads(file.read()))
    employees_data = json_content['props']['pageProps']['employees']
    for employee in employees_data:
        try:
            country = employee['Country']
            languages = employee['Languages']
            job_title = employee['Job Title']
            argyle_interesting_facts.append(employee['Interesting Fact'])
            # Frequency tallies (dict.get collapses the if/else counters).
            argyle_countries[country] = argyle_countries.get(country, 0) + 1
            argyle_job_titles[job_title] = argyle_job_titles.get(job_title, 0) + 1
            # Keep the whole record for the job titles we care about.
            if job_title in INTERESTING_JOB_TITLES:
                interesting_people[job_title].append(employee)
            for language in languages:
                argyle_languages[language] = argyle_languages.get(language, 0) + 1
            processed_employees += 1
        except Exception as e:
            # BUG FIX: malformed records were printed but never counted,
            # even though skipped_employees is declared global above.
            skipped_employees += 1
            print(e)
"rizelita@gmail.com"
] | rizelita@gmail.com |
1ac9e3f7b26aa042692590d17c2b31da13a46806 | 3549dd941a47e359fa3ab17f2f2ab1968fb928e4 | /dataset/mini_imagenet.py | 977d67fbdfa3631520e6fc5a48353ed196f061d1 | [] | no_license | Juncheng-Dong/FSL | 74628d414a863091cfcc5627ed5dc2a54a7f2611 | 1273175c6cfc14a27dcc6d7e5b682f950b45b4ed | refs/heads/main | 2023-08-01T03:49:01.978935 | 2021-09-14T15:48:33 | 2021-09-14T15:48:33 | 406,255,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,283 | py | import os
import pickle
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
class ImageNet(Dataset):
def __init__(self, args, partition='train', pretrain=True, is_sample=False, k=4096,
transform=None):
super(Dataset, self).__init__()
self.data_root = args.data_root
self.partition = partition
self.data_aug = args.data_aug
self.mean = [120.39586422 / 255.0, 115.59361427 / 255.0, 104.54012653 / 255.0]
self.std = [70.68188272 / 255.0, 68.27635443 / 255.0, 72.54505529 / 255.0]
self.normalize = transforms.Normalize(mean=self.mean, std=self.std)
self.pretrain = pretrain
if transform is None:
if self.partition == 'train' and self.data_aug:
self.transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.RandomCrop(84, padding=8),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.array(x),
transforms.ToTensor(),
self.normalize
])
else:
self.transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.ToTensor(),
self.normalize
])
else:
self.transform = transform
if self.pretrain:
self.file_pattern = 'miniImageNet_category_split_train_phase_%s.pickle'
else:
self.file_pattern = 'miniImageNet_category_split_%s.pickle'
self.data = {}
with open(os.path.join(self.data_root, self.file_pattern % partition), 'rb') as f:
data = pickle.load(f, encoding='latin1')
self.imgs = data['data']
self.labels = data['labels']
# pre-process for contrastive sampling
self.k = k
self.is_sample = is_sample
if self.is_sample:
self.labels = np.asarray(self.labels)
self.labels = self.labels - np.min(self.labels)
num_classes = np.max(self.labels) + 1
self.cls_positive = [[] for _ in range(num_classes)]
for i in range(len(self.imgs)):
self.cls_positive[self.labels[i]].append(i)
self.cls_negative = [[] for _ in range(num_classes)]
for i in range(num_classes):
for j in range(num_classes):
if j == i:
continue
self.cls_negative[i].extend(self.cls_positive[j])
self.cls_positive = [np.asarray(self.cls_positive[i]) for i in range(num_classes)]
self.cls_negative = [np.asarray(self.cls_negative[i]) for i in range(num_classes)]
self.cls_positive = np.asarray(self.cls_positive)
self.cls_negative = np.asarray(self.cls_negative)
def __getitem__(self, item):
img = np.asarray(self.imgs[item]).astype('uint8')
img = self.transform(img)
target = self.labels[item] - min(self.labels)
if not self.is_sample:
return img, target, item
else:
pos_idx = item
replace = True if self.k > len(self.cls_negative[target]) else False
neg_idx = np.random.choice(self.cls_negative[target], self.k, replace=replace)
sample_idx = np.hstack((np.asarray([pos_idx]), neg_idx))
return img, target, item, sample_idx
def __len__(self):
return len(self.labels)
class MetaImageNet(ImageNet):
def __init__(self, args, partition='train', train_transform=None, test_transform=None, fix_seed=True):
super(MetaImageNet, self).__init__(args, partition, False)
self.fix_seed = fix_seed
self.n_ways = args.n_ways
self.n_shots = args.n_shots
self.n_queries = args.n_queries
self.classes = list(self.data.keys())
self.n_test_runs = args.n_test_runs
self.n_aug_support_samples = args.n_aug_support_samples
if train_transform is None:
self.train_transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.RandomCrop(84, padding=8),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.array(x),
transforms.ToTensor(),
self.normalize
])
else:
self.train_transform = train_transform
if test_transform is None:
self.test_transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.ToTensor(),
lambda x: np.array(x),
self.normalize
])
else:
self.test_transform = test_transform
self.data = {}
for idx in range(self.imgs.shape[0]):
if self.labels[idx] not in self.data:
self.data[self.labels[idx]] = []
self.data[self.labels[idx]].append(self.imgs[idx])
self.classes = list(self.data.keys())
def __getitem__(self, item):
if self.fix_seed:
np.random.seed(item)
cls_sampled = np.random.choice(self.classes, self.n_ways, False)
support_xs = []
support_ys = []
query_xs = []
query_ys = []
for idx, cls in enumerate(cls_sampled):
imgs = np.asarray(self.data[cls]).astype('uint8')
support_xs_ids_sampled = np.random.choice(range(imgs.shape[0]), self.n_shots, False)
support_xs.append(imgs[support_xs_ids_sampled])
support_ys.append([idx] * self.n_shots)
query_xs_ids = np.setxor1d(np.arange(imgs.shape[0]), support_xs_ids_sampled)
query_xs_ids = np.random.choice(query_xs_ids, self.n_queries, False)
query_xs.append(imgs[query_xs_ids])
query_ys.append([idx] * query_xs_ids.shape[0])
support_xs, support_ys, query_xs, query_ys = np.array(support_xs), np.array(support_ys), np.array(
query_xs), np.array(query_ys)
num_ways, n_queries_per_way, height, width, channel = query_xs.shape
query_xs = query_xs.reshape((num_ways * n_queries_per_way, height, width, channel))
query_ys = query_ys.reshape((num_ways * n_queries_per_way, ))
support_xs = support_xs.reshape((-1, height, width, channel))
if self.n_aug_support_samples > 1:
support_xs = np.tile(support_xs, (self.n_aug_support_samples, 1, 1, 1))
support_ys = np.tile(support_ys.reshape((-1, )), (self.n_aug_support_samples))
support_xs = np.split(support_xs, support_xs.shape[0], axis=0)
query_xs = query_xs.reshape((-1, height, width, channel))
query_xs = np.split(query_xs, query_xs.shape[0], axis=0)
support_xs = torch.stack(list(map(lambda x: self.train_transform(x.squeeze()), support_xs)))
query_xs = torch.stack(list(map(lambda x: self.test_transform(x.squeeze()), query_xs)))
return support_xs, support_ys, query_xs, query_ys
def __len__(self):
return self.n_test_runs
if __name__ == '__main__':
args = lambda x: None
args.n_ways = 5
args.n_shots = 1
args.n_queries = 12
args.data_root = 'data'
args.data_aug = True
args.n_test_runs = 5
args.n_aug_support_samples = 1
imagenet = ImageNet(args, 'val')
print(len(imagenet))
print(imagenet.__getitem__(500)[0].shape)
metaimagenet = MetaImageNet(args)
print(len(metaimagenet))
print(metaimagenet.__getitem__(500)[0].size())
print(metaimagenet.__getitem__(500)[1].shape)
print(metaimagenet.__getitem__(500)[2].size())
print(metaimagenet.__getitem__(500)[3].shape)
| [
"jd420@research-tarokhlab-10.oit.duke.edu"
] | jd420@research-tarokhlab-10.oit.duke.edu |
b6797ce0808d55a048fdb5f8ef31e878e2ee688e | c637f95bb4fdd6fcf5e0ee2b8a7ea59f915ebc13 | /Red Team/cobaltsrike-notif-beacon/telegram.py | 6784631dfb91f0d412b586b7c97998db51fbdd83 | [] | no_license | Crj0b/Pentest | e9bbfcc82122176ad0ae9c27961376b33fe76e94 | a893c3cbd4d34dcb70cb12430dc33558208f3c2b | refs/heads/master | 2023-03-17T10:45:31.035452 | 2019-06-30T18:23:19 | 2019-06-30T18:23:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #! /usr/bin/env python3
import argparse
import telepot
import socket
chat_id = 'xxx' #userID
bot = telepot.Bot('xxxxxxxx') #token telegram
parser = argparse.ArgumentParser(description='Beacon Info')
parser.add_argument('--computername')
parser.add_argument('--internalip')
parser.add_argument('--username')
args = parser.parse_args()
computername = args.computername
internalip = args.internalip
username = args.username
hostname = socket.gethostname()
message = "Message from "+hostname+" Server\nBeacon succes implant Info Target\nUsername : "+username+"\nIpaddres : "+internalip+"\nComputer name : "+computername+"."
bot.sendMessage(chat_id, message)
| [
"rahmat.hadi@tiket.com"
] | rahmat.hadi@tiket.com |
624bff574da212d7d85f10e9d8fb96838f062fbc | c638ed4efd02028c97c0d6fe84200c4e4484fdba | /CCC/triangleTimes.py | 11a228073e63568a153a405bd609ebddd0c4ea4c | [] | no_license | oliver-ode/Algorithmic-Coding | 3c08f783e2f8d07040b81610a8e175383716eaf3 | dc4d4bfb538e04789ed731236e1a5a39978cad0a | refs/heads/master | 2022-12-07T02:35:56.629521 | 2020-08-31T03:18:30 | 2020-08-31T03:18:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | angles = []
for i in range(3):
angles.append(int(input()))
if angles[0] == 60 and angles[1] == 60 and angles[2] == 60:
print("Equilateral")
elif sum(angles) == 180 and angles[0] == angles[1] or angles[1] == angles[2] or angles[0] == angles[2]:
print("Isosceles")
elif sum(angles) != 180:
print("Error")
else:
print("Scalene") | [
"oliverode@gmail.com"
] | oliverode@gmail.com |
94849937d8f7341ee3a6c4ea309d665c8ed58ae7 | 8d1351a226efbe70d4bffa7f185e74b9fe49827e | /app/registerData/registerConform.py | 460e328c7178f6ce1647295140a13e2960b3f5b2 | [] | no_license | heojoon/flaskdemo | 3a0afb2594e736b4774ff4c6f6a3c4a4dad73489 | 9fc1fd317e37c35495a9fcf421cc78787ab31229 | refs/heads/main | 2023-04-05T05:46:47.244706 | 2021-04-17T13:04:03 | 2021-04-17T13:04:03 | 357,064,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # file name : registerConform.py
# pwd : /project_name/app/registerData/registerConform.py
from flask import Blueprint, request, render_template, flash, redirect, url_for
from flask import current_app as app
from app.module import forms
registerData = Blueprint('registerData', __name__, url_prefix='/register')
#@registerData.route('/', methods=['GET','POST'])
#def home():
# return render_template('/registerData/layout.html')
@registerData.route('/', methods=['GET','POST'])
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
flash(f'{form.username.data} 님 가입 완료!', 'success')
#return redirect(url_for('home'))
return render_template('/registerData/register.html', form=form) | [
"heojoon48@gmail.com"
] | heojoon48@gmail.com |
6310e23a458f6e5fc6663a32b235b73bc2454352 | 0da6893c3d64c5653e33b1330e7ea02975e6138b | /Flame_Sensor_Publish.py | 454917a337e2c4222609ddcdb705913bd80d80b5 | [] | no_license | AdarshKBulusu/projectWildfire_Alert-Detection | 6faf6a5b39846d1fd2cdbccf44868d2b23aba1b1 | 2a698a29899aae0953ec4feb95c9b964921038d2 | refs/heads/master | 2022-11-20T00:23:12.646675 | 2020-07-22T01:49:29 | 2020-07-22T01:49:29 | 281,527,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import socket
host = '10.0.0.251.' #127.0.0.1
port = 50008
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(1)
print("hello starting program")
conn, addr = s.accept()
print ("Connection from", addr)
while True:
data = conn.recv(1024)
if not data: break
print("Recieved: "+str(data)) #del str
conn.send("I am server")
response = input("Reply: ")
jim=response.encode()
if response == "exit":
break
conn.sendall(jim)
conn.close()
| [
"noreply@github.com"
] | noreply@github.com |
dda125c8083666e799a4bccbfac1e27a51202a18 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_13284-2532/sdB_EC_13284-2532_lc.py | dfa44dd24cbfe5cc7255aa0893f9c5a3ba440b9b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[202.803875,-25.791181], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_13284-2532 /sdB_EC_13284-2532_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
93f72136ac32eeb64dafcf570b2025f0c7a8aca7 | a51b2cfd2fcfce0e800fba591efb675d7e83da61 | /src/multi_gpu_train.py | 6bb22be2ec00116833cd6539d7ba2be1e6fb2546 | [] | no_license | yyht/tf-center-loss | a3c4164b9de151b970ec3efaf38f79609e1f8f34 | 5cdcb638f1cec355f1938bf3a646338596bc21ef | refs/heads/master | 2020-05-05T08:05:36.618949 | 2018-08-05T04:29:33 | 2018-08-05T04:29:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,137 | py | # auther : lxy
# time : 2017.12.15 /09:56
#project:
# tool: python2
#version: 0.1
#modify:
#name: center loss
#citations: https://github.com/ydwen/caffe-face
#############################
import numpy as np
import tensorflow as tf
from read_tfrecord_v2 import read_single_tfrecord
from net import *
from Center_loss_custom import *
from mnist import mnist_data
import argparse
CENTER_LOSS_ALPHA = 0.9
def argument():
parser = argparse.ArgumentParser(description="face resnet center loss")
parser.add_argument('--batch_size',type=int,default=16,help='the batch_size num')
parser.add_argument('--epoch_num',type=int,default=10,\
help='the epoch num should bigger than 10000')
parser.add_argument('--save_model_name',type=str,default='./face_model/model.ckpt',\
help='model Parameters saved name and directory')
parser.add_argument('--lr',type=float,default=0.001,help='the Learning rate begin')
parser.add_argument('--sta',type=str,default='train',help="input should 'train' or 'test' ")
parser.add_argument('--img_shape',type=int,default='300',help="the input image reshape size")
args = parser.parse_args()
return args
def build_network(input_images, labels):
num_class = 526
sta = 'train'
ratio = 0.003
net = face_net(input_images,num_class,sta)
#logits, features = net.inference()
logits, features = net.get_resnet18()
#res1 = net.res1
assert num_class== net.num_classes,"net class should be equal to loss"
with tf.name_scope('loss'):
with tf.name_scope('center_loss'):
center_loss, centers, centers_update_op = get_center_loss(features,logits, labels, CENTER_LOSS_ALPHA, num_class)
with tf.name_scope('softmax_loss'):
#softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
labels_onehot = tf.one_hot(labels,on_value=1,off_value=0,depth=num_class)
entropy_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels_onehot, logits=logits)
print("entropy_loss ",entropy_loss.shape)
softmax_loss = tf.reduce_mean(entropy_loss)
with tf.name_scope('total_loss'):
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = softmax_loss + ratio * center_loss+0.01 * sum(regularization_losses)
#total_loss = softmax_loss
with tf.name_scope('acc'):
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(tf.arg_max(logits, 1),tf.int32), labels), tf.float32))
with tf.name_scope('pred_class'):
pred_class = tf.arg_max(logits, 1)
with tf.name_scope('loss/'):
tf.summary.scalar('CenterLoss', center_loss)
tf.summary.scalar('SoftmaxLoss', softmax_loss)
tf.summary.scalar('TotalLoss', total_loss)
#return total_loss, accuracy, centers_update_op, center_loss, softmax_loss,pred_class
return total_loss
def make_parallel(model,num_gpus,**kwargs):
in_splits = {}
for k,v in kwargs.items():
in_splits[k] = tf.split(v,num_gpus)
out_splits = []
for i in range(num_gpus):
with tf.device(tf.DeviceSpec(device_type="GPU",device_index=i)):
with tf.variable_scope(tf.get_variable_scope(),reuse=i>0):
out_splits.append(model(**{k:v[i] for k,v in in_splits.items()}))
return tf.stack(out_splits,axis=0)
def main():
LAMBDA = 0.001
num_class = 526
args = argument()
checkpoint_dir = args.save_model_name
lr = args.lr
batch_size = args.batch_size
epoch_num = args.epoch_num
sta = args.sta
img_shape = args.img_shape
num_gpus = 4
#train_batch_loader = BatchLoader("./data/facescrub_train.list", batch_size,img_shape)
#test_batch_loader = BatchLoader("./data/facescrub_val.list", batch_size,img_shape)
#(Height,Width) = (train_batch_loader.height,train_batch_loader.width)
#train_batch_loader = mnist_data(batch_size)
tfrecord_file = './data/MegaFace_train.tfrecord_shuffle'
val_file = './data/MegaFace_val.tfrecord_shuffle'
image_batch, label_batch = read_single_tfrecord(tfrecord_file, batch_size, img_shape)
val_image_batch, val_label_batch = read_single_tfrecord(val_file, batch_size, img_shape)
print("img shape",img_shape)
with tf.name_scope('input'):
input_images = tf.placeholder(tf.float32, shape=(batch_size,img_shape,img_shape,3), name='input_images')
labels = tf.placeholder(tf.int32, shape=(batch_size), name='labels')
learn_rate = tf.placeholder(tf.float32,shape=(None),name='learn_rate')
with tf.name_scope('var'):
global_step = tf.Variable(0, trainable=False, name='global_step')
#total_loss, accuracy, centers_update_op, center_loss, softmax_loss,pred_class = build_network(input_images,labels)
#total_loss, accuracy, centers_update_op, center_loss, softmax_loss,pred_class = make_parallel(build_network,num_gpus,input_images=input_images,labels=labels)
total_loss = make_parallel(build_network,num_gpus,input_images=input_images,labels=labels)
#optimizer = tf.train.AdamOptimizer(learn_rate)
optimizer = tf.train.GradientDescentOptimizer(learn_rate)
#with tf.control_dependencies([centers_update_op]):
train_op = optimizer.minimize(tf.reduce_mean(total_loss), colocate_gradients_with_ops=True)
#train_op = optimizer.minimize(total_loss, global_step=global_step)
summary_op = tf.summary.merge_all()
with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('./tmp/face_log', sess.graph)
saver = tf.train.Saver()
#begin
coord = tf.train.Coordinator()
#begin enqueue thread
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
step = sess.run(global_step)
epoch_idx =0
graph_step=0
item = './data/facescrub_train.list'
imagelist = open(item, 'r')
files_item = imagelist.readlines()
file_len = len(files_item)
batch_num = np.ceil(file_len / batch_size)
while epoch_idx <= epoch_num:
step = 0
ckpt_fg = 'True'
ps_loss=0.0
pc_loss=0.0
acc_sum = 0.0
while step < batch_num:
train_img_batch, train_label_batch = sess.run([image_batch,label_batch])
#print("data in ",in_img[0,:2,:2,0])
_, summary_str,Center_loss = sess.run(
[train_op, summary_op,total_loss],
feed_dict={
input_images: train_img_batch,
labels: train_label_batch,
learn_rate: lr
})
step += 1
#print("step",step, str(Softmax_loss),str(Center_loss))
#print("res1",res1_o[0,:20])
#print("step label",step, str(batch_labels))
graph_step+=1
if step %10 ==0 :
writer.add_summary(summary_str, global_step=graph_step)
pc_loss+=Center_loss
#ps_loss+=Softmax_loss
#acc_sum+=train_acc
if step % 100 == 0:
#lr = lr*0.1
#c_loss+=c_loss
#s_loss+=s_loss
print ("****** Epoch {} Step {}: ***********".format(str(epoch_idx),str(step)) )
print ("center loss: {}".format(pc_loss/100.0))
print ("softmax_loss: {}".format(ps_loss/100.0))
print ("train_acc: {}".format(acc_sum/100.0))
print ("*******************************")
if (Center_loss<0.1 and ckpt_fg=='True'):
print("******************************************************************************")
saver.save(sess, checkpoint_dir, global_step=epoch_idx)
ckpt_fg = 'False'
ps_loss=0.0
pc_loss=0.0
acc_sum=0.0
epoch_idx +=1
if epoch_idx % 5 ==0:
print("******************************************************************************")
saver.save(sess, checkpoint_dir, global_step=epoch_idx)
#writer.add_summary(summary_str, global_step=step)
if epoch_idx % 5 == 0:
lr = lr*0.5
if epoch_idx:
val_img_batch,val_label_batch = sess.run([val_image_batch,val_label_batch])
vali_acc = sess.run(
total_loss,
feed_dict={
input_images: val_img_batch,
labels: val_label_batch
})
print(("epoch: {}, train_acc:{:.4f}, vali_acc:{:.4f}".
format(epoch_idx, Center_loss, vali_acc)))
coord.join(threads)
sess.close()
if __name__ == '__main__':
main()
| [
"lixiaoyu283284@163.com"
] | lixiaoyu283284@163.com |
f4ece548db005dd63655e9189b41b5c461dedea0 | 2ad4b5b73b050f01f4952bd95806f3ff316fbfa4 | /Leetcode/python/sol_25(2).py | e13645131ecae05cbe6f2bafd5435c575cb5a201 | [] | no_license | Clement25/Leetcode-Solution-and-Algorithm | 5d44f4099b8fb7b81fa497cc08161e16e70285b0 | c80fd1dee21209abcbaa1fb09412cd7f2de7b586 | refs/heads/master | 2021-09-08T15:01:44.661219 | 2020-11-06T07:05:16 | 2020-11-06T07:05:16 | 178,236,299 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | def threesumclosest(nums, target):
'''
type nums:List[int]
type target:int
rtype:int
'''
mind = 100000
nums.sort()
for i, v in enumerate(nums[:-2]):
ntgt1 = target - v
for j, u in enumerate(nums[i + 1:-1]):
ntgt2 = ntgt1 - u
if ntgt2 in nums[i + j + 2:]:
return target
for k, w in enumerate(nums[i + j + 2:]):
if w > ntgt2:
break
l = i + j + k + 1
dis1 = abs(w - ntgt2)
dis2 = abs(ntgt2 - nums[l]) if k != 0 else dis1
dis = min(dis1, dis2)
if dis < mind:
mind = dis
res = u + v + w if dis1 <= dis2 else u + v + nums[l]
return res
#print(threesumclosest([-55, -24, -18, -11, -7, -3, 4, 5, 6, 9, 11, 23, 33], 0))
#print(threesumclosest([-1,2,1,-4], 1))
#print(threesumclosest([-1,0,1,1,55], 3))
print(
threesumclosest([
13, 2, 0, -14, -20, 19, 8, -5, -13, -3, 20, 15, 20, 5, 13, 14, -17, -7,
12, -6, 0, 20, -19, -1, -15, -2, 8, -2, -9, 13, 0, -3, -18, -9, -9,
-19, 17, -14, -19, -4, -16, 2, 0, 9, 5, -7, -4, 20, 18, 9, 0, 12, -1,
10, -17, -11, 16, -13, -14, -3, 0, 2, -18, 2, 8, 20, -15, 3, -13, -12,
-2, -19, 11, 11, -10, 1, 1, -10, -2, 12, 0, 17, -19, -7, 8, -19, -17,
5, -5, -10, 8, 0, -12, 4, 19, 2, 0, 12, 14, -9, 15, 7, 0, -16, -5, 16,
-12, 0, 2, -16, 14, 18, 12, 13, 5, 0, 5, 6
], -59))
| [
"35480362+Clement25@users.noreply.github.com"
] | 35480362+Clement25@users.noreply.github.com |
265c1a982e59086f60095200d42064955cf9ed66 | f7d22242393632528f866e4cb4d08ba83550f865 | /Part 11 - XGBoost/xgboost.py | fda346b0b846cb6f0b57ae30026753a18634b205 | [] | no_license | someshjaishwal/Machine-Learning | 5ecc71685e4230f5a031b9368a89fcd34bf94568 | 3f66e282501a87131a364267486f4427bf3fab0b | refs/heads/master | 2020-03-22T05:42:46.492676 | 2018-07-03T13:12:50 | 2018-07-03T13:12:50 | 139,585,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py | # -*- coding: utf-8 -*-
# extreme gradient boosting
# basic libararies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:,3:13].values
y = dataset.iloc[:,13].values
### PART 1 - Preprocessing Dataset
# encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
label_x_1 = LabelEncoder()
X[:,1]= label_x_1.fit_transform(X[:,1])
label_x_2 = LabelEncoder()
X[:,2] = label_x_2.fit_transform(X[:,2])
ohen = OneHotEncoder(categorical_features = [1])
X = ohen.fit_transform(X).toarray()
X = X[:,1:]
# splitting training and test set
from sklearn.cross_validation import train_test_split
X_train, X_text, y_train, y_test = train_test_split(X, y, test_size = 0.2,
random_state = 42)
"""
# no need of feature scaling in xtreme gradient boosting
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_text = sc.transform(X_text)
"""
### PART 2 - fitting xgboost on training set
from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X_train,y_train)
### PART 3 - Making predictions and Evaluating the model
# predicting testset results
y_pred = classifier.predict(X_text)
# making confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# evaluating model
accuracy = (cm[0,0]+cm[1,1])*100/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])
print ("accuracy :",accuracy,"%")
# evaluation using k-fold cross validation
from sklearn.cross_validation import cross_val_score
accuracy_vec = cross_val_score(estimator = classifier,
X = X_train, y = y_train, cv = 10)
final_accurcay = accuracy_vec.mean()
std_deviation = accuracy_vec.std()
| [
"noreply@github.com"
] | noreply@github.com |
47b9a6bd6f7a0eeeea5383a97b107d05ed17e022 | 505506f12bf43f8693b95d4b19bc4e0aded8cab0 | /agents/runner.py | b42ea639c0ed776070021f875e865efeb36b0840 | [
"BSD-3-Clause"
] | permissive | Aubret/gym-minigrid | 8be3fe596a0a071af4c504d215655114d4c7bc76 | fc622913333da4564a7e3343920ce4415e47c5ab | refs/heads/master | 2021-06-06T20:23:48.382545 | 2021-06-04T12:31:37 | 2021-06-04T12:31:37 | 169,751,153 | 0 | 0 | null | 2019-02-08T14:58:54 | 2019-02-08T14:58:53 | null | UTF-8 | Python | false | false | 5,000 | py | import numpy as np
from baselines.a2c.utils import discount_with_dones
from baselines.common.runners import AbstractEnvRunner
#not use this
class Runner(AbstractEnvRunner):
"""
We use this class to generate batches of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch of experiences
"""
def __init__(self, env, model, nsteps=5, gamma=0.99):
super().__init__(env=env, model=model, nsteps=nsteps)
self.gamma = gamma
#self.batch_action_shape = [x if x is not None else -1 for x in model.train_model.action.shape.as_list()]
self.ob_dtype = model.train_model.X.dtype.as_numpy_dtype
def compute_rewards(self,epi_rewards,epi_dones,last_obs):
if self.gamma > 0.0:
# Discount/bootstrap off value fn
last_values = self.model.value(last_obs, S=None, M=epi_dones).tolist()
for n, (rewards, dones, value) in enumerate(zip(epi_rewards, epi_dones, last_values)):
if dones[-1] == 0:
rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
epi_rewards[n] = rewards
return epi_rewards
def run(self):
# We initialize the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
epi_obs, epi_rewards, epi_actions, epi_values, epi_dones = [],[],[],[],[]
mb_states = self.states
for numsteps in range(self.nsteps):
# Given observations, take action and value (V(s))
# We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
actions, values, states, _ = self.model.step(self.obs, S=self.states, M=self.dones)
# Append the experiences
epi_obs.append(np.copy(self.obs))
epi_actions.append(actions)
epi_values.append(values)
#epi_dones.append(self.dones)
# Take actions in env and look the results
obs, rewards, dones, _ = self.env.step(actions)
self.env.render()
epi_rewards.append(rewards)
epi_dones.append(dones)
if dones: #compute the reward before switching episode
self.obs = self.env.reset()
self.dones = False
self.states=None
epi_rewards = np.asarray(epi_rewards, dtype=np.float32).swapaxes(1, 0)
epi_obs = np.asarray(epi_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape((numsteps + 1,) + self.env.observation_space.shape) # .reshape(self.batch_ob_shape)
epi_actions = np.asarray(epi_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
epi_values = np.asarray(epi_values, dtype=np.float32).swapaxes(1, 0)
epi_dones = np.asarray(epi_dones, dtype=np.bool).swapaxes(1, 0)
epi_masks = epi_dones[:, :-1]
epi_dones = epi_dones[:, 1:]
mb_rewards.extend(self.compute_rewards(epi_rewards,epi_dones,obs))
mb_obs.extend(epi_obs)
mb_actions.extend(epi_actions)
mb_values.extend(epi_values)
mb_dones.extend(epi_dones)
epi_obs, epi_rewards, epi_actions, epi_values, epi_dones = [], [], [], [], []
continue
self.states = states
self.dones = dones
self.obs = obs
#epi_dones.append(self.dones)
print(epi_dones)
if not dones:
epi_rewards = np.asarray(epi_rewards, dtype=np.float32).swapaxes(1, 0)
epi_obs = np.asarray(epi_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape((numsteps + 1,) + self.env.observation_space.shape) # .reshape(self.batch_ob_shape)
epi_actions = np.asarray(epi_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
epi_values = np.asarray(epi_values, dtype=np.float32).swapaxes(1, 0)
epi_dones = np.asarray(epi_dones, dtype=np.bool).swapaxes(1, 0)
epi_masks = epi_dones[:, :-1]
#epi_dones = epi_dones[:, 1:]
#Concat last iteartions
mb_rewards.extend(self.compute_rewards(epi_rewards, epi_dones,obs))
mb_obs.extend(epi_obs)
mb_actions.extend(epi_actions)
mb_values.extend(epi_values)
mb_dones.extend(epi_dones)
# Batch of steps to batch of rollouts
#print(self.batch_action_shape)
#print(mb_actions.shape)
#mb_actions = mb_actions.reshape(self.batch_action_shape)
#mb_actions = mb_actions.reshape([numsteps+1])
mb_rewards = mb_rewards.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values
| [
"lalumiere3@hotmail.fr"
] | lalumiere3@hotmail.fr |
73583adcaea5d524cdb542a439f178769047ef38 | 480cbc014abecd36899915e814157fe7e0d4072f | /tools/tasks.py | 3dc7ec6cf9c2c1af355e90ce242754ed7b321601 | [
"Apache-2.0"
] | permissive | mattcarabine/sync_gateway | 090c48f6a9646f40feda1a36ed10b8a33f7fc9f3 | a8bc5099d3c7185de72829a10311c96c800c01c7 | refs/heads/master | 2021-01-18T20:07:25.023611 | 2016-05-20T17:55:36 | 2016-05-20T17:55:36 | 59,411,033 | 0 | 0 | null | 2016-05-22T12:26:34 | 2016-05-22T12:26:33 | null | UTF-8 | Python | false | false | 34,404 | py | #!/usr/bin/env python
# -*- python -*-
import os
import sys
import tempfile
import time
import subprocess
import string
import re
import platform
import glob
import socket
import threading
import optparse
import atexit
import signal
import urllib
import shutil
import urlparse
class AltExitC(object):
    """Best-effort exit-handler registry that also works with os._exit().

    Unlike plain atexit, handlers registered here are run by exit() before
    terminating via os._exit() (which bypasses normal atexit processing).
    Handlers run in reverse registration order (LIFO), and a failing handler
    does not prevent the remaining ones from running.
    """
    def __init__(self):
        self.list = []
        self.lock = threading.Lock()
        # Also hook normal interpreter shutdown.
        atexit.register(self.at_exit_handler)
    def register(self, f):
        """Register callable f to be invoked at exit (LIFO order)."""
        self.lock.acquire()
        self.register_and_unlock(f)
    def register_and_unlock(self, f):
        """Append f to the handler list; the caller must already hold the lock."""
        try:
            self.list.append(f)
        finally:
            self.lock.release()
    def at_exit_handler(self):
        """Run all registered handlers, newest first.

        Exceptions from individual handlers are deliberately suppressed so
        one failing handler cannot stop the rest of the cleanup.
        BUGFIX: the lock is now released when done (previously it was held
        forever, so a second invocation would deadlock), and the handler
        list is drained so handlers cannot be run twice.
        """
        self.lock.acquire()
        try:
            self.list.reverse()
            for f in self.list:
                try:
                    f()
                except Exception:
                    # Best-effort cleanup: keep running remaining handlers.
                    pass
            # Make repeated invocations a no-op.
            del self.list[:]
        finally:
            self.lock.release()
    def exit(self, status):
        """Run all exit handlers, then terminate immediately via os._exit()."""
        self.at_exit_handler()
        os._exit(status)
# Module-level singleton: cleanup callbacks are registered here (see
# TaskRunner.__init__) rather than with atexit directly, so that they also
# run when AltExit.exit() terminates the process via os._exit(), which
# bypasses normal atexit processing.
AltExit = AltExitC()
def log(message, end='\n'):
    """Write message (followed by end) to stderr and flush immediately."""
    text = message + end
    sys.stderr.write(text)
    sys.stderr.flush()
class Task(object):
    """A single diagnostic command whose output is captured into a log file.

    The constructor stores arbitrary keyword arguments as instance
    attributes; several of them alter behaviour:

      literal          - text written to the log instead of running a command
      addenv           - dict of extra environment variables for the command
      platforms        - sys.platform values this task applies to (required
                         by will_run())
      log_file         - per-task log file name (see TaskRunner.run)
      command_to_print - text displayed instead of the real command line
    """
    # Class-level defaults; may be overridden per-task via keyword arguments.
    # privileged: TaskRunner.run skips the task unless running as root.
    privileged = False
    # no_header: suppress the "====" section header in the log file.
    no_header = False
    # num_samples / interval: run the command num_samples times, sleeping
    # interval seconds between samples (see TaskRunner.run).
    num_samples = 1
    interval = 0
    def __init__(self, description, command, timeout=None, **kwargs):
        """description - human-readable label; command - string (run through
        the shell) or argv list (run directly); timeout - seconds before the
        process is killed (None = no limit)."""
        self.description = description
        self.command = command
        self.timeout = timeout
        # Stash any extra keyword arguments directly on the instance
        # (e.g. platforms, literal, addenv, log_file, command_to_print).
        self.__dict__.update(kwargs)
    def execute(self, fp):
        """Run the task, streaming combined stdout/stderr into file object fp.

        Returns the process exit status; 127 if the command could not be
        spawned, 0 for a 'literal' task.
        """
        import subprocess
        # A string command is run through the shell; an argv list is not.
        use_shell = not isinstance(self.command, list)
        if "literal" in self.__dict__:
            # No command to run: just record the canned text.
            print >> fp, self.literal
            return 0
        env = None
        if "addenv" in self.__dict__:
            # Extend (not replace) the current process environment.
            env = os.environ.copy()
            env.update(self.addenv)
        try:
            p = subprocess.Popen(self.command, bufsize=-1,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 shell=use_shell, env=env)
        except OSError, e:
            # if use_shell is False then Popen may raise exception
            # if binary is missing. In this case we mimic what
            # shell does. Namely, complaining to stderr and
            # setting non-zero status code. It might also
            # automatically handle things like "failed to fork due
            # to some system limit".
            print >> fp, "Failed to execute %s: %s" % (self.command, e)
            return 127
        p.stdin.close()
        from threading import Timer, Event
        timer = None
        timer_fired = Event()
        # Only arm the kill timer when the Popen object supports kill();
        # without it the timeout cannot be enforced at all.
        if self.timeout is not None and hasattr(p, 'kill'):
            def on_timeout():
                p.kill()
                timer_fired.set()
            timer = Timer(self.timeout, on_timeout)
            timer.start()
        try:
            # Stream output in 64KB chunks until EOF (process exit or kill).
            while True:
                data = p.stdout.read(64 * 1024)
                if not data:
                    break
                fp.write(data)
        finally:
            if timer is not None:
                timer.cancel()
                timer.join()
            # there's a tiny chance that command succeeds just before
            # timer is fired; that would result in a spurious timeout
            # message
            if timer_fired.isSet():
                print >> fp, "`%s` timed out after %s seconds" % (self.command, self.timeout)
        return p.wait()
    def will_run(self):
        """Determine if this task will run on this platform.

        Relies on the 'platforms' attribute supplied through the
        constructor's keyword arguments."""
        return sys.platform in self.platforms
class TaskRunner(object):
    """Runs Task objects, collecting their output into named log files kept
    in a temporary scratch directory so they can later be zipped together."""
    # Log file used by tasks that do not specify their own log_file attribute.
    default_name = "couchbase.log"
def __init__(self, verbosity=0):
self.files = {}
self.tasks = {}
self.verbosity = verbosity
self.start_time = time.strftime("%Y%m%d-%H%M%S", time.gmtime())
self.tmpdir = tempfile.mkdtemp()
AltExit.register(self.finalize)
def finalize(self):
try:
for fp in self.files.iteritems():
fp.close()
except:
pass
shutil.rmtree(self.tmpdir, ignore_errors=True)
def collect_file(self, filename):
"""Add a file to the list of files collected. Used to capture the exact
file (including timestamps) from the Couchbase instance.
filename - Absolute path to file to collect.
"""
if not filename in self.files:
self.files[filename] = open(filename, 'r')
else:
log("Unable to collect file '{0}' - already collected.".format(
filename))
def get_file(self, filename):
if filename in self.files:
fp = self.files[filename]
else:
fp = open(os.path.join(self.tmpdir, filename), 'w+')
self.files[filename] = fp
return fp
def header(self, fp, title, subtitle):
separator = '=' * 78
print >> fp, separator
print >> fp, title
print >> fp, subtitle
print >> fp, separator
fp.flush()
def log_result(self, result):
if result == 0:
log("OK")
else:
log("Exit code %d" % result)
def run(self, task):
"""Run a task with a file descriptor corresponding to its log file"""
if task.will_run():
if hasattr(task, 'command_to_print'):
command_to_print = task.command_to_print
else:
command_to_print = task.command
log("%s (%s) - " % (task.description, command_to_print), end='')
if task.privileged and os.getuid() != 0:
log("skipped (needs root privs)")
return
if hasattr(task, 'log_file'):
filename = task.log_file
else:
filename = self.default_name
fp = self.get_file(filename)
if not task.no_header:
self.header(fp, task.description, command_to_print)
for i in xrange(task.num_samples):
if i > 0:
log("Taking sample %d after %f seconds - " % (i+1, task.interval), end='')
time.sleep(task.interval)
result = task.execute(fp)
self.log_result(result)
fp.flush()
elif self.verbosity >= 2:
log('Skipping "%s" (%s): not for platform %s' % (task.description, command_to_print, sys.platform))
def zip(self, filename, node):
"""Write all our logs to a zipfile"""
exe = exec_name("gozip")
prefix = "cbcollect_info_%s_%s" % (node, self.start_time)
files = []
for name, fp in self.files.iteritems():
fp.close()
files.append(fp.name)
fallback = False
try:
p = subprocess.Popen([exe, "-strip-path", "-prefix", prefix, filename] + files,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE)
p.stdin.close()
status = p.wait()
if status != 0:
log("gozip terminated with non-zero exit code (%d)" % status)
except OSError, e:
log("Exception during compression: %s" % e)
fallback = True
if fallback:
log("IMPORTANT:")
log(" Compression using gozip failed.")
log(" Falling back to python implementation.")
log(" Please let us know about this and provide console output.")
self._zip_fallback(filename, prefix, files)
def _zip_fallback(self, filename, prefix, files):
from zipfile import ZipFile, ZIP_DEFLATED
zf = ZipFile(filename, mode='w', compression=ZIP_DEFLATED)
try:
for name in files:
zf.write(name,
"%s/%s" % (prefix, os.path.basename(name)))
finally:
zf.close()
class SolarisTask(Task):
    """Task that only runs on Solaris hosts."""
    platforms = ['sunos5', 'solaris']
class LinuxTask(Task):
    """Task that only runs on Linux hosts (Python 2 reports 'linux2')."""
    platforms = ['linux2']
class WindowsTask(Task):
    """Task that only runs on Windows (native or cygwin) hosts."""
    platforms = ['win32', 'cygwin']
class MacOSXTask(Task):
    """Task that only runs on Mac OS X hosts."""
    platforms = ['darwin']
class UnixTask(SolarisTask, LinuxTask, MacOSXTask):
    """Task that runs on any Unix-like platform (Solaris, Linux, Mac OS X)."""
    platforms = SolarisTask.platforms + LinuxTask.platforms + MacOSXTask.platforms
class AllOsTask(UnixTask, WindowsTask):
    """Task that runs on every supported platform."""
    platforms = UnixTask.platforms + WindowsTask.platforms
def make_curl_task(name, user, password, url,
                   timeout=60, log_file="couchbase.log", base_task=AllOsTask,
                   **kwargs):
    """Build a task that fetches *url* via curl with basic auth. The command
    recorded for display masks the password."""
    def curl_command(secret):
        return ["curl", "-sS", "--proxy", "",
                "-u", "%s:%s" % (user, secret), url]

    return base_task(name, curl_command(password),
                     timeout=timeout,
                     log_file=log_file,
                     command_to_print=curl_command("*****"), **kwargs)
def make_query_task(statement, user, password, port):
    """Build a curl task running a single N1QL *statement* against the local
    query service on *port*."""
    service_url = "http://127.0.0.1:%s/query/service?statement=%s" % (port, urllib.quote(statement))
    return make_curl_task(name="Result of query statement \'%s\'" % statement,
                          user=user, password=password, url=service_url)
def basedir():
    """Return the directory containing this script ("." when invoked with a
    bare name and no path component)."""
    location = os.path.dirname(sys.argv[0])
    return location if location != "" else "."
def make_event_log_task():
    """Build the Windows task dumping application/system error events.

    wmic ntevent can be extremely slow unbounded, so the query is limited to
    approximately the last month.
    """
    from datetime import datetime, timedelta

    # Cut-off timestamp, in wmic's WMI datetime format.
    cutoff = datetime.today() - timedelta(days=31)
    limit = cutoff.strftime('%Y%m%d000000.000000-000')

    return WindowsTask("Event log",
                       "wmic ntevent where "
                       "\""
                       "(LogFile='application' or LogFile='system') and "
                       "EventType<3 and TimeGenerated>'%(limit)s'"
                       "\" "
                       "get TimeGenerated,LogFile,SourceName,EventType,Message "
                       "/FORMAT:list" % {"limit": limit})
def make_os_tasks():
    """Build the list of generic OS-level diagnostic tasks (clock/NTP, process
    lists, memory, disk, network, kernel and system logs). Each task only
    executes on the platform its Task subclass declares."""
    # Couchbase-related process names, interpolated into several commands below.
    programs = " ".join(["moxi", "memcached", "beam.smp",
                         "couch_compact", "godu", "sigar_port",
                         "cbq-engine", "indexer", "projector", "goxdcr",
                         "cbft"])

    _tasks = [
        UnixTask("uname", "uname -a"),
        UnixTask("time and TZ", "date; date -u"),
        UnixTask("ntp time",
                 "ntpdate -q pool.ntp.org || "
                 "nc time.nist.gov 13 || "
                 "netcat time.nist.gov 13"),
        UnixTask("ntp peers", "ntpq -p"),
        UnixTask("raw /etc/sysconfig/clock", "cat /etc/sysconfig/clock"),
        UnixTask("raw /etc/timezone", "cat /etc/timezone"),
        WindowsTask("System information", "systeminfo"),
        WindowsTask("Computer system", "wmic computersystem"),
        WindowsTask("Computer OS", "wmic os"),
        LinuxTask("System Hardware", "lshw -json || lshw"),
        SolarisTask("Process list snapshot", "prstat -a -c -n 100 -t -v -L 1 10"),
        SolarisTask("Process list", "ps -ef"),
        SolarisTask("Service configuration", "svcs -a"),
        SolarisTask("Swap configuration", "swap -l"),
        SolarisTask("Disk activity", "zpool iostat 1 10"),
        SolarisTask("Disk activity", "iostat -E 1 10"),
        LinuxTask("Process list snapshot", "export TERM=''; top -Hb -n1 || top -H n1"),
        LinuxTask("Process list", "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,maj_flt,min_flt,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command"),
        LinuxTask("Raw /proc/vmstat", "cat /proc/vmstat"),
        LinuxTask("Raw /proc/mounts", "cat /proc/mounts"),
        LinuxTask("Raw /proc/partitions", "cat /proc/partitions"),
        # Sampled repeatedly so disk activity over ~10s can be derived.
        LinuxTask("Raw /proc/diskstats", "cat /proc/diskstats; echo ''", num_samples=10, interval=1),
        LinuxTask("Raw /proc/interrupts", "cat /proc/interrupts"),
        LinuxTask("Swap configuration", "free -t"),
        LinuxTask("Swap configuration", "swapon -s"),
        LinuxTask("Kernel modules", "lsmod"),
        # Several distro-version probes; whichever file exists wins.
        LinuxTask("Distro version", "cat /etc/redhat-release"),
        LinuxTask("Distro version", "lsb_release -a"),
        LinuxTask("Distro version", "cat /etc/SuSE-release"),
        LinuxTask("Distro version", "cat /etc/issue"),
        LinuxTask("Installed software", "rpm -qa"),
        # NOTE: AFAIK columns _was_ necessary, but it doesn't appear to be
        # required anymore. I.e. dpkg -l correctly detects stdout as not a
        # tty and stops playing smart on formatting. Lets keep it for few
        # years and then drop, however.
        LinuxTask("Installed software", "COLUMNS=300 dpkg -l"),
        LinuxTask("Extended iostat", "iostat -x -p ALL 1 10 || iostat -x 1 10"),
        LinuxTask("Core dump settings", "find /proc/sys/kernel -type f -name '*core*' -print -exec cat '{}' ';'"),
        UnixTask("sysctl settings", "sysctl -a"),
        LinuxTask("Relevant lsof output",
                  "echo %(programs)s | xargs -n1 pgrep | xargs -n1 -r -- lsof -n -p" % locals()),
        LinuxTask("LVM info", "lvdisplay"),
        LinuxTask("LVM info", "vgdisplay"),
        LinuxTask("LVM info", "pvdisplay"),
        MacOSXTask("Process list snapshot", "top -l 1"),
        MacOSXTask("Disk activity", "iostat 1 10"),
        MacOSXTask("Process list",
                   "ps -Aww -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,"
                   "stat,wchan:12,start,bsdtime,command"),
        WindowsTask("Installed software", "wmic product get name, version"),
        WindowsTask("Service list", "wmic service where state=\"running\" GET caption, name, state"),
        WindowsTask("Process list", "wmic process"),
        WindowsTask("Process usage", "tasklist /V /fo list"),
        WindowsTask("Swap settings", "wmic pagefile"),
        WindowsTask("Disk partition", "wmic partition"),
        WindowsTask("Disk volumes", "wmic volume"),
        # Network config is sampled twice, 10s apart, to reveal churn.
        UnixTask("Network configuration", "ifconfig -a", interval=10,
                 num_samples=2),
        LinuxTask("Network configuration", "echo link addr neigh rule route netns | xargs -n1 -- sh -x -c 'ip $1 list' --"),
        WindowsTask("Network configuration", "ipconfig /all", interval=10,
                    num_samples=2),
        LinuxTask("Raw /proc/net/dev", "cat /proc/net/dev"),
        LinuxTask("Network link statistics", "ip -s link"),
        UnixTask("Network status", "netstat -anp || netstat -an"),
        WindowsTask("Network status", "netstat -ano"),
        AllOsTask("Network routing table", "netstat -rn"),
        LinuxTask("Network socket statistics", "ss -an"),
        LinuxTask("Extended socket statistics", "ss -an --info --processes"),
        UnixTask("Arp cache", "arp -na"),
        LinuxTask("Iptables dump", "iptables-save"),
        UnixTask("Raw /etc/hosts", "cat /etc/hosts"),
        UnixTask("Raw /etc/resolv.conf", "cat /etc/resolv.conf"),
        UnixTask("Raw /etc/nsswitch.conf", "cat /etc/nsswitch.conf"),
        WindowsTask("Arp cache", "arp -a"),
        WindowsTask("Network Interface Controller", "wmic nic"),
        WindowsTask("Network Adapter", "wmic nicconfig"),
        WindowsTask("Active network connection", "wmic netuse"),
        WindowsTask("Protocols", "wmic netprotocol"),
        WindowsTask("Hosts file", "type %SystemRoot%\system32\drivers\etc\hosts"),
        WindowsTask("Cache memory", "wmic memcache"),
        WindowsTask("Physical memory", "wmic memphysical"),
        WindowsTask("Physical memory chip info", "wmic memorychip"),
        WindowsTask("Local storage devices", "wmic logicaldisk"),
        UnixTask("Filesystem", "df -ha"),
        UnixTask("System activity reporter", "sar 1 10"),
        UnixTask("System paging activity", "vmstat 1 10"),
        UnixTask("System uptime", "uptime"),
        UnixTask("couchbase user definition", "getent passwd couchbase"),
        # NOTE(review): the following task appears twice back-to-back in the
        # original source; kept verbatim here.
        UnixTask("couchbase user limits", "su couchbase -c \"ulimit -a\"",
                 privileged=True),
        UnixTask("couchbase user limits", "su couchbase -c \"ulimit -a\"",
                 privileged=True),
        UnixTask("Interrupt status", "intrstat 1 10"),
        UnixTask("Processor status", "mpstat 1 10"),
        UnixTask("System log", "cat /var/adm/messages"),
        LinuxTask("Raw /proc/uptime", "cat /proc/uptime"),
        LinuxTask("All logs", "tar cz /var/log/syslog* /var/log/dmesg /var/log/messages* /var/log/daemon* /var/log/debug* /var/log/kern.log* 2>/dev/null",
                  log_file="syslog.tar.gz", no_header=True),
        LinuxTask("Relevant proc data", "echo %(programs)s | "
                  "xargs -n1 pgrep | xargs -n1 -- sh -c 'echo $1; cat /proc/$1/status; cat /proc/$1/limits; cat /proc/$1/smaps; cat /proc/$1/numa_maps; cat /proc/$1/task/*/sched; echo' --" % locals()),
        LinuxTask("Processes' environment", "echo %(programs)s | "
                  r"xargs -n1 pgrep | xargs -n1 -- sh -c 'echo $1; ( cat /proc/$1/environ | tr \\0 \\n ); echo' --" % locals()),
        LinuxTask("NUMA data", "numactl --hardware"),
        LinuxTask("NUMA data", "numactl --show"),
        LinuxTask("NUMA data", "cat /sys/devices/system/node/node*/numastat"),
        UnixTask("Kernel log buffer", "dmesg -H || dmesg"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/transparent_hugepage/enabled"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/transparent_hugepage/defrag"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/redhat_transparent_hugepage/enabled"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/redhat_transparent_hugepage/defrag"),
        LinuxTask("Network statistics", "netstat -s"),
        LinuxTask("Full raw netstat", "cat /proc/net/netstat"),
        LinuxTask("CPU throttling info", "echo /sys/devices/system/cpu/cpu*/thermal_throttle/* | xargs -n1 -- sh -c 'echo $1; cat $1' --"),
        make_event_log_task(),
        ]

    return _tasks
# stolen from http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
def iter_flatten(iterable):
    """Yield the elements of *iterable*, recursively flattening any nested
    lists or tuples."""
    for element in iterable:
        if isinstance(element, (list, tuple)):
            for inner in iter_flatten(element):
                yield inner
        else:
            yield element
def flatten(iterable):
    """Return *iterable* recursively flattened into a plain list."""
    return list(iter_flatten(iterable))
def read_guts(guts, key):
    """Look up *key* in the server-guts dict, defaulting to an empty string."""
    missing = ""
    return guts.get(key, missing)
def winquote_path(s):
    """Quote a path for cmd.exe: collapse doubled backslashes, turn forward
    slashes into backslashes, and wrap the result in double quotes."""
    normalized = s.replace("\\\\", "\\").replace('/', "\\")
    return '"%s"' % normalized
# python's split splits empty string to [''] which doesn't make any
# sense. So this function works around that.
def correct_split(string, splitchar):
    """Like str.split, except an empty input yields [] rather than ['']."""
    parts = string.split(splitchar)
    return [] if parts == [''] else parts
def make_stats_archives_task(guts, initargs_path):
    """Return a task dumping the ns_server stats archives as JSON, or [] when
    the helper scripts or the stats directory are unavailable (an empty list
    disappears when the caller flattens the task list)."""
    escript = exec_name("escript")
    escript_wrapper = find_script("escript-wrapper")
    dump_stats = find_script("dump-stats")
    stats_dir = read_guts(guts, "stats_dir")

    if dump_stats is None or escript_wrapper is None or not stats_dir:
        return []

    command = [escript,
               escript_wrapper,
               "--initargs-path", initargs_path, "--",
               dump_stats, stats_dir]
    return AllOsTask("stats archives",
                     command,
                     no_header=True,
                     log_file="stats_archives.json")
def make_product_task(guts, initargs_path, options):
    """Build the Couchbase-specific diagnostic tasks: directory listings,
    config/log captures, REST diagnostics, per-service (query/index/fts)
    endpoints and memcached stats. *guts* is the dict produced by
    get_server_guts(); *options* carries the parsed command-line options."""
    # Install root is four levels above the initargs file.
    root = os.path.abspath(os.path.join(initargs_path, "..", "..", "..", ".."))
    dbdir = read_guts(guts, "db_dir")
    viewdir = read_guts(guts, "idx_dir")

    diag_url = "http://127.0.0.1:%s/diag?noLogs=1" % read_guts(guts, "rest_port")
    if options.single_node_diag:
        diag_url += "&oneNode=1"

    from distutils.spawn import find_executable

    # Pick the first available DNS lookup utility.
    lookup_cmd = None
    for cmd in ["dig", "nslookup", "host"]:
        if find_executable(cmd) is not None:
            lookup_cmd = cmd
            break

    lookup_tasks = []
    if lookup_cmd is not None:
        lookup_tasks = [UnixTask("DNS lookup information for %s" % node,
                                 "%(lookup_cmd)s '%(node)s'" % locals())
                        for node in correct_split(read_guts(guts, "nodes"), ",")]

    # N1QL system-catalog queries, only when the query service is configured.
    query_tasks = []
    query_port = read_guts(guts, "query_port")
    if query_port:
        def make(statement):
            return make_query_task(statement, user="@",
                                   password=read_guts(guts, "memcached_pass"),
                                   port=query_port)

        query_tasks = [make("SELECT * FROM system:datastores"),
                       make("SELECT * FROM system:namespaces"),
                       make("SELECT * FROM system:keyspaces"),
                       make("SELECT * FROM system:indexes")]

    index_tasks = []
    index_port = read_guts(guts, "indexer_http_port")
    if index_port:
        url = 'http://127.0.0.1:%s/getIndexStatus' % index_port
        index_tasks = [make_curl_task(name="Index definitions are: ",
                                      user="@", password=read_guts(guts, "memcached_pass"), url=url)]

    fts_tasks = []
    fts_port = read_guts(guts, "fts_http_port")
    if fts_port:
        url = 'http://127.0.0.1:%s/api/diag' % fts_port
        fts_tasks = [make_curl_task(name="FTS /api/diag: ",
                                    user="@", password=read_guts(guts, "memcached_pass"), url=url)]

    _tasks = [
        UnixTask("Directory structure",
                 ["ls", "-lRai", root]),
        UnixTask("Database directory structure",
                 ["ls", "-lRai", dbdir]),
        UnixTask("Index directory structure",
                 ["ls", "-lRai", viewdir]),
        UnixTask("couch_dbinfo",
                 ["find", dbdir, "-type", "f",
                  "-name", "*.couch.*",
                  "-exec", "couch_dbinfo", "{}", "+"]),
        LinuxTask("Database directory filefrag info",
                  ["find", dbdir, "-type", "f", "-exec", "filefrag", "-v", "{}", "+"]),
        LinuxTask("Index directory filefrag info",
                  ["find", viewdir, "-type", "f", "-exec", "filefrag", "-v", "{}", "+"]),
        WindowsTask("Database directory structure",
                    "dir /s " + winquote_path(dbdir)),
        WindowsTask("Index directory structure",
                    "dir /s " + winquote_path(viewdir)),
        WindowsTask("Version file",
                    "type " + winquote_path(basedir()) + "\\..\\VERSION.txt"),
        WindowsTask("Manifest file",
                    "type " + winquote_path(basedir()) + "\\..\\manifest.txt"),
        WindowsTask("Manifest file",
                    "type " + winquote_path(basedir()) + "\\..\\manifest.xml"),
        LinuxTask("Version file", "cat '%s/VERSION.txt'" % root),
        LinuxTask("Manifest file", "cat '%s/manifest.txt'" % root),
        LinuxTask("Manifest file", "cat '%s/manifest.xml'" % root),
        # These three come straight from dump-guts output, no command run.
        AllOsTask("Couchbase config", "", literal = read_guts(guts, "ns_config")),
        AllOsTask("Couchbase static config", "", literal = read_guts(guts, "static_config")),
        AllOsTask("Raw ns_log", "", literal = read_guts(guts, "ns_log")),
        # TODO: just gather those in python
        WindowsTask("Memcached logs",
                    "cd " + winquote_path(read_guts(guts, "memcached_logs_path")) + " && " +
                    "for /f %a IN ('dir /od /b memcached.log.*') do type %a",
                    log_file="memcached.log"),
        UnixTask("Memcached logs",
                 ["sh", "-c", 'cd "$1"; for file in $(ls -tr memcached.log.*); do cat \"$file\"; done', "--", read_guts(guts, "memcached_logs_path")],
                 log_file="memcached.log"),
        [WindowsTask("Ini files (%s)" % p,
                     "type " + winquote_path(p),
                     log_file="ini.log")
         for p in read_guts(guts, "couch_inis").split(";")],
        UnixTask("Ini files",
                 ["sh", "-c", 'for i in "$@"; do echo "file: $i"; cat "$i"; done', "--"] + read_guts(guts, "couch_inis").split(";"),
                 log_file="ini.log"),
        make_curl_task(name="couchbase diags",
                       user="@",
                       password=read_guts(guts, "memcached_pass"),
                       timeout=600,
                       url=diag_url,
                       log_file="diag.log"),
        make_curl_task(name="master events",
                       user="@",
                       password=read_guts(guts, "memcached_pass"),
                       timeout=300,
                       url='http://127.0.0.1:%s/diag/masterEvents?o=1' % read_guts(guts, "rest_port"),
                       log_file="master_events.log",
                       no_header=True),
        make_curl_task(name="ale configuration",
                       user="@",
                       password=read_guts(guts, "memcached_pass"),
                       url='http://127.0.0.1:%s/diag/ale' % read_guts(guts, "rest_port"),
                       log_file="couchbase.log"),
        [AllOsTask("couchbase logs (%s)" % name, "cbbrowse_logs %s" % name,
                   addenv = [("REPORT_DIR", read_guts(guts, "log_path"))],
                   log_file="ns_server.%s" % name)
         for name in ["debug.log", "info.log", "error.log", "couchdb.log",
                      "xdcr.log", "xdcr_errors.log",
                      "views.log", "mapreduce_errors.log",
                      "stats.log", "babysitter.log", "ssl_proxy.log",
                      "reports.log", "xdcr_trace.log", "http_access.log",
                      "http_access_internal.log", "ns_couchdb.log",
                      "goxdcr.log", "query.log", "projector.log", "indexer.log",
                      "fts.log", "metakv.log"]],
        [AllOsTask("memcached stats %s" % kind,
                   flatten(["cbstats", "-a", "127.0.0.1:%s" % read_guts(guts, "memcached_port"), kind, "-b", read_guts(guts, "memcached_admin"), "-p", read_guts(guts, "memcached_pass")]),
                   log_file="stats.log",
                   timeout=60)
         for kind in ["all", "allocator", "checkpoint", "config",
                      "dcp", "dcpagg",
                      ["diskinfo", "detail"], ["dispatcher", "logs"],
                      "failovers", ["hash", "detail"],
                      "kvstore", "kvtimings", "memory",
                      "prev-vbucket",
                      "runtimes", "scheduler",
                      "tap", "tapagg",
                      "timings", "uuid",
                      "vbucket", "vbucket-details", "vbucket-seqno",
                      "warmup", "workload"]],
        [AllOsTask("memcached mcstat %s" % kind,
                   flatten(["mcstat", "-h", "127.0.0.1:%s" % read_guts(guts, "memcached_port"),
                            "-u", read_guts(guts, "memcached_admin"),
                            "-P", read_guts(guts, "memcached_pass"), kind]),
                   log_file="stats.log",
                   timeout=60)
         for kind in ["connections"]],
        # Design documents for every couchstore (non-memcached) bucket.
        [AllOsTask("ddocs for %s (%s)" % (bucket, path),
                   ["couch_dbdump", path],
                   log_file = "ddocs.log")
         for bucket in set(correct_split(read_guts(guts, "buckets"), ",")) - set(correct_split(read_guts(guts, "memcached_buckets"), ","))
         for path in glob.glob(os.path.join(dbdir, bucket, "master.couch*"))],
        [AllOsTask("replication docs (%s)" % (path),
                   ["couch_dbdump", path],
                   log_file = "ddocs.log")
         for path in glob.glob(os.path.join(dbdir, "_replicator.couch*"))],
        [AllOsTask("Couchstore local documents (%s, %s)" % (bucket, os.path.basename(path)),
                   ["couch_dbdump", "--local", path],
                   log_file = "couchstore_local.log")
         for bucket in set(correct_split(read_guts(guts, "buckets"), ",")) - set(correct_split(read_guts(guts, "memcached_buckets"), ","))
         for path in glob.glob(os.path.join(dbdir, bucket, "*.couch.*"))],
        [UnixTask("moxi stats (port %s)" % port,
                  "echo stats proxy | nc 127.0.0.1 %s" % port,
                  log_file="stats.log",
                  timeout=60)
         for port in correct_split(read_guts(guts, "moxi_ports"), ",")],
        [AllOsTask("mctimings",
                   ["mctimings",
                    "-u", read_guts(guts, "memcached_admin"),
                    "-P", read_guts(guts, "memcached_pass"),
                    "-h", "127.0.0.1:%s" % read_guts(guts, "memcached_port"),
                    "-v"] + stat,
                   log_file="stats.log",
                   timeout=60)
         for stat in ([], ["subdoc_execute"])],
        make_stats_archives_task(guts, initargs_path)
        ]

    # Nested per-service lists are flattened into one flat task list.
    _tasks = flatten([lookup_tasks, query_tasks, index_tasks, fts_tasks, _tasks])

    return _tasks
def find_script(name):
    """Search for *name* next to this script and in its scripts/ subdirectory;
    return the full path, or None when absent."""
    search_dirs = [basedir(), os.path.join(basedir(), "scripts")]
    for directory in search_dirs:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            log("Found %s: %s" % (name, candidate))
            return candidate
    return None
def get_server_guts(initargs_path):
    """Run the dump-guts escript against *initargs_path* and parse its output.

    The script emits a flat, NUL-separated list of alternating keys and
    values; returns them as a dict, or {} when the script is missing or
    produced no pairs.
    """
    dump_guts_path = find_script("dump-guts")

    if dump_guts_path is None:
        log("Couldn't find dump-guts script. Some information will be missing")
        return {}

    escript = exec_name("escript")
    extra_args = os.getenv("EXTRA_DUMP_GUTS_ARGS")
    args = [escript, dump_guts_path, "--initargs-path", initargs_path]
    if extra_args:
        args = args + extra_args.split(";")
    print("Checking for server guts in %s..." % initargs_path)
    p = subprocess.Popen(args, stdout = subprocess.PIPE)
    output = p.stdout.read()
    p.wait()
    # Tidied: dropped the unused 'rc = p.returncode' local and the
    # commented-out debug print that was its only consumer.
    tokens = output.rstrip("\0").split("\0")
    d = {}
    if len(tokens) > 1:
        # Pairs of (key, value); a trailing unmatched token would raise,
        # as in the original.
        for i in xrange(0, len(tokens), 2):
            d[tokens[i]] = tokens[i+1]
    return d
def guess_utility(command):
    """Best-effort guess of the external executable a task command invokes.

    Returns None for empty commands, the full command string for anything
    involving shells/pipes (let a human decide), otherwise the first word.
    """
    if isinstance(command, list):
        command = ' '.join(command)

    if not command:
        return None

    shell_markers = re.findall(r'[|;&]|\bsh\b|\bsu\b|\bfind\b|\bfor\b', command)
    if shell_markers:
        # something hard to easily understand; let the human decide
        return command
    return command.split()[0]
def dump_utilities(*args, **kwargs):
specific_platforms = { SolarisTask : 'Solaris',
LinuxTask : 'Linux',
WindowsTask : 'Windows',
MacOSXTask : 'Mac OS X' }
platform_utils = dict((name, set()) for name in specific_platforms.values())
class FakeOptions(object):
def __getattr__(self, name):
return None
tasks = make_os_tasks() + make_product_task({}, "", FakeOptions())
for task in tasks:
utility = guess_utility(task.command)
if utility is None:
continue
for (platform, name) in specific_platforms.items():
if isinstance(task, platform):
platform_utils[name].add(utility)
print '''This is an autogenerated, possibly incomplete and flawed list
of utilites used by cbcollect_info'''
for (name, utilities) in sorted(platform_utils.items(), key=lambda x: x[0]):
print "\n%s:" % name
for utility in sorted(utilities):
print " - %s" % utility
sys.exit(0)
def setup_stdin_watcher():
    """Spawn a daemon thread that triggers an abnormal exit (status 2) as soon
    as a line (or EOF) arrives on stdin."""
    def watch():
        sys.stdin.readline()
        AltExit.exit(2)

    watcher = threading.Thread(target = watch)
    watcher.setDaemon(True)
    watcher.start()
class CurlKiller:
    """Holds a curl subprocess so it can be force-killed on abnormal exit;
    disarm() once the upload completes normally."""

    def __init__(self, p):
        self.p = p

    def cleanup(self):
        """Kill the tracked process, if still armed."""
        if self.p is not None:
            print("Killing curl...")
            os.kill(self.p.pid, signal.SIGKILL)
            print("done")

    def disarm(self):
        """Forget the process so cleanup() becomes a no-op."""
        self.p = None
def do_upload_and_exit(path, url):
    """Upload *path* to *url* with curl, then exit: status 0 on an HTTP 200
    response, 1 on any curl failure or other HTTP status."""
    output_fd, output_file = tempfile.mkstemp()
    os.close(output_fd)

    AltExit.register(lambda: os.unlink(output_file))

    args = ["curl", "-sS",
            "--output", output_file,
            "--write-out", "%{http_code}", "--upload-file", path, url]
    AltExit.lock.acquire()
    try:
        p = subprocess.Popen(args, stdout=subprocess.PIPE)
        k = CurlKiller(p)
        AltExit.register_and_unlock(k.cleanup)
    except Exception:
        AltExit.lock.release()
        # BUG FIX: bare 'raise' preserves the original traceback; the old
        # 'raise e' reset it under Python 2.
        raise
    stdout, _ = p.communicate()
    k.disarm()

    if p.returncode != 0:
        sys.exit(1)
    else:
        # curl's --write-out leaves the HTTP status code on stdout.
        if stdout.strip() == '200':
            log('Done uploading')
            sys.exit(0)
        else:
            log('HTTP status code: %s' % stdout)
            sys.exit(1)
def parse_host(host):
    """Split *host* into (scheme, netloc, path), assuming https:// when no
    scheme was given."""
    parsed = urlparse.urlsplit(host)
    if not parsed.scheme:
        parsed = urlparse.urlsplit('https://' + host)

    return parsed.scheme, parsed.netloc, parsed.path
def generate_upload_url(parser, options, zip_filename):
    """Build the full upload URL from the --upload-host/--customer/--ticket
    options; returns None when no upload host was requested."""
    if not options.upload_host:
        return None

    if not options.upload_customer:
        parser.error("Need --customer when --upload-host is given")

    scheme, netloc, path = parse_host(options.upload_host)

    customer = urllib.quote(options.upload_customer)
    fname = urllib.quote(zip_filename)
    if options.upload_ticket:
        full_path = '%s/%s/%d/%s' % (path, customer, options.upload_ticket, fname)
    else:
        full_path = '%s/%s/%s' % (path, customer, fname)

    upload_url = urlparse.urlunsplit((scheme, netloc, full_path, '', ''))
    log("Will upload collected .zip file into %s" % upload_url)
    return upload_url
def check_ticket(option, opt, value):
    """optparse type-checker for ticket numbers: 1-7 digits, returned as int."""
    if not re.match('^\d{1,7}$', value):
        raise optparse.OptionValueError(
            "option %s: invalid ticket number: %r" % (opt, value))
    return int(value)
class CbcollectInfoOptions(optparse.Option):
    """optparse Option subclass that adds a custom "ticket" value type,
    validated/converted by check_ticket()."""
    # NOTE: importing inside the class body deliberately binds 'copy' only
    # for use while the class body executes (it also becomes a class
    # attribute as a side effect).
    from copy import copy

    TYPES = optparse.Option.TYPES + ("ticket",)
    # Copy the base checker map so the base class is not mutated.
    TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
    TYPE_CHECKER["ticket"] = check_ticket
def find_primary_addr(default = None):
    """Return the local IP address used for the default route, or *default*.

    Connecting a UDP socket sends no packets; it merely asks the kernel to
    pick the source address it would use to reach 8.8.8.8.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        try:
            probe.connect(("8.8.8.8", 56))
            return probe.getsockname()[0]
        except socket.error:
            return default
    finally:
        probe.close()
def exec_name(name):
    """Append ".exe" on Windows so *name* matches the on-disk executable."""
    if sys.platform == 'win32':
        return name + ".exe"
    return name
| [
"tleyden@couchbase.com"
] | tleyden@couchbase.com |
72dde4d0cca5ada32dd37e6e36d79b7dc6680cba | 685e7dc080a383d12dd526a510a8f74c34ef2e71 | /tests/nonci/test_compare_pretrained.py | cf57a0df969679003ebcb54d6d1f3d881dc8170d | [
"MIT"
] | permissive | 18813055625/bert-for-tf2 | f1b86351675861ebe710bb4f94e99b89a639f83a | e71d108f0bd8c5af0c4e0b8427b144e996c02fdb | refs/heads/master | 2020-07-29T08:24:33.635201 | 2019-09-09T11:56:37 | 2019-09-09T11:56:37 | 209,729,589 | 0 | 1 | MIT | 2019-09-20T07:16:54 | 2019-09-20T07:16:53 | null | UTF-8 | Python | false | false | 8,706 | py | # coding=utf-8
#
# created by kpe on 27.Mar.2019 at 15:37
#
from __future__ import absolute_import, division, print_function
import unittest
import re
import os
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
import params
from bert import BertModelLayer
from bert.loader import map_from_stock_variale_name, map_to_stock_variable_name, load_stock_weights
from bert.loader import StockBertConfig, map_stock_config_to_params
from bert.tokenization import FullTokenizer
tf.compat.v1.disable_eager_execution()
class TestCompareBertsOnPretrainedWeight(unittest.TestCase):
    """Compares this project's BertModelLayer against the stock
    google-research BERT implementation over the same pre-trained checkpoint
    (expects the uncased BERT-Base model unpacked under .models/)."""

    # On-disk locations of the pre-trained checkpoint and its config.
    bert_ckpt_dir = ".models/uncased_L-12_H-768_A-12/"
    bert_ckpt_file = bert_ckpt_dir + "bert_model.ckpt"
    bert_config_file = bert_ckpt_dir + "bert_config.json"

    def test_bert_original_weights(self):
        """Dump the variable names/shapes stored in the stock checkpoint."""
        print("bert checkpoint: ", self.bert_ckpt_file)
        bert_vars = tf.train.list_variables(self.bert_ckpt_file)
        for ndx, var in enumerate(bert_vars):
            print("{:3d}".format(ndx), var)

    def create_bert_model(self, max_seq_len=18):
        """Build a keras Model wrapping a BertModelLayer configured from the
        stock bert_config.json; returns (model, bert_layer, input_tensors)."""
        bc = None
        with tf.io.gfile.GFile(self.bert_config_file, "r") as reader:
            bc = StockBertConfig.from_json_string(reader.read())

        bert = BertModelLayer.from_params(map_stock_config_to_params(bc),
                                          name="bert")

        input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids")
        token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids")
        output = bert([input_ids, token_type_ids])
        model = keras.Model(inputs=[input_ids, token_type_ids], outputs=output)

        return model, bert, (input_ids, token_type_ids)

    def test_keras_weights(self):
        """Print the trainable variables exposed by the keras BERT layer."""
        max_seq_len = 18
        model, bert, inputs = self.create_bert_model(18)
        model.build(input_shape=[(None, max_seq_len),
                                 (None, max_seq_len)])
        model.summary()
        for ndx, var in enumerate(bert.trainable_variables):
            print("{:3d}".format(ndx), var.name, var.shape)
        #for ndx, var in enumerate(model.trainable_variables):
        #    print("{:3d}".format(ndx), var.name, var.shape)

    def test___compare_weights(self):
        """Verify the checkpoint variable names map onto keras variable names
        (with matching shapes) in both directions, asserting the expected
        matched/unmatched counts for BERT-Base."""
        #tf.reset_default_graph()
        max_seq_len = 18
        model, bert, inputs = self.create_bert_model(18)
        model.build(input_shape=[(None, max_seq_len),
                                 (None, max_seq_len)])

        stock_vars = tf.train.list_variables(self.bert_ckpt_file)
        stock_vars = {name: list(shape) for name, shape in stock_vars}

        # Strip the ":0" suffix from keras variable names for comparison.
        keras_vars = model.trainable_variables
        keras_vars = {var.name.split(":")[0]: var.shape.as_list() for var in keras_vars}

        # Direction 1: checkpoint -> keras.
        matched_vars = set()
        unmatched_vars = set()
        shape_errors = set()
        for name in stock_vars:
            bert_name = name
            keras_name = map_from_stock_variale_name(bert_name)
            if keras_name in keras_vars:
                if keras_vars[keras_name] == stock_vars[bert_name]:
                    matched_vars.add(bert_name)
                else:
                    shape_errors.add(bert_name)
            else:
                unmatched_vars.add(bert_name)

        print("bert -> keras:")
        print(" matched count:", len(matched_vars))
        print(" unmatched count:", len(unmatched_vars))
        print(" shape error count:", len(shape_errors))
        print("unmatched:\n", "\n ".join(unmatched_vars))

        # 9 checkpoint vars (cls/pooler heads etc.) have no keras counterpart.
        self.assertEqual(197, len(matched_vars))
        self.assertEqual(9, len(unmatched_vars))
        self.assertEqual(0, len(shape_errors))

        # Direction 2: keras -> checkpoint; every keras var must map.
        matched_vars = set()
        unmatched_vars = set()
        shape_errors = set()
        for name in keras_vars:
            keras_name = name
            bert_name = map_to_stock_variable_name(keras_name)
            if bert_name in stock_vars:
                if stock_vars[bert_name] == keras_vars[keras_name]:
                    matched_vars.add(keras_name)
                else:
                    shape_errors.add(keras_name)
            else:
                unmatched_vars.add(keras_name)

        print("keras -> bert:")
        print(" matched count:", len(matched_vars))
        print(" unmatched count:", len(unmatched_vars))
        print(" shape error count:", len(shape_errors))
        print("unmatched:\n", "\n ".join(unmatched_vars))

        self.assertEqual(197, len(matched_vars))
        self.assertEqual(0, len(unmatched_vars))
        self.assertEqual(0, len(shape_errors))

    def predict_on_keras_model(self, input_ids, input_mask, token_type_ids):
        """Run the keras BERT (with checkpoint weights loaded) on one batch.

        NOTE(review): input_mask is accepted but not fed to the keras model.
        """
        max_seq_len = input_ids.shape[-1]

        model, bert, k_inputs = self.create_bert_model(max_seq_len)
        model.build(input_shape=[(None, max_seq_len),
                                 (None, max_seq_len)])

        load_stock_weights(bert, self.bert_ckpt_file)

        k_res = model.predict([input_ids, token_type_ids])
        return k_res

    def predict_on_stock_model(self, input_ids, input_mask, token_type_ids):
        """Run the stock (graph-mode) BERT implementation on the same batch,
        restoring the weights from the checkpoint."""
        from tests.ext.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint

        tf.compat.v1.reset_default_graph()

        # NOTE(review): this alias is unused below.
        tf_placeholder = tf.compat.v1.placeholder

        max_seq_len = input_ids.shape[-1]
        pl_input_ids = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))
        pl_mask = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))
        pl_token_type_ids = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))

        bert_config = BertConfig.from_json_file(self.bert_config_file)
        # NOTE(review): tokenizer is constructed but never used here.
        tokenizer = FullTokenizer(vocab_file=os.path.join(self.bert_ckpt_dir, "vocab.txt"))

        s_model = BertModel(config=bert_config,
                            is_training=False,
                            input_ids=pl_input_ids,
                            input_mask=pl_mask,
                            token_type_ids=pl_token_type_ids,
                            use_one_hot_embeddings=False)

        tvars = tf.compat.v1.trainable_variables()
        (assignment_map, initialized_var_names) = get_assignment_map_from_checkpoint(tvars, self.bert_ckpt_file)
        tf.compat.v1.train.init_from_checkpoint(self.bert_ckpt_file, assignment_map)

        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())

            s_res = sess.run(
                s_model.get_sequence_output(),
                feed_dict={pl_input_ids: input_ids,
                           pl_token_type_ids: token_type_ids,
                           pl_mask: input_mask,
                           })
        return s_res

    def test_direct_keras_to_stock_compare(self):
        """End-to-end check: both implementations must produce (near-)identical
        sequence outputs for the same tokenized input."""
        from tests.ext.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint

        bert_config = BertConfig.from_json_file(self.bert_config_file)
        tokenizer = FullTokenizer(vocab_file=os.path.join(self.bert_ckpt_dir, "vocab.txt"))

        # prepare input
        max_seq_len = 6
        input_str = "Hello, Bert!"
        input_tokens = tokenizer.tokenize(input_str)
        input_tokens = ["[CLS]"] + input_tokens + ["[SEP]"]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        # Pad ids/mask/segments out to max_seq_len.
        input_ids = input_ids + [0]*(max_seq_len - len(input_tokens))
        input_mask = [1]*len(input_tokens) + [0]*(max_seq_len - len(input_tokens))
        token_type_ids = [0]*len(input_tokens) + [0]*(max_seq_len - len(input_tokens))

        input_ids = np.array([input_ids], dtype=np.int32)
        input_mask = np.array([input_mask], dtype=np.int32)
        token_type_ids = np.array([token_type_ids], dtype=np.int32)

        print(" tokens:", input_tokens)
        print("input_ids:{}/{}:{}".format(len(input_tokens), max_seq_len, input_ids), input_ids.shape, token_type_ids)

        s_res = self.predict_on_stock_model(input_ids, input_mask, token_type_ids)
        k_res = self.predict_on_keras_model(input_ids, input_mask, token_type_ids)

        np.set_printoptions(precision=9, threshold=20, linewidth=200, sign="+", floatmode="fixed")

        print("s_res", s_res.shape)
        print("k_res", k_res.shape)

        print("s_res:\n {}".format(s_res[0, :2, :10]), s_res.dtype)
        print("k_res:\n {}".format(k_res[0, :2, :10]), k_res.dtype)

        adiff = np.abs(s_res-k_res).flatten()
        print("diff:", np.max(adiff), np.argmax(adiff))
        self.assertTrue(np.allclose(s_res, k_res, atol=1e-6))
| [
"kpe.git@gmailbox.org"
] | kpe.git@gmailbox.org |
2cb62f842ce83e502bbe612b698ba73756afc2e2 | a4571dd0b2ebb0f3c50a540355910d502796ea6a | /EX - 79.py | b45ee0bd62eb1934035ac3e1b7cbf14cd3fc653c | [] | no_license | Eduflutter/EXE_python | bf2f3d881e97e75a70505635b483fda642f5f6c7 | acc38dabf3d8b694fbe674c54e283cf55a5578d8 | refs/heads/master | 2023-05-01T14:59:30.839428 | 2020-10-02T17:31:23 | 2020-10-02T17:31:23 | 366,124,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | '''
Crie um progrma onde o usário possa
digitar vários valores numéricos e cadrastre-os em
uma lista.Caso o número já axista lá dentro, ele
não será adicionado.No final, serão exibidos todos
os valores únicos digitados, em ordem crescente.
'''
print('\33c')
print('\33[1;34m-\33[1;33m'*40)
print(f'\33[1;32m{":":<0}\33[m\33[1;32m{"CADRASTRO NUMÈRICOS":^38}\33[m\33[1;32m{":":>0}\33[m')
print('\33[1;34m-\33[m'*40)
valor = []
while True:
n = int(input('Digite O valor: '))
if n not in valor:
valor.append(n)
else:
print('Valor duplicado! não adicionado...')
r = str(input('Deseja continuar?: [S / N] ')).upper()[0]
if r in 'N':
break
print('-'*40)
print(f'O banco de dados dos valores forão: ')
print('-'*40)
print(f'{sorted(valor)}') | [
"eduoliveira3939@gmail.com"
] | eduoliveira3939@gmail.com |
88d6df365f47a253326935bb8fac400997ba4126 | 702339cb0a4d0a1f7f01705107d77a4950e6f91d | /Snakefile | be70350f3631faa16423080dcb37b1e6daa5c0c2 | [
"MIT"
] | permissive | hans-vg/snakemake_guppy_basecall | a2c2ad9f9da779f8ce7556fcdf0700f7db7be37c | 76c1a08e2c553a8976108397d292203a1796b81e | refs/heads/main | 2023-08-05T09:45:00.360836 | 2021-09-08T22:39:18 | 2021-09-08T22:39:18 | 400,228,888 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | import glob
configfile: "config.yaml"
inputdirectory=config["directory"]
SAMPLES, = glob_wildcards(inputdirectory+"/{sample}.fast5", followlinks=True)
print(SAMPLES)
wildcard_constraints:
sample="\w+\d+_\w+_\w+\d+_.+_\d"
##### target rules #####
rule all:
input:
expand("basecall/{sample}/sequencing_summary.txt", sample=SAMPLES),
"qc/multiqc.html"
rule make_indvidual_samplefiles:
input:
inputdirectory+"/{sample}.fast5",
output:
"lists/{sample}.txt",
shell:
"basename {input} > {output}"
rule guppy_basecall_persample:
input:
directory=directory(inputdirectory),
samplelist="lists/{sample}.txt",
output:
summary="basecall/{sample}/sequencing_summary.txt",
directory=directory("basecall/{sample}/"),
params:
config["basealgo"]
shell:
"guppy_basecaller -i {input.directory} --input_file_list {input.samplelist} -s {output.directory} -c {params} --trim_barcodes --compress_fastq -x \"auto\" --gpu_runners_per_device 3 --num_callers 2 --chunks_per_runner 200"
#def aggregate_input(wildcards):
# checkpoint_output = checkpoints.guppy_basecall_persample.get(**wildcards).output[1]
# print(checkpoint_output)
# exparr = expand("basecall/{sample}/pass/{runid}.fastq.gz", sample=wildcards.sample, runid=glob_wildcards(os.path.join(checkpoint_output, "pass/", "{runid}.fastq.gz")).runid)
# print(exparr)
# return exparr
#
##SAMPLES, RUNIDS, = glob_wildcards("basecall/{sample}/pass/{runid}.fastq.gz", followlinks=True)
##print(RUNIDS)
##print(SAMPLES)
#
#
#rule fastqc_pretrim:
# input:
# aggregate_input
# output:
# html="qc/fastqc_pretrim/{sample}.html",
# zip="qc/fastqc_pretrim/{sample}_fastqc.zip" # the suffix _fastqc.zip is necessary for multiqc to find the file. If not using multiqc, you are free to choose an arbitrary filename
# params: ""
# log:
# "logs/fastqc_pretrim/{sample}.log"
# threads: 1
# wrapper:
# "0.77.0/bio/fastqc"
#
#rule multiqc:
# input:
# #expand("basecall/{sample}.fastq.gz", sample=SAMPLES)
# #expand("qc/fastqc_pretrim/{sample}_fastqc.zip", sample=SAMPLES)
# expand(rules.fastqc_pretrim.output.zip, sample=SAMPLES)
# output:
# "qc/multiqc.html"
# params:
# "" # Optional: extra parameters for multiqc.
# log:
# "logs/multiqc.log"
# wrapper:
# "0.77.0/bio/multiqc"
##rule fastqc_pretrim:
## input:
## "basecall/{sample}/{failpass}/{runid}.fastq.gz",
## output:
## html="qc/fastqc_pretrim/{sample}_{failpass}_{runid}.html",
## zip="qc/fastqc_pretrim/{sample}_{failpass}_{runid}_fastqc.zip" # the suffix _fastqc.zip is necessary for multiqc to find the file. If not using multiqc, you are free to choose an arbitrary filename
## params: ""
## log:
## "logs/fastqc_pretrim/{sample}_{failpass}_{runid}.log"
## #resources: time_min=320, mem_mb=8000, cpus=1
## threads: 1
## wrapper:
## "v0.75.0/bio/fastqc"
| [
"hvasquezgross@unr.edu"
] | hvasquezgross@unr.edu | |
86e15099b852b3ba1d7f58082cd64ac62fd06500 | 74b8a63615281a74a3646c9a03928bea60c3c6f3 | /pymccrgb/tests/context.py | 25bf269f3088eab6a0e29d1b098ee1e77eea81e9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | lswsyx/pymccrgb | 8d3df8200633b9b9918b8d7ec7ee84baa49750c6 | dc8ad2e46cbe6ff8081c32fa11bce68f869baafa | refs/heads/master | 2023-02-23T20:38:14.727935 | 2020-08-06T21:18:05 | 2020-08-06T21:18:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
import pymccrgb
| [
"robertmsare@gmail.com"
] | robertmsare@gmail.com |
bb9b8448866a42aee485331c76d2d094853127b4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_231/ch22_2020_06_20_19_00_14_584797.py | bd79702d66dbec13e717be885a2a86143f73ec2b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | c= int(input('quantos cigarros vc fuma por dia?'))
a= int(input('ha quantos anos?'))
t= ((10*c*a*365)/1440)
print(t)
| [
"you@example.com"
] | you@example.com |
889a37fe8215598757625f98d9e00660675b6457 | 991d762127850817be2da9fbbb6ba4601d1c1252 | /test_trellis.py | 30f974fdbe80962a1faaa2093d97f0f28a5025fb | [] | no_license | tomjoy/trellis | 8699c264a1d3e287ae145488a172552a2a8c1c64 | ce5e7dfc6cff6386a9ee216ed9be7436816c4512 | refs/heads/master | 2016-09-11T10:33:13.288062 | 2014-11-18T08:19:33 | 2014-11-18T08:19:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,364 | py | from test_sets import *
from peak import context
from peak.events.activity import EventLoop, TwistedEventLoop, Time, NOT_YET
from peak.events import trellis, stm, collections, activity
from peak.util.decorators import rewrap, decorate as d
from peak.util.extremes import Max
import unittest, heapq, mocker, types, sys
try:
import testreactor
except ImportError:
testreactor = None # either twisted or testreactor are missing
try:
import wx
except ImportError:
wx = None
class EventLoopTestCase(unittest.TestCase):
    """Base test case that runs each test inside a fresh service context.

    ``setUp`` enters a new ``context`` state so that service replacements
    made by ``configure_context`` (overridden in subclasses) are isolated
    per-test; ``tearDown`` exits that state again.
    """
    def setUp(self):
        self.state = context.new()
        self.state.__enter__()
        super(EventLoopTestCase, self).setUp()
        self.configure_context()
    def tearDown(self):
        super(EventLoopTestCase, self).tearDown()
        self.state.__exit__(None, None, None)
    def configure_context(self):
        """Hook for subclasses to install replacement services; default is a no-op."""
        pass
class TestListener(stm.AbstractListener):
    """Listener stub whose repr is whatever ``name`` the test assigns to it."""
    def __repr__(self):
        return self.name
class TestSubject(stm.AbstractSubject):
    """Subject stub whose repr is whatever ``name`` the test assigns to it."""
    def __repr__(self):
        return self.name
class DummyError(Exception): pass
class UndirtyListener(TestListener):
    """Listener that always reports itself clean, so it never gets rescheduled."""
    def dirty(self):
        # Claim "nothing to do" so the controller skips this listener
        # when a subject it depends on changes.
        return False
try:
    set
except NameError:
    from sets import Set as set  # pre-2.4 Python: no builtin set type
if wx:
    # Only defined when wxPython imported successfully (see guarded import above).
    class TestWxEventLoop(EventLoopTestCase):
        """Drive the trellis EventLoop service through the wx event loop."""
        def configure_context(self):
            from peak.events.activity import EventLoop, WXEventLoop
            # Replace the EventLoop service with the wx-backed implementation.
            EventLoop <<= WXEventLoop
            self.app = wx.PySimpleApp(redirect=False)
            self.app.ExitOnFrameDelete = False
        def testSequentialCalls(self):
            # Calls queued via EventLoop.call() must run in FIFO order.
            log = []
            EventLoop.call(log.append, 1)
            EventLoop.call(log.append, 2)
            EventLoop.call(log.append, 3)
            EventLoop.call(log.append, 4)
            EventLoop.call(EventLoop.stop)
            EventLoop.run()
            self.assertEqual(log, [1,2,3,4])
        # XXX this should test timing stuff, but the only way to do that
        # is with a wx mock, which I haven't time for as yet.
if testreactor:
    # Only defined when both twisted and testreactor imported successfully.
    class TestReactorEventLoop(EventLoopTestCase, testreactor.ReactorTestCase):
        """Drive the trellis EventLoop/Time services through a test reactor."""
        def configure_context(self):
            from peak.events.activity import Time, EventLoop
            from twisted.internet import reactor
            # Fresh Time service whose clock is the (simulated) reactor clock.
            Time <<= lambda: Time()
            Time.time = reactor.getTime
            EventLoop <<= TwistedEventLoop
        def testSequentialCalls(self):
            # FIFO ordering of queued calls, followed by a time-based rule
            # (IdleTimer) that fires once the simulated clock advances.
            log = []
            EventLoop.call(log.append, 1)
            EventLoop.call(log.append, 2)
            EventLoop.call(log.append, 3)
            EventLoop.call(log.append, 4)
            class IdleTimer(trellis.Component):
                trellis.attrs(
                    idle_timeout = 20,
                    busy = False,
                )
                idle_for = trellis.maintain(
                    lambda self: self.idle_for.begins_with(not self.busy),
                    initially=NOT_YET
                )
                trellis.maintain()   # XXX should be perform
                def alarm(self):
                    # Stop the loop once idle for `idle_timeout` simulated seconds.
                    if self.idle_for[self.idle_timeout] and EventLoop.running:
                        log.append(5)
                        EventLoop.stop()
            it = IdleTimer()
            EventLoop.run()
            self.assertEqual(log, [1,2,3,4,5])
class TestLinks(unittest.TestCase):
    """Tests for the stm.Link doubly-linked dependency structure.

    Each Link joins one subject to one listener and participates in two
    intrusive linked lists: the subject's listener chain and the
    listener's subject chain.  ``setUp`` fully cross-links two subjects
    with two listeners (four links); links are created in the order
    lk11, lk12, lk21, lk22, which determines chain ordering below.
    """
    def setUp(self):
        self.l1 = TestListener(); self.l1.name = 'l1'
        # Fixed typo: this line previously re-assigned self.l1.name ('l2'),
        # leaving l2 unnamed and l1 misnamed.
        self.l2 = TestListener(); self.l2.name = 'l2'
        self.s1 = TestSubject(); self.s1.name = 's1'
        self.s2 = TestSubject(); self.s2.name = 's2'
        self.lk11 = stm.Link(self.s1, self.l1)
        self.lk12 = stm.Link(self.s1, self.l2)
        self.lk21 = stm.Link(self.s2, self.l1)
        self.lk22 = stm.Link(self.s2, self.l2)
    def verify_subjects(self, items):
        """Check (node, expected next_subject, expected prev_subject) triples.

        ``prev`` is only checked for Link nodes; chain heads (listeners)
        have no prev pointer.
        """
        for link, nxt, prev in items:
            self.failUnless(link.next_subject is nxt)
            if isinstance(link,stm.Link):
                self.failUnless(link.prev_subject is prev)
    def verify_listeners(self, items):
        """Check (node, expected next_listener, expected prev_listener) triples."""
        for link, nxt, prev in items:
            self.failUnless(link.next_listener is nxt)
            if isinstance(link,stm.Link):
                self.failUnless(link.prev_listener is prev)
    def testBreakIterSubjects(self):
        # Unlinking the just-yielded link must not break iteration.
        it = self.l1.iter_subjects()
        self.failUnless(it.next() is self.s2)
        self.lk21.unlink()
        self.failUnless(it.next() is self.s1)
    def testBreakIterListeners(self):
        # Same resilience check for the subject-side iterator.
        it = self.s1.iter_listeners()
        self.failUnless(it.next() is self.l2)
        self.lk11.unlink()
        self.failUnless(it.next() is self.l1)
    def testLinkSetup(self):
        # Newest links sit at the head of each chain (LIFO insertion).
        self.verify_subjects([
            (self.l1, self.lk21, None), (self.l2, self.lk22, None),
            (self.lk21, self.lk11, None), (self.lk11, None, self.lk21),
            (self.lk22, self.lk12, None), (self.lk12, None, self.lk22),
        ])
        self.verify_listeners([
            (self.s1, self.lk12, None), (self.s2, self.lk22, None),
            (self.lk22, self.lk21, self.s2), (self.lk21, None, self.lk22),
            (self.lk12, self.lk11, self.s1), (self.lk11, None, self.lk12),
        ])
    def testUnlinkListenerHeadSubjectTail(self):
        # Removing a link that is head of one chain and tail of the other
        # must splice both chains correctly.
        self.lk21.unlink()
        self.verify_subjects([
            (self.l1, self.lk11, None), (self.lk11, None, None)
        ])
        self.verify_listeners([
            (self.s2, self.lk22, None), (self.lk22, None, self.s2)
        ])
    def testUnlinkListenerTailSubjectHead(self):
        # Mirror case of the previous test.
        self.lk12.unlink()
        self.verify_subjects([
            (self.l2, self.lk22, None), (self.lk22, None, None),
        ])
        self.verify_listeners([
            (self.s1, self.lk11, None), (self.lk11, None, self.s1),
        ])
def a(f):
    """Decorator: run the wrapped test method atomically under ``self.ctrl``.

    Applied to test methods via ``d(a)`` so the body executes inside an
    STM transaction (``ctrl.atomically``).
    """
    def atomic_method(self):
        return self.ctrl.atomically(f, self)
    return rewrap(f, atomic_method)
class TestController(unittest.TestCase):
    """White-box tests for the stm.Controller transaction machinery.

    These tests poke directly at controller internals -- scheduling
    queues/layers, read/write tracking, undo logging, rollback and
    retry handling.  ``tearDown`` asserts that every scenario leaves
    the controller in a pristine state.  Methods preceded by ``d(a)``
    run inside an atomic transaction (see the ``a`` decorator).
    """
    def setUp(self):
        self.ctrl = stm.Controller()
        self.t0 = TestListener(); self.t0.name='t0';
        self.t1 = TestListener(); self.t1.name='t1'; self.t1.layer = 1
        self.t2 = TestListener(); self.t2.name='t2'; self.t2.layer = 2
        self.t3 = UndirtyListener(); self.t3.name='t3'
        self.s1 = TestSubject(); self.s2 = TestSubject()
        self.s1.name = 's1'; self.s2.name = 's2'
    def tearDown(self):
        # Verify correct cleanup in all scenarios
        for k,v in dict(
            undo=[], managers={}, queues={}, layers=[], reads={}, writes={},
            has_run={}, destinations=None, routes=None,
            current_listener=None, readonly=False, in_cleanup=False,
            active=False, at_commit=[], to_retry={}
        ).items():
            val = getattr(self.ctrl, k)
            self.assertEqual(val, v, '%s: %r' % (k,val))
    def testScheduleSimple(self):
        # schedule()/cancel() maintain per-layer queues keyed by layer number.
        t1 = TestListener(); t1.name='t1'
        t2 = TestListener(); t2.name='t2'
        self.assertEqual(self.ctrl.layers, [])
        self.assertEqual(self.ctrl.queues, {})
        self.ctrl.schedule(t1)
        self.ctrl.schedule(t2)
        self.assertEqual(self.ctrl.layers, [0])
        self.assertEqual(self.ctrl.queues, {0: {t1:1, t2:1}})
        self.ctrl.cancel(t1)
        self.assertEqual(self.ctrl.layers, [0])
        self.assertEqual(self.ctrl.queues, {0: {t2:1}})
        self.ctrl.cancel(t2)
        # tearDown will assert that everything has been cleared
    def testThreadLocalController(self):
        self.failUnless(isinstance(trellis.ctrl, stm.Controller))
        self.failUnless(isinstance(trellis.ctrl, stm.threading.local))
    def testHeapingCancel(self):
        # verify that cancelling the last listener of a layer keeps
        # the 'layers' list in heap order
        self.ctrl.schedule(self.t0)
        self.ctrl.schedule(self.t2)
        self.ctrl.schedule(self.t1)
        layers = self.ctrl.layers
        self.assertEqual(layers, [0, 2, 1])
        self.ctrl.cancel(self.t0)
        self.assertEqual(heapq.heappop(layers), 1)
        self.assertEqual(heapq.heappop(layers), 2)
        self.assertEqual(self.ctrl.queues, {1: {self.t1:1}, 2: {self.t2:1}})
        self.ctrl.queues.clear()
    def testDoubleAndMissingCancelOrSchedule(self):
        # Redundant cancels and schedules must be idempotent.
        self.ctrl.schedule(self.t2)
        self.ctrl.cancel(self.t0)
        self.ctrl.cancel(self.t2)
        self.ctrl.cancel(self.t2)
        self.ctrl.schedule(self.t1)
        self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
        self.ctrl.schedule(self.t1)
        self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
        self.ctrl.cancel(self.t1)
    def testScheduleLayerBump(self):
        # listener layer must be at least source layer + 1
        self.ctrl.schedule(self.t1)
        self.ctrl.schedule(self.t1, 0)
        self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
        self.ctrl.schedule(self.t1, 1)
        self.assertEqual(self.ctrl.queues, {2: {self.t1:1}})
        self.assertEqual(self.t1.layer, 2)
        self.ctrl.cancel(self.t1)
    d(a)
    def testScheduleRollback(self):
        # when running atomically, scheduling is an undo-logged operation
        self.ctrl.schedule(self.t1)
        self.ctrl.rollback_to(0)
    def testCleanup(self):
        # An exception escaping atomically() must still leave the
        # controller clean (verified by tearDown).
        self.ctrl.schedule(self.t0)
        def raiser():
            # XXX need to actually run one rule, plus start another w/error
            raise DummyError
        try:
            self.ctrl.atomically(self.runAs, self.t0, raiser)
        except DummyError:
            pass
    def testSubjectsMustBeAtomic(self):
        # lock/used/changed are only legal inside an atomic transaction.
        self.assertRaises(AssertionError, self.ctrl.lock, self.s1)
        self.assertRaises(AssertionError, self.ctrl.used, self.s1)
        self.assertRaises(AssertionError, self.ctrl.changed, self.s1)
    d(a)
    def testLockAcquiresManager(self):
        # Locking a subject with a .manager enters that manager exactly once.
        class Dummy:
            def __enter__(*args): pass
            def __exit__(*args): pass
        mgr = self.s1.manager = Dummy()
        self.ctrl.lock(self.s1)
        self.assertEqual(self.ctrl.managers, {mgr:0})
        self.ctrl.lock(self.s2)
        self.assertEqual(self.ctrl.managers, {mgr:0})
    d(a)
    def testReadWrite(self):
        # used()/changed() are no-ops without a current listener; with one,
        # reads maps subject->1 and writes maps subject->listener.
        self.ctrl.used(self.s1)
        self.ctrl.changed(self.s2)
        self.assertEqual(self.ctrl.reads, {})
        self.assertEqual(self.ctrl.writes, {})
        self.ctrl.current_listener = self.t0
        self.ctrl.used(self.s1)
        self.ctrl.changed(self.s2)
        self.assertEqual(self.ctrl.reads, {self.s1:1})
        self.assertEqual(self.ctrl.writes, {self.s2:self.t0})
        self.ctrl.reads.clear() # these would normally be handled by
        self.ctrl.writes.clear() # the run() method's try/finally
        self.ctrl.current_listener = None # reset
    d(a)
    def testNoReadDuringCommit(self):
        self.ctrl.readonly = True
        self.assertRaises(RuntimeError, self.ctrl.changed, self.s1)
        self.ctrl.readonly = False # normally reset by ctrl.run_rule()
    d(a)
    def testRecalcOnWrite(self):
        # Writing a subject schedules its listeners -- except the writer itself.
        stm.Link(self.s1, self.t0)
        stm.Link(self.s2, self.t1)
        stm.Link(self.s2, self.t0)
        self.ctrl.current_listener = self.t1
        self.ctrl.changed(self.s1)
        self.ctrl.changed(self.s2)
        self.assertEqual(self.ctrl.writes, {self.s1:self.t1, self.s2:self.t1})
        sp = self.ctrl.savepoint(); self.ctrl.has_run[self.t1] = self.t1
        self.ctrl._process_writes(self.t1)
        # Only t0 is notified, not t1, since t1 is the listener
        self.assertEqual(self.ctrl.queues, {2: {self.t0:1}})
        self.ctrl.rollback_to(sp)
        self.ctrl.current_listener = None # reset
    d(a)
    def testDependencyUpdatingAndUndo(self):
        # _process_reads() replaces a listener's subject links with the
        # subjects actually read; rollback restores the old links.
        stm.Link(self.s1, self.t0)
        s3 = TestSubject()
        stm.Link(s3, self.t0)
        self.assertEqual(list(self.t0.iter_subjects()), [s3, self.s1])
        self.ctrl.current_listener = self.t0
        self.ctrl.used(self.s1)
        self.ctrl.used(self.s2)
        sp = self.ctrl.savepoint()
        self.ctrl._process_reads(self.t0)
        self.assertEqual(list(self.t0.iter_subjects()), [self.s2, self.s1])
        self.ctrl.rollback_to(sp)
        self.assertEqual(list(self.t0.iter_subjects()), [s3, self.s1])
        self.ctrl.current_listener = None # reset
    def runAs(self, listener, rule):
        # Helper: install `rule` as the listener's run() and execute it
        # through the controller.
        listener.run = rule
        self.ctrl.run_rule(listener)
    d(a)
    def testIsRunningAndHasRan(self):
        def rule():
            self.assertEqual(self.ctrl.current_listener, self.t1)
            self.assertEqual(self.ctrl.has_run, {self.t1: 0})
        sp = self.ctrl.savepoint()
        self.runAs(self.t1, rule)
        self.assertEqual(self.ctrl.current_listener, None)
        self.assertEqual(self.ctrl.has_run, {self.t1: 0})
    d(a)
    def testIsRunningButHasNotRan(self):
        # initialize() (first run of a rule) does not record it in has_run.
        def rule():
            self.assertEqual(self.ctrl.current_listener, self.t1)
            self.assertEqual(self.ctrl.has_run, {})
        sp = self.ctrl.savepoint()
        self.t1.run = rule; self.ctrl.initialize(self.t1) # uninit'd rule
        self.assertEqual(self.ctrl.current_listener, None)
        self.assertEqual(self.ctrl.has_run, {})
    d(a)
    def testScheduleUndo(self):
        sp = self.ctrl.savepoint()
        self.ctrl.schedule(self.t2)
        self.assertEqual(self.ctrl.queues, {2: {self.t2:1}})
        self.ctrl.rollback_to(sp)
        self.assertEqual(self.ctrl.queues, {})
    def testNestedReadOnly(self):
        # A rule invoked from inside another rule still sees readonly mode.
        log = []
        def aRule():
            log.append(trellis.ctrl.readonly); return 1
        c1 = trellis.Cell(aRule)
        c2 = trellis.Cell(lambda: c1.value * aRule())
        c3 = trellis.Performer(lambda: c2.value)
        self.assertEqual(log, [True, True])
    d(a)
    def testWriteProcessingInRun(self):
        # After run_rule(), writes are flushed: listeners of changed subjects
        # are scheduled, except the writer itself and "undirty" listeners.
        stm.Link(self.s1, self.t0)
        stm.Link(self.s2, self.t1)
        stm.Link(self.s2, self.t3)
        stm.Link(self.s2, self.t0)
        def rule():
            self.ctrl.changed(self.s1)
            self.ctrl.changed(self.s2)
            self.assertEqual(self.ctrl.writes, {self.s1:self.t1, self.s2:self.t1})
        self.runAs(self.t1, rule)
        # Only t0 is notified, not t1, since t1 is the listener & t3 is !dirty
        self.assertEqual(self.ctrl.writes, {})
        self.assertEqual(self.ctrl.queues, {2: {self.t0:1}})
        self.ctrl.cancel(self.t0)
    d(a)
    def testReadProcessingInRun(self):
        # After run_rule(), reads are flushed into updated dependency links.
        stm.Link(self.s1, self.t0)
        s3 = TestSubject()
        stm.Link(s3, self.t0)
        self.assertEqual(list(self.t0.iter_subjects()), [s3, self.s1])
        def rule():
            self.ctrl.used(self.s1)
            self.ctrl.used(self.s2)
            self.assertEqual(self.ctrl.reads, {self.s1:1, self.s2:1})
        self.runAs(self.t0, rule)
        self.assertEqual(self.ctrl.reads, {})
        self.assertEqual(list(self.t0.iter_subjects()), [self.s2, self.s1])
    d(a)
    def testReadOnlyDuringMax(self):
        # A listener at layer Max runs in read-only mode.
        def rule():
            self.assertEqual(self.ctrl.readonly, True)
        self.t0.layer = Max
        self.assertEqual(self.ctrl.readonly, False)
        self.runAs(self.t0, rule)
        self.assertEqual(self.ctrl.readonly, False)
    d(a)
    def testRunClearsReadWriteOnError(self):
        # NOTE(review): nothing in `rule` raises DummyError, and the expected
        # writes value ({s2:1}) disagrees with testReadWrite, which asserts
        # writes maps subject -> listener.  A `raise DummyError` may have
        # been lost here -- confirm against upstream history.
        def rule():
            self.ctrl.used(self.s1)
            self.ctrl.changed(self.s2)
            self.assertEqual(self.ctrl.reads, {self.s1:1})
            self.assertEqual(self.ctrl.writes, {self.s2:1})
        try:
            self.runAs(self.t0, rule)
        except DummyError:
            pass
        else:
            raise AssertionError("Error should've propagated")
        self.assertEqual(self.ctrl.reads, {})
        self.assertEqual(self.ctrl.writes, {})
    d(a)
    def testSimpleCycle(self):
        # Mutually-dependent rules must be detected as a CircularityError,
        # whose payload maps each listener to the set of listeners it feeds.
        stm.Link(self.s1, self.t1)
        stm.Link(self.s2, self.t2)
        def rule0():
            self.ctrl.used(self.s1)
            self.ctrl.changed(self.s1)
        def rule1():
            self.ctrl.used(self.s1)
            self.ctrl.changed(self.s2)
        def rule2():
            self.ctrl.used(self.s2)
            self.ctrl.changed(self.s1)
        self.runAs(self.t0, rule0)
        self.runAs(self.t1, rule1)
        self.runAs(self.t2, rule2)
        try:
            self.ctrl._retry()
        except stm.CircularityError, e:
            self.assertEqual(e.args[0],
                {self.t0: set([self.t1]), self.t1: set([self.t2]),
                 self.t2: set([self.t0, self.t1])})
        else:
            raise AssertionError("Should've caught a cycle")
    d(a)
    def testSimpleRetry(self):
        # _retry() un-runs the flagged listener and everything run after it.
        def rule():
            pass
        self.runAs(self.t0, rule)
        self.runAs(self.t1, rule)
        self.runAs(self.t2, rule)
        self.assertEqual(set(self.ctrl.has_run),set([self.t0,self.t1,self.t2]))
        self.ctrl.to_retry[self.t1]=1
        self.ctrl._retry()
        self.assertEqual(set(self.ctrl.has_run), set([self.t0]))
        self.ctrl.to_retry[self.t0]=1
        self.ctrl._retry()
    d(a)
    def testNestedNoRetry(self):
        # A rule initialized from within another rule is scheduled, not
        # retried, and does not appear in has_run.
        def rule0():
            self.t1.run=rule1; self.ctrl.initialize(self.t1)
        def rule1():
            pass
        self.runAs(self.t2, rule1)
        self.runAs(self.t0, rule0)
        self.ctrl.schedule(self.t1)
        self.assertEqual(self.ctrl.to_retry, {})
        self.assertEqual(
            set(self.ctrl.has_run), set([self.t0, self.t2])
        )
        self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
    def testRunScheduled(self):
        # Listeners scheduled during an atomic block run before it commits.
        log = []
        self.t1.run = lambda: log.append(True)
        def go():
            self.ctrl.schedule(self.t1)
        self.ctrl.atomically(go)
        self.assertEqual(log, [True])
    def testRollbackReschedules(self):
        # Rolling back past a listener's run puts it back on the queue.
        sp = []
        def rule0():
            self.ctrl.rollback_to(sp[0])
            self.assertEqual(self.ctrl.queues, {0: {self.t0:1}})
            self.ctrl.cancel(self.t0)
        self.t0.run = rule0
        def go():
            self.ctrl.schedule(self.t0)
            sp.append(self.ctrl.savepoint())
        self.ctrl.atomically(go)
    def testManagerCantCreateLoop(self):
        # A manager's __exit__ (run at commit) may schedule work, but that
        # work runs in the *next* transaction, not the committing one.
        class Mgr:
            def __enter__(self): pass
            def __exit__(*args):
                self.ctrl.schedule(self.t1)
        log = []
        def rule1():
            log.append(True)
        self.t1.run = rule1
        self.t0.run = lambda:self.ctrl.manage(Mgr())
        self.ctrl.atomically(self.ctrl.schedule, self.t0)
        self.assertEqual(log, [])
        self.ctrl.atomically(lambda:None)
        self.assertEqual(log, [True])
    d(a)
    def testNotifyOnChange(self):
        # changed() with a current listener schedules dirty listeners only.
        stm.Link(self.s2, self.t2)
        stm.Link(self.s2, self.t3)
        self.ctrl.changed(self.s2)
        self.ctrl.current_listener = self.t0
        self.ctrl.changed(self.s2)
        self.assertEqual(self.ctrl.queues, {2: {self.t2:1}})
        self.ctrl.cancel(self.t2)
        self.ctrl.writes.clear()
        self.ctrl.current_listener = None # reset
    def testCommitCanLoop(self):
        # on_commit() callbacks may schedule listeners; they still get run.
        log=[]
        def go():
            log.append(True)
        self.t0.run = go
        self.ctrl.atomically(self.ctrl.on_commit, self.ctrl.schedule, self.t0)
        self.assertEqual(log,[True])
    d(a)
    def testNoUndoDuringUndo(self):
        # on_undo() calls made while undoing are ignored (no redo loops).
        def undo():
            self.ctrl.on_undo(redo)
        def redo():
            raise AssertionError("Should not be run")
        self.ctrl.on_undo(undo)
        self.ctrl.rollback_to(0)
    d(a)
    def testReentrantRollbackToMinimumTarget(self):
        sp = self.ctrl.savepoint()
        # these 2 rollbacks will be ignored, since they target a higher sp.
        # note that both are needed for testing, as one is there to potentially
        # set a new target, and the other is there to make the offset wrong if
        # the rollback stops prematurely.
        self.ctrl.on_undo(self.ctrl.rollback_to, sp+100)
        self.ctrl.on_undo(self.ctrl.rollback_to, sp+100)
        sp2 = self.ctrl.savepoint()
        # ensure that there's no way this test can pass unless rollback_to
        # notices re-entrant invocations (because it would overflow the stack)
        for i in range(sys.getrecursionlimit()*2):
            # request a rollback all the way to 0; this target should be used
            # in place of the sp2 target or sp+100 targets, since it will be
            # the lowest target encountered during the rollback.
            self.ctrl.on_undo(self.ctrl.rollback_to, sp)
        self.ctrl.rollback_to(sp2) # ask to rollback to posn 2
        self.assertEqual(self.ctrl.savepoint(), sp) # but should rollback to 0
    d(a)
    def testNestedRule(self):
        # A rule initialized from within another rule shares the outer
        # transaction's write log but gets its own read log; undoing the
        # outer rule undoes the inner one too.
        def rule1():
            self.assertEqual(set(self.ctrl.has_run), set([self.t0, self.t1]))
            self.assertEqual(self.ctrl.current_listener, self.t1)
            self.ctrl.used(self.s1)
            self.ctrl.changed(self.s2)
            self.assertEqual(self.ctrl.reads, {self.s1:1})
            self.assertEqual(self.ctrl.writes, {self.s2:self.t1})
            self.t2.run=rule2; self.ctrl.initialize(self.t2)
            self.assertEqual(set(self.ctrl.has_run), set([self.t0, self.t1]))
            self.assertEqual(self.ctrl.current_listener, self.t1)
            self.assertEqual(self.ctrl.reads, {self.s1:1})
            self.assertEqual(self.ctrl.writes, {self.s2:self.t1, s3:self.t2})
        def rule2():
            self.assertEqual(set(self.ctrl.has_run), set([self.t0, self.t1]))
            self.assertEqual(self.ctrl.current_listener, self.t2)
            self.assertEqual(self.ctrl.reads, {})
            self.assertEqual(self.ctrl.writes, {self.s2:self.t1})
            self.ctrl.used(self.s2)
            self.ctrl.changed(s3)
        def rule0():
            pass
        s3 = TestSubject(); s3.name = 's3'
        self.runAs(self.t0, rule0)
        self.runAs(self.t1, rule1)
        self.assertEqual(
            set(self.ctrl.has_run),
            set([self.t1, self.t0]) # t2 was new, so doesn't show
        )
        self.assertEqual(list(self.t1.iter_subjects()), [self.s1])
        self.assertEqual(list(self.t2.iter_subjects()), [self.s2])
        self.ctrl.rollback_to(self.ctrl.has_run[self.t1]) # should undo both t1/t2
    def testUndoLogSpansMultipleRecalcs(self):
        c1 = trellis.Value(False, discrete=True)
        c2 = trellis.Cell(lambda: (c1.value, log.append(trellis.savepoint())))
        log = []; c2.value; log = []; c1.value = True
        self.failUnless(len(log)==2 and log[1]>log[0], log)
    def testUndoPostCommitCancelsUndoOfCommitSchedule(self):
        c1 = trellis.Value(False, discrete=True)
        def c2():
            c1.value
            log.append(trellis.savepoint())
            if len(log)==2:
                raise DummyError
        c2 = trellis.Cell(c2)
        log = []; c2.value; log = [];
        # This will raise a different error if undoing the on-commit stack
        # causes an underflow:
        self.assertRaises(DummyError, setattr, c1, 'value', True)
class TestCells(mocker.MockerTestCase):
ctrl = stm.ctrl
def tearDown(self):
# make sure the old controller is back
trellis.install_controller(self.ctrl)
def testValueBasics(self):
self.failUnless(issubclass(trellis.Value, trellis.AbstractCell))
self.failUnless(issubclass(trellis.Value, stm.AbstractSubject))
v = trellis.Value()
self.assertEqual(v.value, None)
self.assertEqual(v._set_by, trellis._sentinel)
self.assertEqual(v._reset, trellis._sentinel)
v.value = 21
self.assertEqual(v._set_by, trellis._sentinel)
d(a)
def testValueUndo(self):
v = trellis.Value(42)
self.assertEqual(v.value, 42)
sp = self.ctrl.savepoint()
v.value = 43
self.assertEqual(v.value, 43)
self.ctrl.rollback_to(sp)
self.assertEqual(v.value, 42)
d(a)
def testValueUsed(self):
v = trellis.Value(42)
ctrl = self.mocker.replace(self.ctrl) #'peak.events.stm.ctrl')
ctrl.used(v)
self.mocker.replay()
trellis.install_controller(ctrl)
self.assertEqual(v.value, 42)
def testDiscrete(self):
v = trellis.Value(None, True)
v.value = 42
self.assertEqual(v.value, None)
def testValueChanged(self):
v = trellis.Value(42)
old_ctrl, ctrl = self.ctrl, self.mocker.replace(self.ctrl)
ctrl.lock(v)
ctrl.changed(v)
self.mocker.replay()
trellis.install_controller(ctrl)
v.value = 43
self.assertEqual(v.value, 43)
def testValueUnchanged(self):
v = trellis.Value(42)
ctrl = self.mocker.replace(self.ctrl)
ctrl.lock(v)
mocker.expect(ctrl.changed(v)).count(0)
self.mocker.replay()
trellis.install_controller(ctrl)
v.value = 42
self.assertEqual(v.value, 42)
d(a)
def testValueSetLock(self):
v = trellis.Value(42)
v.value = 43
self.assertEqual(v.value, 43)
self.assertEqual(v._set_by, None)
def go():
v.value = 99
t = TestListener(); t.name = 't'
t.run = go
self.assertRaises(trellis.InputConflict, self.ctrl.run_rule, t)
self.assertEqual(v.value, 43)
def go():
v.value = 43
t = TestListener(); t.name = 't'
t.run = go
self.ctrl.run_rule(t)
self.assertEqual(v.value, 43)
def testReadOnlyCellBasics(self):
log = []
c = trellis.Cell(lambda:log.append(1))
self.failUnless(type(c) is trellis.ReadOnlyCell)
c.value
self.assertEqual(log,[1])
c.value
self.assertEqual(log,[1])
def testDiscreteValue(self):
log = []
v = trellis.Value(False, True)
c = trellis.Cell(lambda: log.append(v.value))
self.assertEqual(log,[])
c.value
self.assertEqual(log,[False])
del log[:]
v.value = True
self.assertEqual(log, [True, False])
self.assertEqual(v.value, False)
del log[:]
v.value = False
self.assertEqual(log, [])
def testCellConstructor(self):
self.failUnless(type(trellis.Cell(value=42)) is trellis.Value)
self.failUnless(type(trellis.Cell(lambda:42)) is trellis.ReadOnlyCell)
self.failUnless(type(trellis.Cell(lambda:42, value=42)) is trellis.Cell)
def testRuleChain(self):
v = trellis.Value(0)
log = []
c1 = trellis.Cell(lambda:int(v.value/2))
c2 = trellis.Cell(lambda:log.append(c1.value))
c2.value
self.assertEqual(log, [0])
v.value = 1
self.assertEqual(log, [0])
v.value = 2
self.assertEqual(log, [0, 1])
def testConstant(self):
for v in (42, [57], "blah"):
c = trellis.Constant(v)
self.assertEqual(c.value, v)
self.assertEqual(c.get_value(), v)
self.failIf(hasattr(c,'set_value'))
self.assertRaises(AttributeError, setattr, c, 'value', v)
self.assertEqual(repr(c), "Constant(%r)" % (v,))
def testRuleToConstant(self):
log = []
def go():
log.append(1)
return 42
c = trellis.Cell(go)
self.assertEqual(c.value, 42)
self.assertEqual(log, [1])
self.failUnless(isinstance(c, trellis.ConstantRule))
self.assertEqual(repr(c), "Constant(42)")
self.assertEqual(c.value, 42)
self.assertEqual(c.get_value(), 42)
self.assertEqual(c.rule, None)
self.assertEqual(log, [1])
self.failIf(c.dirty())
c.__class__ = trellis.ReadOnlyCell # transition must be reversible to undo
self.failIf(isinstance(c, trellis.ConstantRule))
def testModifierIsAtomic(self):
log = []
d(trellis.modifier)
def do_it():
self.failUnless(self.ctrl.active)
self.assertEqual(self.ctrl.current_listener, None)
log.append(True)
return log
rv = do_it()
self.failUnless(rv is log)
self.assertEqual(log, [True])
d(a)
def testModifierAlreadyAtomic(self):
log = []
d(trellis.modifier)
def do_it():
self.failUnless(self.ctrl.active)
self.assertEqual(self.ctrl.current_listener, None)
log.append(True)
return log
rv = do_it()
self.failUnless(rv is log)
self.assertEqual(log, [True])
d(a)
def testModifierFromCell(self):
v1, v2 = trellis.Value(42), trellis.Value(99)
d(trellis.modifier)
def do_it():
v1.value = v1.value * 2
self.assertEqual(self.ctrl.reads, {v1:1})
def rule():
v2.value
do_it()
self.assertEqual(self.ctrl.reads, {v2:1})
trellis.Cell(rule).value
self.assertEqual(v1.value, 84)
def testDiscreteToConstant(self):
log = []
c1 = trellis.ReadOnlyCell(lambda:True, False, True)
c2 = trellis.Cell(lambda:log.append(c1.value))
c2.value
self.assertEqual(log, [True, False])
self.failUnless(isinstance(c1, trellis.ConstantRule))
def testReadWriteCells(self):
C = trellis.Cell(lambda: (F.value-32) * 5.0/9, -40)
F = trellis.Cell(lambda: (C.value * 9.0)/5 + 32, -40)
self.assertEqual(C.value, -40)
self.assertEqual(F.value, -40)
C.value = 0
self.assertEqual(C.value, 0)
self.assertEqual(F.value, 32)
def testSelfDependencyDoesNotIncreaseLayer(self):
c1 = trellis.Value(23)
c2 = trellis.Cell(lambda: c1.value + c2.value, 0)
self.assertEqual(c2.value, 23)
self.assertEqual(c2.layer, 1)
c1.value = 19
self.assertEqual(c2.value, 42)
self.assertEqual(c2.layer, 1)
def testSettingOccursForEqualObjects(self):
d1 = {}; d2 = {}
c1 = trellis.Value()
c1.value = d1
self.failUnless(c1.value is d1)
c1.value = d2
self.failUnless(c1.value is d2)
def testRepeat(self):
def counter():
if counter.value == 10:
return counter.value
trellis.repeat()
return counter.value + 1
counter = trellis.ReadOnlyCell(counter, 1)
self.assertEqual(counter.value, 10)
    d(a)
    def testTodoRollbackFuture(self):
        # A TodoValue records the savepoint at which its future was first
        # touched; rolling back past later savepoints must not clear it,
        # rolling back to it must.
        sp = self.ctrl.savepoint()
        tv = trellis.TodoValue(dict)
        self.assertEqual(tv._savepoint, None)
        tv.get_future()[1] = 2
        self.assertEqual(tv._savepoint, sp)
        sp2 = self.ctrl.savepoint()
        tv.get_future()[2] = 3
        self.assertEqual(tv._savepoint, sp)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
        self.assertEqual(tv._savepoint, None)
    d(a)
    def testTodoRollbackSet(self):
        # Like testTodoRollbackFuture, but the second change is a direct
        # .value assignment instead of another future mutation.
        sp = self.ctrl.savepoint()
        tv = trellis.TodoValue(dict)
        self.assertEqual(tv._savepoint, None)
        tv.get_future()[1] = 2
        self.assertEqual(tv._savepoint, sp)
        sp2 = self.ctrl.savepoint()
        tv.value = {2:3}
        self.assertEqual(tv._savepoint, sp)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
        self.assertEqual(tv._savepoint, None)
    d(a)
    def testFullRollbackList(self):
        # Rolling back a trellis.List mutation must also unwind the undo log
        # back to the originating savepoint.
        l = trellis.List()
        sp = self.ctrl.savepoint()
        l.append(1)
        self.ctrl.on_undo(lambda:None)
        sp2 = self.ctrl.savepoint()
        l.append(2)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
    d(a)
    def testFullRollbackDict(self):
        # Same rollback contract as testFullRollbackList, for trellis.Dict.
        # (The local name d shadows the module-level decorate helper here.)
        d = trellis.Dict()
        sp = self.ctrl.savepoint()
        d[1] = 2
        self.ctrl.on_undo(lambda:None)
        sp2 = self.ctrl.savepoint()
        d[2] = 3
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
    d(a)
    def testFullRollbackSet(self):
        # Same rollback contract as testFullRollbackList, for trellis.Set.
        s = trellis.Set()
        sp = self.ctrl.savepoint()
        s.add(1)
        self.ctrl.on_undo(lambda:None)
        sp2 = self.ctrl.savepoint()
        s.add(2)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
    def run_modifier_and_rule(self, func, rule):
        # Helper: atomically schedule *rule* as a Cell, remember the
        # savepoint on func.sp, then run *func* as a modifier.
        d(self.ctrl.atomically)
        def go():
            self.ctrl.schedule(trellis.Cell(rule))
            func.sp = self.ctrl.savepoint()
            trellis.modifier(func)()
    def testDictUndo(self):
        # A rule that observes a partially-applied Dict mutation rolls it
        # back; the dict must end up unchanged from its pre-modifier state.
        def do_it():
            dd[1] = 2
            self.ctrl.on_undo(lambda:None)
            do_it.sp2 = self.ctrl.savepoint()
            dd[4] = 6
            del dd[5]
        def rule():
            if dict(dd)=={4:5, 5:6}: return
            self.assertEqual(dict(dd), {1:2, 4:6})
            self.ctrl.rollback_to(do_it.sp2)
            self.assertEqual(self.ctrl.savepoint(), do_it.sp)
        dd = trellis.Dict()
        dd[4] = 5
        dd[5] = 6
        self.assertEqual(dict(dd), {4:5, 5:6})
        self.run_modifier_and_rule(do_it, rule)
        self.assertEqual(dict(dd), {4:5, 5:6})
    def testSetAndObservingUndo(self):
        # Rollback of Set mutations must also restore the Observing helper's
        # _watching key set, not just the Set contents.
        def do_it():
            s.add(1)
            self.ctrl.on_undo(lambda:None)
            do_it.sp2 = self.ctrl.savepoint()
            s.add(3)
            s.remove(4)
        def rule():
            if set(s)==set([4,5]): return
            self.assertEqual(set(s), set([1,3,5]))
            self.ctrl.rollback_to(do_it.sp2)
            self.assertEqual(self.ctrl.savepoint(), do_it.sp)
        s = trellis.Set([])
        o = collections.Observing(keys=s)
        s.update([4,5])
        self.assertEqual(set(s), set([4,5]))
        self.assertEqual(set(o._watching), set([4,5]))
        self.run_modifier_and_rule(do_it, rule)
        self.assertEqual(set(s), set([4,5]))
        self.assertEqual(set(o._watching), set([4,5]))
class TestDefaultEventLoop(unittest.TestCase):
    """Behavior of the default EventLoop: queued calls, poll/flush semantics,
    interaction with atomic transactions, and undo of scheduled work."""
    def setUp(self):
        self.loop = EventLoop()
        self.ctrl = trellis.ctrl
    def testCallAndPoll(self):
        # poll() runs exactly one queued callback per invocation.
        log = []
        self.loop.call(log.append, 1)
        self.loop.call(log.append, 2)
        self.assertEqual(log, [])
        self.loop.poll()
        self.assertEqual(log, [1])
        self.loop.poll()
        self.assertEqual(log, [1, 2])
        self.loop.poll()
        self.assertEqual(log, [1, 2])
    d(a)
    def testLoopIsNonAtomic(self):
        # Driving the loop from inside an atomic transaction is forbidden.
        self.assertRaises(AssertionError, self.loop.poll)
        self.assertRaises(AssertionError, self.loop.flush)
        self.assertRaises(AssertionError, self.loop.run)
    def testCallAndFlush(self):
        # flush() drains the current queue; callbacks scheduled *during*
        # the flush wait for the next poll/flush.
        log = []
        self.loop.call(log.append, 1)
        self.loop.call(log.append, 2)
        self.loop.call(self.loop.call, log.append, 3)
        self.assertEqual(log, [])
        self.loop.flush()
        self.assertEqual(log, [1, 2])
        self.loop.poll()
        self.assertEqual(log, [1, 2, 3])
        self.loop.poll()
        self.assertEqual(log, [1, 2, 3])
    def testUndoOfCall(self):
        # A call scheduled after a savepoint is discarded by rollback_to().
        log = []
        def do():
            self.loop.call(log.append, 1)
            sp = self.ctrl.savepoint()
            self.loop.call(log.append, 2)
            self.ctrl.rollback_to(sp)
            self.loop.call(log.append, 3)
        self.ctrl.atomically(do)
        self.assertEqual(log, [])
        self.loop.flush()
        self.assertEqual(log, [1, 3])
    def testScheduleUndo(self):
        # An error raised by a Performer must undo the time-event
        # registration made by the failing recalc.
        t = Time()
        t.auto_update = False
        t20 = t[20]
        log = []
        d(trellis.Cell)
        def checktime():
            t.reached(t20)
            log.append(t._events[t20._when])
        d(trellis.Performer)
        def err_after_reached():
            if len(t._schedule)>1:
                raise DummyError
        self.assertRaises(DummyError, checktime.get_value)
        self.assertEqual(t._schedule, [t20._when, Max])
        self.assertEqual(dict(t._events), {t20._when:log[0]})
        del checktime
        self.failUnless(isinstance(log.pop(), trellis.Sensor))
        self.assertEqual(dict(t._events), {})
        self.assertEqual(log, [])
    def force_rollback(self):
        # Helper: install a Performer that immediately raises, forcing the
        # enclosing recalculation to roll back.
        d(trellis.Performer)
        def do_it():
            raise DummyError
    def testUpdateUndo(self):
        # advance() that triggers a failing rule must leave the schedule and
        # event table exactly as they were before the update.
        t = Time()
        t.auto_update = False
        t20 = t[20]
        d(trellis.Cell)
        def checktime():
            if t.reached(t20):
                self.force_rollback()
        checktime.value
        self.assertEqual(t._schedule, [t20._when, Max])
        self.assertEqual(list(t._events), [t20._when])
        self.assertRaises(DummyError, t.advance, 20)
        self.assertEqual(t._schedule, [t20._when, Max])
        self.assertEqual(list(t._events), [t20._when])
class TestTasks(unittest.TestCase):
    """Tests for activity.TaskCell generator-driven tasks: scheduling,
    dependency tracking, nested generators, value/error propagation."""
    ctrl = trellis.ctrl
    def testRunAtomicallyInLoop(self):
        # Each resumption of the task generator runs atomically; one step
        # is executed per flush of the task's loop.
        log = []
        def f():
            self.failUnless(self.ctrl.active)
            log.append(1)
            yield activity.Pause
            self.failUnless(self.ctrl.active)
            log.append(2)
        t = activity.TaskCell(f)
        self.assertEqual(log, [])
        t._loop.flush()
        self.assertEqual(log, [1])
        t._loop.flush()
        self.assertEqual(log, [1, 2])
    def testDependencyAndCallback(self):
        # The task re-runs when a cell it read changes, and its own writes
        # propagate to dependent cells.
        log = []
        v = trellis.Value(42)
        v1 = trellis.Value(1)
        c1 = trellis.Cell(lambda: v1.value*2)
        def f():
            while v.value:
                log.append(v.value)
                v1.value = v.value
                yield activity.Pause
        t = activity.TaskCell(f)
        check = []
        for j in 42, 57, 99, 106, 23, None:
            self.assertEqual(log, check)
            v.value = j
            if j: check.append(j)
            for i in range(5):
                t._loop.flush()
            if j: self.assertEqual(c1.value, j*2)
            self.assertEqual(log, check)
    def testPauseAndCall(self):
        # A task generator may delegate to sub-generators that pause until
        # component attributes change.
        log = []
        class TaskExample(trellis.Component):
            trellis.attrs(
                start = False,
                stop = False
            )
            def wait_for_start(self):
                log.append("waiting to start")
                while not self.start:
                    yield activity.Pause
            def wait_for_stop(self):
                while not self.stop:
                    log.append("waiting to stop")
                    yield activity.Pause
            activity.task()
            def demo(self):
                yield self.wait_for_start()
                log.append("starting")
                yield self.wait_for_stop()
                log.append("stopped")
        self.assertEqual(log, [])
        t = TaskExample()
        EventLoop.flush()
        self.assertEqual(log, ['waiting to start'])
        log.pop()
        t.start = True
        EventLoop.flush()
        self.assertEqual(log, ['starting', 'waiting to stop'])
        log.pop()
        log.pop()
        t.stop = True
        EventLoop.flush()
        self.assertEqual(log, ['stopped'])
    def testValueReturns(self):
        # A sub-generator's last yielded value (or activity.Return) becomes
        # the caller's activity.resume() result.
        log = []
        def f1():
            yield 42
        def f2():
            yield f1(); yield activity.resume()
        def f3():
            yield f2(); v = activity.resume()
            log.append(v)
        t = activity.TaskCell(f3)
        EventLoop.flush()
        self.assertEqual(log, [42])
        log = []
        def f1():
            yield activity.Return(42)
        t = activity.TaskCell(f3)
        EventLoop.flush()
        self.assertEqual(log, [42])
    def testErrorPropagation(self):
        # Exceptions raised in a sub-generator surface in the caller at the
        # yield/resume point.
        log = []
        def f1():
            raise DummyError
        def f2():
            try:
                yield f1(); activity.resume()
            except DummyError:
                log.append(True)
            else:
                pass
        t = activity.TaskCell(f2)
        self.assertEqual(log, [])
        EventLoop.flush()
        self.assertEqual(log, [True])
    def testSendAndThrow(self):
        # The task machinery must use send()/throw() on iterator-like
        # objects, not just next().
        log = []
        class SendThrowIter(object):
            count = 0
            def next(self):
                if self.count==0:
                    self.count = 1
                    def f(): yield 99
                    return f()
                raise StopIteration
            def send(self, value):
                log.append(value)
                def f(): raise DummyError; yield None
                return f()
            def throw(self, typ, val, tb):
                log.append(typ)
                log.append(val.__class__) # type(val) is instance in Py<2.5
                log.append(type(tb))
                raise StopIteration
        def fs(): yield SendThrowIter()
        t = activity.TaskCell(fs)
        self.assertEqual(log, [])
        EventLoop.flush()
        self.assertEqual(log, [99, DummyError,DummyError, types.TracebackType])
    def testResumeOnlyOnceUntilFlushed(self):
        # Multiple dependency changes before a flush resume the task once.
        log = []
        c1 = trellis.Value(1)
        c2 = trellis.Value(2)
        def f():
            for i in range(3):
                c1.value, c2.value
                log.append(i)
                yield activity.Pause
        t = activity.TaskCell(f)
        self.assertEqual(log, [])
        EventLoop.flush()
        self.assertEqual(log, [0])
        c1.value = 3
        self.assertEqual(log, [0])
        c2.value = 4
        EventLoop.flush()
        self.assertEqual(log, [0, 1])
def additional_tests():
    """Build the doctest suite from the package's .txt documents.

    On Python < 2.4 the first three files are skipped (they rely on
    decorator syntax); the SQLAlchemy doc is added only when SQLAlchemy's
    instrumentation API is importable.
    """
    import doctest, sys
    files = [
        'README.txt', 'STM-Observer.txt', 'Activity.txt', 'Collections.txt',
        'Internals.txt',
    ][(sys.version<'2.4')*3:] # All but Internals+Collections use decorator syntax
    try:
        from sqlalchemy.orm.attributes import ClassManager
    except ImportError:
        pass
    else:
        files.insert(0, 'SQLAlchemy.txt')
    return doctest.DocFileSuite(
        optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE, *files
    )
| [
"tomjoy002@gmail.com"
] | tomjoy002@gmail.com |
d2fd857768b784b5d412fcfde44b925623531940 | 62d2d16c3042c4c3737f02a7b0a5a23961fc3bc3 | /exers/notas.py | 1b0fb4137568709465ba2b6ed78b072308c164be | [] | no_license | Jose-Humberto-07/pythonFaculdade | 9e3d196a2b27ed34b182519db7f0e0b0a3ac6be2 | e7a03cca421c8656b3169dfc8fe5ac5973e21176 | refs/heads/main | 2023-05-26T05:03:19.838093 | 2021-06-03T00:53:22 | 2021-06-03T00:53:22 | 369,333,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py |
#funcao
def media(ap1, ap2):
    """Return the arithmetic mean of the two grades."""
    return (ap1 + ap2) / 2
# Grade-entry script: reads three students' names and two grades each,
# then prints each student's grades and average.
nome = []
ap1 = []
ap2 = []
print("===============controle de notas=============")
#=[0,1,2,3,4,5]
for c in range(3):
    print("Qual o nome do ",(c+1),"° aluno? ")
    nome.append(input())
    print("Qual a nota AP1 do " + nome[c])
    ap1.append(float(input()))
    print("Qual a nota AP2 do " + nome[c])
    ap2.append(float(input()))
    print()
    print("-------------------------------------------------------")
    print()
print("===============================================================")
for c in range(3):
    # BUG FIX: the original wrote ``media = media(ap1[c], ap2[c])``, which
    # rebound the *function name* to a float and crashed with a TypeError
    # on the second loop iteration. Use a separate variable instead.
    m = media(ap1[c], ap2[c])
    print("Nome: " + nome[c])
    print("Primeira nota (AP1): " , ap1[c])
    print("Segunda nota (AP2): " , ap2[c])
    print("Média: " , m)
| [
"jhpnascimento96@gmail.com"
] | jhpnascimento96@gmail.com |
6751b2b46e1d04b8b4096859890d818f7342a742 | 60cc5a46e6b48716ee734526cdde74a993921a88 | /pysignup/backends/memory.py | 279145f454fe6d85579ee27a41d12d74b4d8db08 | [] | no_license | pietvanzoen/signup | fc65e4a26301e7fd7dc28af328488464b733fff2 | 80c62cb1841b83f439b547add758ae9ccaddd00d | refs/heads/master | 2021-01-16T19:31:06.049713 | 2013-10-08T20:22:18 | 2013-10-08T20:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | from collections import OrderedDict
from . import base
class MemoryStore(type):
    """Metaclass that equips every class it creates with its own ordered,
    in-memory store (an OrderedDict bound to the ``_store`` attribute)."""
    def __new__(mcls, name, bases, namespace, **kwargs):
        new_class = type.__new__(mcls, name, bases, namespace)
        new_class._store = OrderedDict()
        return new_class
class MemoryModelMixin(metaclass=MemoryStore):
    """Mixin that backs model classes with a per-class in-memory store."""

    @classmethod
    def get(cls, id=None):
        """Return all stored objects, or the one stored under *id*.

        Raises base.NotFound when *id* is given but not present.
        """
        if id is None:
            return cls._store.values()
        elif id in cls._store:
            return cls._store[id]
        else:
            raise base.NotFound("{} not found".format(id))

    @classmethod
    def put(cls, *args, **kwargs):
        """Create an instance via the base class and record it in the store.

        BUG FIX: the original ended with ``cls._store[id] = self``, which
        keyed the entry on the *builtin* ``id`` function instead of the
        generated integer id, so ``get(n)`` could never find stored objects.
        """
        new_id = len(cls._store) + 1
        kwargs['id'] = new_id
        self = super().put(*args, **kwargs)
        cls._store[new_id] = self
        return self
class Schedule(MemoryModelMixin, base.Schedule):
    # In-memory backend implementation of the Schedule model.
    pass
class ScheduleDate(MemoryModelMixin, base.ScheduleDate):
    # In-memory backend implementation of the ScheduleDate model.
    pass
class ScheduleSignup(MemoryModelMixin, base.ScheduleSignup):
    # In-memory backend implementation of the ScheduleSignup model.
    pass
| [
"m@schmichael.com"
] | m@schmichael.com |
077349d5fcd423d4c28bef9cf4945a2d36546d58 | 0b36cff195d540e96f50f87c2b984235d958905c | /input_file_with_xy.py | 112ea05be72c8c61be1a1e99aa0d8f2640760816 | [] | no_license | JeanLouis1606/premiers_scripts_python | a41adea7b5c6d01c3d7538e2e3039e034bd15f4b | c0b348f736990900ff5596e295a350265ecf1741 | refs/heads/master | 2020-08-02T07:03:57.755530 | 2019-09-27T14:10:07 | 2019-09-27T14:10:07 | 211,271,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | with open("/usr/share/dict/words") as inputfile:
for line in inputfile:
if len(line)<15:
if (line[0] == 'x' and line[1] == 'y'):
line = line.strip()
print(line)
| [
"jeanlouisfuccellaro@MacBook-Pro-de-jl-2.local"
] | jeanlouisfuccellaro@MacBook-Pro-de-jl-2.local |
0e9d83f11f89926b5b0ac3d2b057dcf7f4fed023 | 4817b6eea29d9d952ef91899efee23becd741757 | /ryu/app/network_awareness3/shortest_forwarding.py | b60beb9a8db1ce446fdc203f2b65f082d326ae88 | [
"Apache-2.0"
] | permissive | zspcchinu/Optimal-routing-using-RL-with-SDN | 153eec7aae93e852278e02e4bac39a6fa9be4a8a | b7993c7f328ab6bafdcb6dab56f4ecd83ef0fe14 | refs/heads/master | 2022-06-17T21:26:41.293024 | 2020-05-17T03:42:16 | 2020-05-17T03:42:16 | 264,542,516 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 14,357 | py | from __future__ import absolute_import
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# conding=utf-8
from builtins import range
import logging
import struct
import networkx as nx
from operator import attrgetter
from ryu import cfg
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import arp
from ryu.lib import hub
import numpy as np
import random
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link
from . import network_awareness
from . import network_monitor
from . import network_delay_detector
from . import setting
from ryu.app import simple_switch_13
from ryu.app.rl_module.network_RLModule import network_RLModule
CONF = cfg.CONF
class ShortestForwarding(app_manager.RyuApp):
    """Ryu controller app that forwards IPv4 traffic along paths chosen
    either by an RL module or by precomputed shortest paths, installing
    bidirectional flow entries along the selected path."""
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {
        "network_awareness": network_awareness.NetworkAwareness,
        "network_monitor": network_monitor.NetworkMonitor,
        "network_delay_detector": network_delay_detector.NetworkDelayDetector}
    WEIGHT_MODEL = {'hop': 'weight', 'delay': "delay", "bw": "bw"}
    def __init__(self, *args, **kwargs):
        super(ShortestForwarding, self).__init__(*args, **kwargs)
        self.name = 'shortest_forwarding'
        self.awareness = kwargs["network_awareness"]
        self.monitor = kwargs["network_monitor"]
        self.delay_detector = kwargs["network_delay_detector"]
        self.mac_to_port = {}
        self.datapaths = {}
        self.rl = None
        self.weight = self.WEIGHT_MODEL[CONF.weight]
        # The RL module is constructed in a green thread so app startup
        # is not blocked by its initialization.
        self.rl_start_thread = hub.spawn(self._start_rl)
    def _start_rl(self):
        # Instantiate the reinforcement-learning path module.
        self.rl = network_RLModule(self.awareness)
    def set_weight_mode(self, weight):
        """Switch the active weight model; for 'hop' re-run path discovery."""
        self.weight = weight
        if self.weight == self.WEIGHT_MODEL['hop']:
            self.awareness.get_shortest_paths(weight=self.weight)
        return True
    @set_ev_cls(ofp_event.EventOFPStateChange,
                [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def _state_change_handler(self, ev):
        # Track live datapaths: register on connect, drop on disconnect.
        datapath = ev.datapath
        if ev.state == MAIN_DISPATCHER:
            if not datapath.id in self.datapaths:
                self.logger.debug('register datapath: %016x', datapath.id)
                self.datapaths[datapath.id] = datapath
        elif ev.state == DEAD_DISPATCHER:
            if datapath.id in self.datapaths:
                self.logger.debug('unregister datapath: %016x', datapath.id)
                del self.datapaths[datapath.id]
    def add_flow(self, dp, p, match, actions, idle_timeout=0, hard_timeout=0):
        """Install a flow entry with priority *p* on datapath *dp*."""
        ofproto = dp.ofproto
        parser = dp.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        mod = parser.OFPFlowMod(datapath=dp, priority=p,
                                idle_timeout=idle_timeout,
                                hard_timeout=hard_timeout,
                                match=match, instructions=inst)
        dp.send_msg(mod)
    def send_flow_mod(self, datapath, flow_info, src_port, dst_port):
        """Install one (in_port, src_ip, dst_ip) -> dst_port flow entry.

        flow_info is (eth_type, src_ip, dst_ip, ...).
        """
        parser = datapath.ofproto_parser
        actions = []
        actions.append(parser.OFPActionOutput(dst_port))
        match = parser.OFPMatch(
            in_port=src_port, eth_type=flow_info[0],
            ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
        self.add_flow(datapath, 1, match, actions,
                      idle_timeout=15, hard_timeout=60)
    def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
        """Build an OFPPacketOut; returns None when there is nothing to send
        (unbuffered packet with no payload)."""
        actions = []
        if dst_port:
            actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))
        msg_data = None
        if buffer_id == datapath.ofproto.OFP_NO_BUFFER:
            if data is None:
                return None
            msg_data = data
        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=buffer_id,
            data=msg_data, in_port=src_port, actions=actions)
        return out
    def send_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
        """Send a packet_out if one could be built."""
        out = self._build_packet_out(datapath, buffer_id,
                                     src_port, dst_port, data)
        if out:
            datapath.send_msg(out)
    def get_port(self, dst_ip, access_table):
        """Return the access port where *dst_ip* is attached, else None."""
        # access_table: {(sw,port) :(ip, mac)}
        if access_table:
            if isinstance(list(access_table.values())[0], tuple):
                for key in list(access_table.keys()):
                    if dst_ip == access_table[key][0]:
                        dst_port = key[1]
                        return dst_port
        return None
    def get_link_to_port(self, link_to_port, src_dpid, dst_dpid):
        """Return the (src_port, dst_port) pair of the inter-switch link."""
        if (src_dpid, dst_dpid) in link_to_port:
            return link_to_port[(src_dpid, dst_dpid)]
        else:
            self.logger.info("dpid:%s->dpid:%s is not in links" % (
                src_dpid, dst_dpid))
            return None
    def flood(self, msg):
        """Flood the packet out of every access port with no known host."""
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        for dpid in self.awareness.access_ports:
            for port in self.awareness.access_ports[dpid]:
                if (dpid, port) not in list(self.awareness.access_table.keys()):
                    datapath = self.datapaths[dpid]
                    out = self._build_packet_out(
                        datapath, ofproto.OFP_NO_BUFFER,
                        ofproto.OFPP_CONTROLLER, port, msg.data)
                    datapath.send_msg(out)
        self.logger.debug("Flooding msg")
    def arp_forwarding(self, msg, src_ip, dst_ip):
        """Deliver an ARP packet directly if the target host is known,
        otherwise flood it."""
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        result = self.awareness.get_host_location(dst_ip)
        if result:  # host record in access table.
            datapath_dst, out_port = result[0], result[1]
            datapath = self.datapaths[datapath_dst]
            out = self._build_packet_out(datapath, ofproto.OFP_NO_BUFFER,
                                         ofproto.OFPP_CONTROLLER,
                                         out_port, msg.data)
            datapath.send_msg(out)
            self.logger.debug("Reply ARP to knew host")
        else:
            self.flood(msg)
    def get_path(self, src, dst, weight):
        """Return a precomputed path between switches *src* and *dst*."""
        shortest_paths = self.awareness.shortest_paths
        graph = self.awareness.graph
        return shortest_paths.get(src).get(dst)[0]
        # NOTE(review): the unconditional return above makes the weighted
        # branches below unreachable dead code.
        if weight == self.WEIGHT_MODEL['hop']:
            return shortest_paths.get(src).get(dst)[0]
        elif weight == self.WEIGHT_MODEL['delay']:
            # If paths existed, return it, else figure it out and save it.
            try:
                paths = shortest_paths.get(src).get(dst)
                return paths[0]
            except:
                paths = self.awareness.k_shortest_paths(graph, src, dst,
                                                        weight=weight)
                shortest_paths.setdefault(src, {})
                shortest_paths[src].setdefault(dst, paths)
                return paths[0]
        elif weight == self.WEIGHT_MODEL['bw']:
            result = self.monitor.get_best_path_by_bw(graph, shortest_paths)
            paths = result[1]
            best_path = paths.get(src).get(dst)
            return best_path
    def get_sw(self, dpid, in_port, src, dst):
        """Return (source switch, destination switch) for the flow, or None
        when the packet arrived on an access port that does not match the
        sender's registered location."""
        src_sw = dpid
        dst_sw = None
        src_location = self.awareness.get_host_location(src)
        if in_port in self.awareness.access_ports[dpid]:
            if (dpid, in_port) == src_location:
                src_sw = src_location[0]
            else:
                return None
        dst_location = self.awareness.get_host_location(dst)
        if dst_location:
            dst_sw = dst_location[0]
        return src_sw, dst_sw
    def install_flow(self, datapaths, link_to_port, access_table, path,
                     flow_info, buffer_id, data=None):
        ''' path=[dpid1, dpid2...]
            flow_info=(eth_type, src_ip, dst_ip, in_port)
        '''
        if path is None or len(path) == 0:
            self.logger.info("Path error!")
            return
        in_port = flow_info[3]
        first_dp = datapaths[path[0]]
        out_port = first_dp.ofproto.OFPP_LOCAL
        # back_info swaps src/dst so the reverse flow is installed too.
        back_info = (flow_info[0], flow_info[2], flow_info[1])
        # inter_link
        if len(path) > 2:
            for i in range(1, len(path)-1):
                port = self.get_link_to_port(link_to_port, path[i-1], path[i])
                port_next = self.get_link_to_port(link_to_port,
                                                  path[i], path[i+1])
                if port and port_next:
                    src_port, dst_port = port[1], port_next[0]
                    datapath = datapaths[path[i]]
                    self.send_flow_mod(datapath, flow_info, src_port, dst_port)
                    self.send_flow_mod(datapath, back_info, dst_port, src_port)
                    self.logger.debug("inter_link flow install")
        if len(path) > 1:
            # the last flow entry: tor -> host
            port_pair = self.get_link_to_port(link_to_port, path[-2], path[-1])
            if port_pair is None:
                self.logger.info("Port is not found")
                return
            src_port = port_pair[1]
            dst_port = self.get_port(flow_info[2], access_table)
            if dst_port is None:
                self.logger.info("Last port is not found.")
                return
            last_dp = datapaths[path[-1]]
            self.send_flow_mod(last_dp, flow_info, src_port, dst_port)
            self.send_flow_mod(last_dp, back_info, dst_port, src_port)
            # the first flow entry
            port_pair = self.get_link_to_port(link_to_port, path[0], path[1])
            if port_pair is None:
                self.logger.info("Port not found in first hop.")
                return
            out_port = port_pair[0]
            self.send_flow_mod(first_dp, flow_info, in_port, out_port)
            self.send_flow_mod(first_dp, back_info, out_port, in_port)
            self.send_packet_out(first_dp, buffer_id, in_port, out_port, data)
        # src and dst on the same datapath
        else:
            out_port = self.get_port(flow_info[2], access_table)
            if out_port is None:
                self.logger.info("Out_port is None in same dp")
                return
            self.send_flow_mod(first_dp, flow_info, in_port, out_port)
            self.send_flow_mod(first_dp, back_info, out_port, in_port)
            self.send_packet_out(first_dp, buffer_id, in_port, out_port, data)
    def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):
        """Pick a path (RL or shortest-path, per setting.ALGORITHM) and
        install flows along it for this IPv4 packet."""
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        result = self.get_sw(datapath.id, in_port, ip_src, ip_dst)
        #print("ip src adn ip dst:", ip_src, ip_dst)
        if result:
            src_sw, dst_sw = result[0], result[1]
            if dst_sw:
                path = []
                if setting.ALGORITHM is setting.RL:
                    path = self.get_rl_path(src_sw, dst_sw, weight=self.weight)
                    self.logger.info("RL path is between switches %s to %s: is %s" % (src_sw, dst_sw,path))
                elif setting.ALGORITHM is setting.SHORTEST_PATH:
                    path = self.get_path(src_sw, dst_sw, weight=self.weight)
                    self.logger.info("Ryu PATH %s<-->%s: %s" % (ip_src, ip_dst, path))
                else:
                    self.logger.error("Unknown Algorithm for path calculation")
                #print("RL path between src and dest is", path)
                flow_info = (eth_type, ip_src, ip_dst, in_port)
                self.install_flow(self.datapaths,
                                  self.awareness.link_to_port,
                                  self.awareness.access_table, path,
                                  flow_info, msg.buffer_id, msg.data)
        return
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        '''
            In packet_in handler, we need to learn access_table by ARP.
            Therefore, the first packet from UNKOWN host MUST be ARP.
        '''
        msg = ev.msg
        datapath = msg.datapath
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        arp_pkt = pkt.get_protocol(arp.arp)
        ip_pkt = pkt.get_protocol(ipv4.ipv4)
        if isinstance(arp_pkt, arp.arp):
            self.logger.debug("ARP processing")
            self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip)
            arp_src_ip = arp_pkt.src_ip
            arp_dst_ip = arp_pkt.dst_ip
            mac = arp_pkt.src_mac
            # record the access info
            self.awareness.register_access_info(datapath.id, in_port, arp_src_ip, mac)
        if isinstance(ip_pkt, ipv4.ipv4):
            self.logger.debug("IPV4 processing")
            if len(pkt.get_protocols(ethernet.ethernet)):
                eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype
                self.shortest_forwarding(msg, eth_type, ip_pkt.src, ip_pkt.dst)
    def get_rl_path(self, src, dst, weight):
        """Delegate path selection to the RL module."""
        return self.rl.rl_optimal_path(src, dst)
| [
"chithambaram.singaravelupoonkodi@sjsu.edu"
] | chithambaram.singaravelupoonkodi@sjsu.edu |
d9bdb178ecc13cd0d02f628d51c3fc104d950945 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/power_of_three.py | 0c90784597ced25c72515a818f2ab265938bf1d4 | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 862 | py | # https://leetcode.com/problems/power-of-three/
"""
Given an integer n, return true if it is a power of three. Otherwise, return false.
An integer n is a power of three, if there exists an integer x such that n == 3^x.
Example 1:
Input: n = 27
Output: true
Example 2:
Input: n = 0
Output: false
Example 3:
Input: n = 9
Output: true
Constraints:
-2^31 <= n <= 2^31 - 1
Follow up: Could you solve it without loops/recursion?
"""
from math import log
def is_power_of_three(n: int) -> bool:
    """Logarithmic check: round log_3(n) to the nearest integer exponent and
    verify it reproduces n exactly (rounding absorbs float error)."""
    # Short-circuit keeps log() from being called on non-positive n.
    return n > 0 and 3 ** round(log(n, 3)) == n
def is_power_of_three(n: int) -> bool:
    """Iterative check: repeatedly divide out factors of 3; n is a power of
    three exactly when nothing but 1 remains.

    (Replaces the recursive divmod helper with an equivalent loop.)
    """
    if n <= 0:
        return False
    while n % 3 == 0:
        n //= 3
    return n == 1
| [
"tyivanwu@gmail.com"
] | tyivanwu@gmail.com |
04543567fbdf013acee68970b2130008939f9517 | 29ff0f91f49117d214f6f776b59a2769ad5b38d5 | /tests/test_adapters.py | e260ecfa2d316ea75354eb7e2626740e65f0aa78 | [
"MIT"
] | permissive | caioraposo/uceasy | 5f770595f99e847cff75a99f82cb78196857f981 | 921103f64c69c31aa6fc05fdf474c17a575a5525 | refs/heads/master | 2021-03-13T10:17:26.033997 | 2020-04-13T19:37:49 | 2020-04-13T19:37:49 | 246,669,260 | 0 | 0 | MIT | 2020-03-11T20:05:53 | 2020-03-11T20:05:53 | null | UTF-8 | Python | false | false | 494 | py | from uceasy.adapters import ADAPTERS
def test_illumiprocessor_help():
    # The illumiprocessor adapter should surface its usage banner on --help.
    stdout = ADAPTERS["illumiprocessor"](["--help"], capture_output=True)[0]
    assert "usage: illumiprocessor" in stdout
def test_trinity_help():
    # The trinity adapter wraps phyluce's trinity assembler entry point.
    stdout = ADAPTERS["trinity"](["--help"], capture_output=True)[0]
    assert "usage: phyluce_assembly_assemblo_trinity" in stdout
def test_spades_help():
    # The spades adapter wraps phyluce's spades assembler entry point.
    stdout = ADAPTERS["spades"](["--help"], capture_output=True)[0]
    assert "usage: phyluce_assembly_assemblo_spades" in stdout
| [
"caioraposo@protonmail.com"
] | caioraposo@protonmail.com |
b335b27973d0716c0d68c2be7d60cf3fe2f5edf8 | 87270a041c6acb4d0a8926fef53d73259a59011c | /examen2CDI.py | 5f995d46bcb6d230e3eb486016f13fb83485c10e | [] | no_license | marcazu/CDI-FIB | c83b16ccbcde5bc40833cdba6b3dcdabac07f6c3 | c5df3c5279437d992877bd535d361bda312cecac | refs/heads/main | 2023-01-06T10:58:25.281409 | 2020-11-15T18:38:08 | 2020-11-15T18:38:08 | 313,098,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,023 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 26 17:53:30 2020
@author: Marc
"""
import numpy as np
from scipy import misc
from math import sqrt,log2,log
import matplotlib.pyplot as plt
from PIL import Image
import pywt
"-----------------EXERCICI PREFIJO-----------------"
def prefix_code(lengths):
    """Greedily build a binary prefix code with the given codeword lengths.

    Returns [] when the lengths violate Kraft's inequality (no prefix code
    exists); otherwise returns one codeword per requested length (greedy
    assignment, as in the original — lengths are expected non-decreasing).
    """
    # Kraft's inequality: sum of 2**-l over all lengths must not exceed 1.
    if sum(2 ** (-l) for l in lengths) > 1:
        return []
    output = []
    for index, length in enumerate(lengths):
        # First codeword is all zeros of the requested length.
        # (The original tested ``index is 0``, which only worked because of
        # CPython's small-int caching; ``==`` is the correct comparison.)
        if index == 0:
            output.append(bin(0)[2:].zfill(length))
            continue
        for candidate in range(2 ** length):
            word = bin(candidate)[2:].zfill(length)
            # Accept the first candidate that neither prefixes nor is
            # prefixed by any codeword chosen so far.
            if not any(word.startswith(w) or w.startswith(word) for w in output):
                output.append(word)
                break
    return output
# Demo: build a prefix code for these lengths and print it (or "no" when
# no prefix code exists for them).
prova = [3,3,3,5,5,6,6,7,8,8,9,9,9]
binarywords = prefix_code(prova)
if not binarywords:
    print ("no")
else:
    print("yes")
    # A plain for-loop replaces the original list comprehension that was
    # used only for its print() side effects.
    for binword in binarywords:
        print(binword)
"-----------------EXERCICI code LZ77-----------------"
def LZ77Code(mensaje,S=12,W=18):
    """LZ77-encode *mensaje* into a list of [offset, length, char] tokens.

    S is the search-buffer size and W the total window size, so the
    lookahead holds W-S characters.  The final token is rewritten with an
    'EOF' marker (its length is bumped by one to cover the last symbol).
    NOTE(review): the exact match/extension behavior below is intricate
    and order-sensitive; documented as-is rather than restructured.
    """
    code=[[0,0,mensaje[0]]]
    mydict=[[0,0,mensaje[0]]]
    i=1  # index of the next character to be encoded
    ahead=W-S
    lookahead=mensaje[1:1+ahead]
    old=str(mensaje[max(0,i-S-1):max(0,i)])
    while i < len(mensaje):
        offset=0
        length=0
        char=lookahead[0]
        window = old+lookahead
        # scan the search buffer right-to-left for match starts
        for j in range(len(old)-1,-1,-1):
            if old[j] == lookahead[0]:
                # we have some match; extend it greedily
                match=True
                izq=j+1
                der=len(old)+1
                maxlen=1
                # longest prefix match (may run into the lookahead itself)
                while match and der <len(window):
                    if window[izq] == window[der]:
                        izq+=1
                        der+=1
                        maxlen+=1
                    else:
                        match=False
                # keep the longest match seen so far and pick the literal
                # character that follows it (with fallbacks at the window edge)
                if maxlen> length :
                    offset= len(old) -j
                    length= maxlen
                    try :
                        char= window[der]
                    except:
                        try:
                            char= window[i+length]
                        except:
                            char=window[der-1]
                            length -=1
        if length == 0:
            offset=0
        code=code+[[offset,length,char]]
        i += length+1
        old=str(mensaje[max(0,i-S):i])
        lookahead= str(mensaje[i:ahead+i])
    code[-1]=[code[-1][0],code[-1][1]+1,'EOF']
    return code
# Demo: encode a sample message with a 12-character search buffer and an
# 18-character window, then show the token stream.
# (A stray garbage line of loose floats that followed print(code) in the
# original — a syntax error — has been removed.)
mensaje = 'abcdeabaebbadab'
code = LZ77Code(mensaje, 12, 18)
print(code)
"-----------------EXERCICI decode LZ77-----------------"
def LZ77Decode(codigo):
    """Decode a list of LZ77 [offset, length, char] tokens back to a string.

    The [:-3] strips the 'EOF' marker appended by LZ77Code.
    NOTE(review): this definition is immediately shadowed by an identical
    re-definition just below.
    """
    mensaje=''
    for i in codigo:
        if i[0] != 0:
            pos=len(mensaje)-i[0]
            word=mensaje[pos:pos+ i[1]]
            extension= ""
            mensaje += word
            if i[0] <= i[1]:  # must extend by repeating the overlapped symbols
                mensaje += mensaje[i[0]+1:i[1]+1]
        mensaje+= i[2]
    return mensaje[:-3]
def LZ77Decode(codigo):
    """Decode a list of LZ77 [offset, length, char] tokens back to a string.

    NOTE(review): exact duplicate of the definition above; it rebinds the
    same name with identical behavior.
    """
    mensaje=''
    for i in codigo:
        if i[0] != 0:
            pos=len(mensaje) - i[0]
            word=mensaje[pos:pos+ i[1]]
            extension= ""
            mensaje += word
            if i[0] <= i[1]:  # must extend by repeating the overlapped symbols
                mensaje += mensaje[i[0]+1:i[1]+1]
        mensaje+= i[2]
    return mensaje[:-3]
def LZ78Decode(codigo):
mensaje=''
diccionario=[]
n=len(codigo)
for i in range(n-1):
indice=codigo[i][0]
letra=codigo[i][1]
if indice==0:
mensaje+=letra
diccionario+=[letra]
else:
palabra=diccionario[indice-1]+letra
mensaje+=palabra
diccionario+=[palabra]
indice=codigo[n-1][0]
letra=codigo[n-1][1]
if indice>0:
palabra=diccionario[indice-1]
mensaje+=palabra
return mensaje, diccionario
"-----------------EXERCICI quins son codis-----------------"
def prefix_code(code):
    """Return "yes" when no codeword is a prefix of a *different* codeword,
    otherwise "no".  (Identical duplicated words are deliberately ignored,
    matching the original's word-inequality test.)"""
    for word in code:
        for other in code:
            if word != other and (other.startswith(word) or word.startswith(other)):
                return "no"
    return "yes"
# Demo: '00' is a prefix of '001', so this prints "no".
code = ['00','11','001','111','0111', '01111','10000']
print(prefix_code(code))
"-----------------EXERCICI MATRIU ORTOGONAL-----------------"
"S'ha de mirar quina matriu multiplicada per la transposada dona algo rollo [1,0,0][0,1,0][0,0,1]
matrix1=[[2/3,2/3,1/3],
[-(sqrt(2))/2,(sqrt(2))/2,0],
[-(sqrt(2))/6,-(sqrt(2))/6,2*(sqrt(2))/3]]
matrix2=[[2/3,2/3,1/3],
[(sqrt(2))/2,(sqrt(2))/2,(sqrt(2))/4],
[-(sqrt(2))/6,-(sqrt(2))/6,2*(sqrt(2))/3]]
matrix3= [[2,2,1],
[4,4,2],
[-2,-2,8]]
matrix4 = [[2,2,1],
[-2,2,0],
[-2,-2,8]]
"fer aixo x cada 1"
mat = np.asarray(matrix1)
"print(mat.transpose())"
mat1T=(np.dot(mat,mat.transpose()))
mat = np.asarray(matrix2)
"print(mat.transpose())"
mat2T=(np.dot(mat,mat.transpose()))
mat = np.asarray(matrix3)
"print(mat.transpose())"
mat3T=(np.dot(mat,mat.transpose()))
mat = np.asarray(matrix4)
"print(mat.transpose())"
mat4T=(np.dot(mat,mat.transpose()))
"*---------------------------------------------------------------------------*"
"-----------------EXERCICI RATIO D COMPRESSIÖ-----------------"
def ratio_de_compression(pixeles, escala, entradas, pixelesB):
    """Compression ratio of block (vector) quantization.

    pixeles  -- image side length in pixels
    escala   -- number of gray levels
    entradas -- number of codebook entries
    pixelesB -- block side length in pixels
    """
    original_bits = pixeles * pixeles * log2(escala)
    index_bits = (pixeles / pixelesB) ** 2 * log2(entradas)
    codebook_bits = entradas * pixelesB * pixelesB * log2(escala)
    return original_bits / (index_bits + codebook_bits)
# NOTE(review): the return value is discarded here — presumably a print()
# around this call was intended.
ratio_de_compression(512,128,128,4)
"-----------------EXERCICI WAVELET-----------------"
l2 = [0.28,0.8481,0.4271,-0.141]
l3= [-0.32,0.2481,-0.1729,-0.741]
l = [0.28,0.8481,0.4271,-0.141]
suman = 0
suma2 = 0
for i in l:
suman += i
suma2 += (i**2)
print(suman,"=", sqrt(2))
print("1 =",suma2)
print((l[2]*l[0])+(l[3]*l[1]))
"-----------------EXERCICI BLOC DE COLORS-----------------"
def idc_bloque(p):
    """Apply the inverse block transform C^T @ p @ C and flatten the result.

    The 4x4 basis below is fixed; substitute the exercise's own matrix when
    reusing this helper.
    """
    basis = np.asarray([
        [0.0, -0.5773502691896257, 0.8164965809277261, 0.0],
        [0.0, -0.5773502691896257, -0.40824829046386313, -0.7071067811865475],
        [0.0, -0.5773502691896257, -0.408248290463863, 0.7071067811865477],
        [1.0, 0.0, 0.0, 0.0]])
    # tensordot over the matching axes is an ordinary matrix product here:
    # basis.T @ p @ basis, returned as a flat length-16 vector.
    transformed = basis.T @ np.asarray(p) @ basis
    return transformed.reshape(-1)
# Visualize each basis image: set one pixel of a 4x4 block to 1 at a time,
# inverse-transform it, and draw the result in a 4x4 grid of subplots.
fig = plt.figure()
array = np.zeros((4,4))
array = array.astype(int)
for i in range(4):
    for j in range(4):
        array[i][j] = 1
        m = idc_bloque(array)
        fig.add_subplot(4,4,i*4+j+1).axis('off')
        plt.imshow(m.reshape((4,4)))
        array[i][j] = 0
def LZ78Decode(codigo):
mensaje=''
diccionario=[]
n=len(codigo)
for i in range(n-1):
indice=codigo[i][0]
letra=codigo[i][1]
if indice==0:
mensaje+=letra
diccionario+=[letra]
else:
palabra=diccionario[indice-1]+letra
mensaje+=palabra
diccionario+=[palabra]
indice=codigo[n-1][0]
letra=codigo[n-1][1]
if indice>0:
palabra=diccionario[indice-1]
mensaje+=palabra
return mensaje, diccionario | [
"95marc@gmail.com"
] | 95marc@gmail.com |
9f59f49f87adf975eba07fe96fc5a30a73485cc8 | fc678a0a5ede80f593a29ea8f43911236ed1b862 | /77-Combinations.py | 91ae53a9008ceabe92c1973817d59e67f8a8c2c3 | [] | no_license | dq-code/leetcode | 4be0b1b154f8467aa0c07e08b5e0b6bd93863e62 | 14dcf9029486283b5e4685d95ebfe9979ade03c3 | refs/heads/master | 2020-12-13T15:57:30.171516 | 2017-11-07T17:43:19 | 2017-11-07T17:43:19 | 35,846,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
def helper(comb, start, k):
if k == 0:
res.append(comb)
return
for i in range(start, n + 1):
if n - start + 1 >= k:
helper(comb + [i], i + 1, k - 1)
res = []
helper([], 1, k)
return res
| [
"qiandeng@Qians-iMac.local"
] | qiandeng@Qians-iMac.local |
225d48e80f10f03bb995d8fb1ba7892453a63f12 | 3a3529f566957d8d79afc7d1ebe533fba239ab7c | /forum/urls.py | 3af8ed2a2bc3f64f573748ac7ea101fcff412b82 | [] | no_license | zhufree/sample | 9dc4f3aef86322487b9f252163d8b17937651ee7 | ba765b5e0a91e53d179f3593c0578a31e2bddfd8 | refs/heads/master | 2021-10-28T18:49:27.091127 | 2021-10-21T02:06:35 | 2021-10-21T02:06:35 | 41,006,994 | 30 | 5 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | __author__ = 'zhufree'
from django.conf.urls import url
from .views import *
urlpatterns = [
# Examples:
url(r'^$', index),
url(r'^p/(?P<id>\d+)/$', single_post),
url(r'^topic/(?P<id>\d+)/$', show_topic),
url(r'^post/$', post),
]
| [
"zhufree2013@gmail.com"
] | zhufree2013@gmail.com |
ffed5598e099bdd416d547810817ad878dfc91b7 | 1ad482ad03c0241cc39067c47d5b046dd8371fa9 | /core/migrations/0009_auto_20210319_2041.py | 7333b546d4720a4839edcd38dc03bc34a4667bec | [] | no_license | momentum-team-7/django-habit-tracker-drehan7 | aa421911434d0d548de232cb2f12f4ac11ddf509 | 471950de16de47fea0a020809e98d5f8abdaceb1 | refs/heads/main | 2023-03-31T11:33:00.586816 | 2021-03-27T18:55:27 | 2021-03-27T18:55:27 | 348,402,252 | 0 | 0 | null | 2021-03-22T15:49:06 | 2021-03-16T15:39:35 | JavaScript | UTF-8 | Python | false | false | 506 | py | # Generated by Django 3.1.7 on 2021-03-19 20:41
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20210319_2019'),
]
operations = [
migrations.AlterField(
model_name='habitlog',
name='date',
field=models.DateField(default=datetime.datetime(2021, 3, 19, 20, 41, 1, 263174, tzinfo=utc), unique=True),
),
]
| [
"d.rehan7@gmail.com"
] | d.rehan7@gmail.com |
1ba71e9765761905566094b0343c991430cf2236 | 2bcbd13a1d91ada88ec490de767f7e4cb01d6232 | /reading file.py | b48a01dd7da758360e07d7f6b2906043fae49ec3 | [] | no_license | Naveen131298/hackertest | 406484933418489940ebb305700f16d8f39aa685 | 5b5a34ba6f779b564279248ce73f3a2a58e89f57 | refs/heads/master | 2020-12-05T22:30:35.424802 | 2020-10-11T18:16:02 | 2020-10-11T18:16:02 | 232,261,603 | 0 | 1 | null | 2020-10-11T18:16:03 | 2020-01-07T06:50:13 | Python | UTF-8 | Python | false | false | 215 | py | import os
import glob
def read_pdf(path):
for pdf_file in glob.glob(path + '/*.pdf'):
print(pdf_file)
pdf_location = os.path.join(os.getcwd())
read_pdf(pdf_location)
print(os.path.join(os.getcwd()))
| [
"naveenmurugan72@gmail.com"
] | naveenmurugan72@gmail.com |
799ae55b2b7a4557348b168f0a3fc74d923f5fd4 | 2cd0a84aefb8a7141d1c8da99845a8ada0cc009c | /tensorflow/python/ops/rnn_cell.py | 9aa2314e5e65b02c0d4f7ee1661b77200ea50ef1 | [
"Apache-2.0"
] | permissive | hholst80/tensorflow-old | d466cee96eac717524ab8e4ee85275ce28bb5d68 | 79df325975402e03df89747947ff5b7f18407c52 | refs/heads/master | 2022-12-20T22:07:40.427519 | 2016-05-13T09:57:24 | 2016-05-13T09:57:24 | 58,914,336 | 1 | 1 | Apache-2.0 | 2022-12-09T21:52:14 | 2016-05-16T08:00:04 | C++ | UTF-8 | Python | false | false | 26,838 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
class RNNCell(object):
"""Abstract object representing an RNN cell.
An RNN cell, in the most abstract setting, is anything that has
a state -- a vector of floats of size self.state_size -- and performs some
operation that takes inputs of size self.input_size. This operation
results in an output of size self.output_size and a new state.
This module provides a number of basic commonly used RNN cells, such as
LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
of operators that allow add dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by a super-class, MultiRNNCell,
defined later. Every RNNCell must have the properties below and and
implement __call__ with the following signature.
"""
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: 2D Tensor with shape [batch_size x self.input_size].
state: 2D Tensor with shape [batch_size x self.state_size].
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A 2D Tensor with shape [batch_size x self.output_size]
- New state: A 2D Tensor with shape [batch_size x self.state_size].
"""
raise NotImplementedError("Abstract method")
@property
def input_size(self):
"""Integer: size of inputs accepted by this cell."""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
@property
def state_size(self):
"""Integer: size of state used by this cell."""
raise NotImplementedError("Abstract method")
def zero_state(self, batch_size, dtype):
"""Return state tensor (shape [batch_size x state_size]) filled with 0.
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
A 2D Tensor of shape [batch_size x state_size] filled with zeros.
"""
zeros = array_ops.zeros(
array_ops.pack([batch_size, self.state_size]), dtype=dtype)
zeros.set_shape([None, self.state_size])
return zeros
class BasicRNNCell(RNNCell):
"""The most basic RNN cell."""
def __init__(self, num_units, input_size=None):
self._num_units = num_units
self._input_size = num_units if input_size is None else input_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._num_units
@property
def state_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
output = tanh(linear([inputs, state], self._num_units, True))
return output, output
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self, num_units, input_size=None):
self._num_units = num_units
self._input_size = num_units if input_size is None else input_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._num_units
@property
def state_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope(scope or type(self).__name__): # "GRUCell"
with vs.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r, u = array_ops.split(1, 2, linear([inputs, state],
2 * self._num_units, True, 1.0))
r, u = sigmoid(r), sigmoid(u)
with vs.variable_scope("Candidate"):
c = tanh(linear([inputs, r * state], self._num_units, True))
new_h = u * state + (1 - u) * c
return new_h, new_h
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full LSTMCell that follows.
"""
def __init__(self, num_units, forget_bias=1.0, input_size=None):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: int, The dimensionality of the inputs into the LSTM cell,
by default equal to num_units.
"""
self._num_units = num_units
self._input_size = num_units if input_size is None else input_size
self._forget_bias = forget_bias
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._num_units
@property
def state_size(self):
return 2 * self._num_units
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicLSTMCell"
# Parameters of gates are concatenated into one multiply for efficiency.
c, h = array_ops.split(1, 2, state)
concat = linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(1, 4, concat)
new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
new_h = tanh(new_c) * sigmoid(o)
return new_h, array_ops.concat(1, [new_c, new_h])
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
dtype=dtype))
return shards
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
This implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
It uses peep-hole connections, optional cell clipping, and an optional
projection layer.
"""
def __init__(self, num_units, input_size=None,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None,
num_unit_shards=1, num_proj_shards=1, forget_bias=1.0):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
input_size: int, The dimensionality of the inputs into the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of the training.
"""
self._num_units = num_units
self._input_size = input_size
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
if num_proj:
self._state_size = num_units + num_proj
self._output_size = num_proj
else:
self._state_size = 2 * num_units
self._output_size = num_units
@property
def input_size(self):
return self._num_units if self._input_size is None else self._input_size
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, inputs, state, scope=None):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
scope: VariableScope for the created subgraph; defaults to "LSTMCell".
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
actual_input_size = inputs.get_shape().as_list()[1]
if self._input_size and self._input_size != actual_input_size:
raise ValueError("Actual input size not same as specified: %d vs %d." %
(actual_input_size, self._input_size))
with vs.variable_scope(scope or type(self).__name__,
initializer=self._initializer): # "LSTMCell"
concat_w = _get_concat_variable(
"W", [actual_input_size + num_proj, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B", shape=[4 * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(1, [inputs, m_prev])
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(1, 4, lstm_matrix)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * tanh(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
if self._cell_clip is not None:
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * tanh(c)
else:
m = sigmoid(o) * tanh(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable(
"W_P", [self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
return m, array_ops.concat(1, [c, m])
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
@property
def input_size(self):
return self._cell.input_size
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._cell.state_size
def __call__(self, inputs, state, scope=None):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
# Default scope: "OutputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = linear(output, self._output_size, True)
return projected, res_state
class InputProjectionWrapper(RNNCell):
"""Operator adding an input projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the projection on this batch-concatenated sequence, then split it.
"""
def __init__(self, cell, input_size):
"""Create a cell with input projection.
Args:
cell: an RNNCell, a projection of inputs is added before it.
input_size: integer, the size of the inputs before projection.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if input_size is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if input_size < 1:
raise ValueError("Parameter input_size must be > 0: %d." % input_size)
self._cell = cell
self._input_size = input_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._cell.output_size
@property
def state_size(self):
return self._cell.state_size
def __call__(self, inputs, state, scope=None):
"""Run the input projection and then the cell."""
# Default scope: "InputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = linear(inputs, self._cell.input_size, True)
return self._cell(projected, state)
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
"""Create a cell with added input and/or output dropout.
Dropout is never used on the state.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is float and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is float and 1, no output dropout will be added.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if keep_prob is not between 0 and 1.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not a RNNCell.")
if (isinstance(input_keep_prob, float) and
not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
% input_keep_prob)
if (isinstance(output_keep_prob, float) and
not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
% output_keep_prob)
self._cell = cell
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._seed = seed
@property
def input_size(self):
return self._cell.input_size
@property
def output_size(self):
return self._cell.output_size
@property
def state_size(self):
return self._cell.state_size
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
if (not isinstance(self._input_keep_prob, float) or
self._input_keep_prob < 1):
inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
output, new_state = self._cell(inputs, state)
if (not isinstance(self._output_keep_prob, float) or
self._output_keep_prob < 1):
output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
return output, new_state
class EmbeddingWrapper(RNNCell):
"""Operator adding input embedding to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the embedding on this batch-concatenated sequence, then split it and
feed into your RNN.
"""
def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
"""Create a cell with an added input embedding.
Args:
cell: an RNNCell, an embedding will be put before its inputs.
embedding_classes: integer, how many symbols will be embedded.
embedding_size: integer, the size of the vectors we embed into.
initializer: an initializer to use when creating the embedding;
if None, the initializer from variable scope or a default one is used.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if embedding_classes is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if embedding_classes <= 0 or embedding_size <= 0:
raise ValueError("Both embedding_classes and embedding_size must be > 0: "
"%d, %d." % (embedding_classes, embedding_size))
self._cell = cell
self._embedding_classes = embedding_classes
self._embedding_size = embedding_size
self._initializer = initializer
@property
def input_size(self):
return 1
@property
def output_size(self):
return self._cell.output_size
@property
def state_size(self):
return self._cell.state_size
def __call__(self, inputs, state, scope=None):
"""Run the cell on embedded inputs."""
with vs.variable_scope(scope or type(self).__name__): # "EmbeddingWrapper"
with ops.device("/cpu:0"):
if self._initializer:
initializer = self._initializer
elif vs.get_variable_scope().initializer:
initializer = vs.get_variable_scope().initializer
else:
# Default initializer for embeddings should have variance=1.
sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
embedding = vs.get_variable("embedding", [self._embedding_classes,
self._embedding_size],
initializer=initializer)
embedded = embedding_ops.embedding_lookup(
embedding, array_ops.reshape(inputs, [-1]))
return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
Raises:
ValueError: if cells is empty (not allowed) or if their sizes don't match.
"""
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
for i in xrange(len(cells) - 1):
if cells[i + 1].input_size != cells[i].output_size:
raise ValueError("In MultiRNNCell, the input size of each next"
" cell must match the output size of the previous one."
" Mismatched output size in cell %d." % i)
self._cells = cells
@property
def input_size(self):
return self._cells[0].input_size
@property
def output_size(self):
return self._cells[-1].output_size
@property
def state_size(self):
return sum([cell.state_size for cell in self._cells])
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
with vs.variable_scope(scope or type(self).__name__): # "MultiRNNCell"
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("Cell%d" % i):
cur_state = array_ops.slice(
state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
return cur_inp, array_ops.concat(1, new_states)
class SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
raise TypeError("cell_fn %s needs to be callable", cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
_, init_state = self._cell_fn(None, None)
state_shape = init_state.get_shape()
self._state_size = state_shape.with_rank(2)[1].value
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s",
self._cell_name, state_shape)
@property
def state_size(self):
return self._state_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (isinstance(args, (list, tuple)) and not args):
raise ValueError("`args` must be specified")
if not isinstance(args, (list, tuple)):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with vs.variable_scope(scope or "Linear"):
matrix = vs.get_variable("Matrix", [total_arg_size, output_size])
if len(args) == 1:
res = math_ops.matmul(args[0], matrix)
else:
res = math_ops.matmul(array_ops.concat(1, args), matrix)
if not bias:
return res
bias_term = vs.get_variable(
"Bias", [output_size],
initializer=init_ops.constant_initializer(bias_start))
return res + bias_term
| [
"henrik.holst@frostbite.com"
] | henrik.holst@frostbite.com |
f44f7184c5e26e6031fc36a3813d8d0f6e48eb80 | 8e2474e41b37a54774610fa7519d6000d8fb01d8 | /application/migrations/0004_auto_20170417_2205.py | c3402c87417ffe72d634a2f5884ff0600c1a7637 | [] | no_license | dezdem0na/make-an-appointment | 446548b911b6f8960e6afee204a5be2f0a519329 | 1695f4544c668c84ba02b3723ff2925b74c2f8e3 | refs/heads/master | 2021-01-19T23:16:23.506988 | 2017-04-23T12:58:45 | 2017-04-23T12:58:45 | 88,956,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-17 19:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0003_auto_20170417_2204'),
]
operations = [
migrations.AlterField(
model_name='application',
name='patient_name_first',
field=models.CharField(max_length=255, verbose_name='Имя'),
),
migrations.AlterField(
model_name='application',
name='patient_name_last',
field=models.CharField(max_length=255, verbose_name='Фамилия'),
),
migrations.AlterField(
model_name='application',
name='patient_name_middle',
field=models.CharField(blank=True, max_length=255, verbose_name='Отчество'),
),
]
| [
"natalie.reshetnikova@gmail.com"
] | natalie.reshetnikova@gmail.com |
ed3cb703c2428ed63dfa7f758269be8f2bb0a7af | 885c1cab7931d010b6711af652c9a79e2f7490c2 | /MyDjangoProjects/mydjangosite/mydjangosite/wsgi.py | a5dfbab0db7f5cb9860a1848e33e3de5a5df1a1b | [] | no_license | shreyash14s/Manjo | 3f1c11f7234cd12783c5e60a05cbf3f5ae9ca21d | b0ea4e3ef31d7853a8288e06a84bf556c4908d63 | refs/heads/master | 2021-01-17T21:56:38.905412 | 2015-08-15T17:45:39 | 2015-08-15T17:45:39 | 40,665,170 | 0 | 1 | null | 2015-08-13T15:05:27 | 2015-08-13T15:05:27 | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for mydjangosite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mydjangosite.settings")
application = get_wsgi_application()
| [
"sanjay.india96@gmail.com"
] | sanjay.india96@gmail.com |
360e98c144781868bcbc8c5a13d5f42085348077 | 066d05a826430a05fc1c333bd988ef702a8ed364 | /lesson3/ex4.py | 9835c43e2cca47438c789a51671c6be04f77085c | [] | no_license | se7entyse7en/learning-tensorflow | 4a5693ea49cd5a88add241301b565d672f28a318 | 1377739d54998c773e90b30dd57f3a407e19e245 | refs/heads/master | 2021-01-12T15:26:34.812952 | 2016-11-10T22:02:25 | 2016-11-10T22:02:25 | 71,783,274 | 1 | 1 | null | 2019-05-11T23:24:10 | 2016-10-24T11:45:52 | Jupyter Notebook | UTF-8 | Python | false | false | 853 | py | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
filename = 'MarshOrchid.jpg'
image = mpimg.imread(filename)
x = tf.Variable(image, name='x')
model = tf.initialize_all_variables()
with tf.Session() as session:
session.run(model)
shape = tf.shape(x)
height, width, depth = session.run(shape)
# Slice the left half
left_sliced = tf.slice(x, [0, 0, 0], [height, int(width / 2), depth])
# Mirror pixels along the vertical axis of the left half
left_mirrored_sliced = tf.reverse_sequence(
left_sliced, np.ones(height) * int(width / 2), 1, batch_dim=0)
# Paste the two slices to obtain the left half mirrored on the right half
pasted = tf.concat(1, [left_sliced, left_mirrored_sliced])
result = session.run(pasted)
plt.imshow(result)
plt.show()
| [
"loumarvincaraig@gmail.com"
] | loumarvincaraig@gmail.com |
c39a87caae3620cf991bb70f79a8ef12cfc44fbe | 7fda36e97edc2fbcf8ad2fc01ea070f881e756d2 | /tuts100_exercise11.py | cb42f99394e09547e422659a6eb8794f6e8a0eee | [] | no_license | RashadGhzi/My-Repository | 6b386521cea3b40ce0be065ca53b2bd633b36197 | 91f5897bdfb869a0d30f49693b87ed1244488e3d | refs/heads/main | 2023-08-16T07:34:24.357667 | 2021-10-12T11:44:46 | 2021-10-12T11:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import re
str = '''Email: northamerica@tata.com
email = rashadarsh78@gmail.com
priya@yahoo.com
email = meeting @2pm
Website: www.northamerica.tata.com
shubhamg199630@gmail.com
harrygoodboy@gamil.com
Directions: View map fass
indian no. +91 5588-940010
harry bhai lekin
indian no. ++91 5588-000000'''
item = re.compile(r'\S+@\S+')
item_1 = item.findall(str)
print(item_1)
i = 1
for email in item_1:
with open('harry_larry.txt', 'a') as file:
file.write(f'Email_{i}:{email}\n\n')
i += 1 | [
"rashadarsh0@gmail.com"
] | rashadarsh0@gmail.com |
c8327228eb9f84c7971a01151d5d026d74f669aa | 929d9121a74f3167e620bf4f2c9073f84e57d422 | /mysite/mainapp/forms.py | b7cb0e2cd653d5d80b50e5bea1012688afae1a98 | [] | no_license | zoriana-melnyk/HealthyEating | f7c91caa82830c112ca132fef6d172d867eb3868 | 4fea7ab1e7a5a8438c9d57288dd3c294a4a06bff | refs/heads/master | 2023-04-21T07:54:56.288872 | 2021-05-11T22:29:24 | 2021-05-11T22:29:24 | 333,217,246 | 0 | 0 | null | 2021-05-11T22:29:25 | 2021-01-26T21:05:50 | Python | UTF-8 | Python | false | false | 302 | py | from django.forms import ModelForm
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2'] | [
"48826458+zoriana-melnyk@users.noreply.github.com"
] | 48826458+zoriana-melnyk@users.noreply.github.com |
425ccb10cc834c44b5fad2c6f6259a5ce46223e7 | 3babd5887c70ff85a2254882c36e35b127a8905d | /dbhandler.py | 756e6cefac6a212f9c4f414705806bf6d4b51364 | [] | no_license | angyay0/demo-devops-users | 1109e443fbaf087e9e632e8902bfcb968bb0cc34 | f170ae65358993a48f6a073895ca2585fa087ba1 | refs/heads/master | 2022-12-02T06:09:55.470309 | 2020-08-13T21:36:10 | 2020-08-13T21:36:10 | 287,142,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from userapi.models import LoginResponse
import psycopg2
import json
#Load and return json as dict
def ReadFromJson():
with open("db.json") as file:
return (json.load(file))
#Database operations to retrieve and save users
class DBHandler:
def __init__(self):
self.credentials = ReadFromJson()
self.connection = None
def connect(self):
try:
#Connect
return True
except:
return False
def hasAccess(self, data):
try:
print (data)
return LoginResponse("Valid-Token","Angel","angyay0")
except:
return LoginResponse("","","")
def storeUser(self, data):
try:
print (data)
#return ("Fallo interno", False)
return LoginResponse("Valid-Token","Angel","angyay0")
except:
return LoginResponse("","","") | [
"aperez@mstecs.com"
] | aperez@mstecs.com |
47a5f7dac1c21ccd1fd6d4200b4a2068c776886a | a28946f264ebb2648b6e31f709d5bdf845bf0b50 | /lyrics.py | dd21c6cbdc825e9feddcc055895f4296128f90e2 | [
"MIT"
] | permissive | py-paulo/lyrics | e6be77132a353000b939941ea751c19534b0068e | 9bc1596fa9acc423aedace747ddb2e2339258331 | refs/heads/master | 2023-04-07T10:21:22.899483 | 2021-04-08T10:39:39 | 2021-04-08T10:39:39 | 354,976,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | import urllib.request
from bs4 import BeautifulSoup
uri_base = 'http://www.google.com/search?q='
artist = 'Milton Nascimento'
music = 'Clube da Esquina n 2'
query_quote = ('%s %s letra' % (artist, music)).replace(' ', '+')
req = urllib.request.Request(uri_base+query_quote, headers={
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,applica'
'tion/signed-exchange;v=b3;q=0.9',
'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',
'referer': 'https://www.google.com/',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/'
'537.36'
})
# with urllib.request.urlopen(req) as response:
# html = response.read().decode('utf-8', errors='ignore')
# with open('html.music.example.html', 'w') as fp:
# fp.write(html)
with open('html.music.example.html') as fp:
soup = BeautifulSoup(fp, 'html.parser')
for sub_soup in soup.find_all('div'):
if 'data-lyricid' in sub_soup.attrs:
for index, div in enumerate(sub_soup):
next_div = div.find_next()
spans = next_div.find_all('span')
for span in spans:
print(span.text)
break
| [
"paulo.rb.beserra@gmail.com"
] | paulo.rb.beserra@gmail.com |
566fdde94b7a27a1ac308ac870b09e58209d60fc | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetAppendRequest.py | 3e1af80821fc15b93a0a4328c878c0180e7b136d | [
"Apache-2.0"
] | permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 4,058 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetAppendModel import AlipayMarketingCampaignDiscountBudgetAppendModel
class AlipayMarketingCampaignDiscountBudgetAppendRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingCampaignDiscountBudgetAppendModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingCampaignDiscountBudgetAppendModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._notify_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.append'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
8c093d5bdd4b85a2f36c0adbc7b3a65e995fff87 | ecf96ce18d8c1bfc20b667f2bd2050dbf4286fb7 | /weights.py | 0729d7f47c0b1130a2ba5b4be4d84b925acf9bcb | [] | no_license | wiktoraleksanderkaczor/PyNN | 900ab16df0dedec9591193c6527e595c47d36bf0 | 9cd594be39f6e62ab095595cdf956282b2bf88fc | refs/heads/master | 2021-08-19T11:39:11.097391 | 2021-06-11T01:02:20 | 2021-06-11T01:02:20 | 158,038,948 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | # Module: weights.py
# Definition of some useful weight initialization functions.
import numpy as np
np.random.seed(123)
def random_init(num_prev_neurons, precision):
"""
Initializes the weights using a random number generated from a seed.
Args:
num_prev_neurons (int): The number of neurons in the previous layer.
precision (numpy.dtype): The numpy dtype for the network precision.
Returns:
weights (numpy.array): A 1-dimensional array of the randomly initialized weights for a neuron.
"""
# Storing weights for each connection to each neuron in the next layer.
weights = np.random.rand(num_prev_neurons)
return weights.astype(precision)
def xavier_init(num_prev_neurons, precision):
"""
Initializes the weights using the xavier weight initialization algorithm.
Args:
num_prev_neurons (int): The number of neurons in the previous layer.
precision (numpy.dtype): The numpy dtype for the network precision.
Returns:
weights (numpy.array): A 1-dimensional array of the xavier initialized weights for a neuron.
"""
# Setting seed based on number of previous neurons.
#np.random.seed(num_prev_neurons)
lower = -(1.0 / np.sqrt(num_prev_neurons))
upper = (1.0 / np.sqrt(num_prev_neurons))
# Storing weights for each connection to each neuron in the next layer.
weights = np.random.rand(num_prev_neurons)
return weights.astype(precision)
| [
"wiktoraleksanderkaczor@gmail.com"
] | wiktoraleksanderkaczor@gmail.com |
c9580567614da5bed9f9c744137f3d463eb77515 | dac7d0abff54dbeb9e6587f17866a34b5e7f3948 | /Cobbity/compare.py | ec3b6cf07d175832a7fb04e914de1c0c894bf84c | [] | no_license | KipCrossing/EMI_Field | 5665aba5ff5fbf4a4d42fc9b3efc9aa3b3f51eea | e52142648388a25d26f682986c586cd1827e31e0 | refs/heads/master | 2020-05-22T12:37:42.892290 | 2019-09-12T01:27:24 | 2019-09-12T01:27:24 | 186,342,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | import pandas as pd
df_OpenEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_OpenEM.xyz", header=None, delimiter=r"\s+")
df_DUALEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_DUALEM.xyz", header=None, delimiter=r"\s+")
print(df_OpenEM.head())
print(df_DUALEM.head())
New_OpenEM_readings = []
New_OpenEM_lon = []
New_OpenEM_lat = []
sum = 0
for read in df_OpenEM[2].tolist():
if read > -9999:
New_OpenEM_readings.append(read)
New_OpenEM_lon.append(df_OpenEM[0].tolist()[sum])
New_OpenEM_lat.append(df_OpenEM[1].tolist()[sum])
sum += 1
print(len(New_OpenEM_lon),len(New_OpenEM_lat),len(New_OpenEM_readings))
New_DUALEM_readings = []
New_DUALEM_lon = []
New_DUALEM_lat = []
sum = 0
for read in df_DUALEM[2].tolist():
if read > -9999:
New_DUALEM_readings.append(read)
New_DUALEM_lon.append(df_DUALEM[0].tolist()[sum])
New_DUALEM_lat.append(df_DUALEM[1].tolist()[sum])
sum += 1
print(len(New_DUALEM_lon),len(New_DUALEM_lat),len(New_DUALEM_readings))
data = {"DUALEM": New_DUALEM_readings,"OpenEM": New_OpenEM_readings,"X1":New_DUALEM_lon,"X2":New_OpenEM_lon,"Y1":New_DUALEM_lat,"Y2":New_OpenEM_lat}
df_out = pd.DataFrame(data, columns=["DUALEM","OpenEM","X1","X2","Y1","Y2"])
df_out.to_csv("~/Cobbity/Output/compare_Smooth_DUALEM_OpenEm.csv")
count = 0
for i in New_DUALEM_lon:
if New_DUALEM_lon[count] == New_OpenEM_lon[count] and New_DUALEM_lat[count] == New_OpenEM_lat[count]:
print(count)
count += 1
| [
"kip.crossing@gmail.com"
] | kip.crossing@gmail.com |
f2a5384d6168682cc2ed2c5a6b873ece4666dcf3 | 417c3cceaa1e010c7747db4e9ea4ffabfff92732 | /learning_user/basic_app/views.py | dcdc631b7e9fae41fb151a10f7f520363b17169b | [] | no_license | ibukun-brain/django-deployment-example | ef54c2a26153026c68986fc41d53725fdb52743a | b984426f7108a40f15ba5cc3dbdd86f3b93fa298 | refs/heads/master | 2023-05-31T21:21:55.624611 | 2021-07-04T05:29:44 | 2021-07-04T05:29:44 | 382,760,477 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | from django.shortcuts import render
from basic_app.forms import UserInfoForm, UserProfileInfoForm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
# Create your views here.
def base(request):
return render(request, 'basic_app/base.html')
@login_required
def special(request):
return HttpResponse("You're logged in, Nice!")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def index(request):
return render(request, 'basic_app/index.html')
def registration(request):
registered = False
if request.method == 'POST':
user_form = UserInfoForm(request.POST)
profile_form = UserProfileInfoForm(request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'profile_pic' in request.FILES:
profile.profile_pic = request.FILES['profile_pic']
profile.save()
registered = True
else:
print(user_form.errors, profile_form.errors)
else:
user_form = UserInfoForm()
profile_form = UserProfileInfoForm()
return render(request, 'basic_app/registration.html', {'user_form': user_form, 'profile_form': profile_form, 'registered': registered})
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('index'))
else:
return HttpResponse('ACCOUNT NOT ACTIVE')
else:
print("Someone tried to login and failed!")
print('Username: {} and password: {}'.format(username, password))
return HttpResponse('Invalid login details supplied!')
else:
return render(request, 'basic_app/login.html')
| [
"ibukunolaifa1984@gmail.com"
] | ibukunolaifa1984@gmail.com |
19d14b124965f2f461568792ad34bb6bbd4dc10d | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/367ff95d4d3d3770fa7b/snippet.py | 6cd51cef4fd2bff70541bd8d5ea0c23646114dd5 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,003 | py | from collections import defaultdict
from django.db.models.signals import *
class DisableSignals(object):
def __init__(self, disabled_signals=None):
self.stashed_signals = defaultdict(list)
self.disabled_signals = disabled_signals or [
pre_init, post_init,
pre_save, post_save,
pre_delete, post_delete,
pre_migrate, post_migrate,
]
def __enter__(self):
for signal in self.disabled_signals:
self.disconnect(signal)
def __exit__(self, exc_type, exc_val, exc_tb):
for signal in self.stashed_signals.keys():
self.reconnect(signal)
def disconnect(self, signal):
self.stashed_signals[signal] = signal.receivers
signal.receivers = []
def reconnect(self, signal):
signal.receivers = self.stashed_signals.get(signal, [])
del self.stashed_signals[signal]
# Example usage:
# with DisableSignals():
# user.save() # will not call any signals
| [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
8ffe7365488fff3d284791da93f2ec10bd6e22b7 | e45060b2fb6c5911249f930703db06af74292a14 | /src/misc.py | 2ef953d46b21e269cfe78df97b2fb7c04182704b | [
"MIT"
] | permissive | Guigouu/clonesquad-ec2-pet-autoscaler | a6dd350acaa559fba6caf209c439579a5c7ab15a | 33eee544a5c208caf548869c2e714d9ebc7f0be6 | refs/heads/master | 2023-01-07T01:27:33.725418 | 2020-11-05T21:28:02 | 2020-11-05T21:28:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,784 | py | import os
import sys
import re
import hashlib
import json
import math
import gzip
# Hack: Force gzip to have a deterministic output (See https://stackoverflow.com/questions/264224/setting-the-gzip-timestamp-from-python/264303#264303)
class GzipFakeTime:
def time(self):
return 1.1
gzip.time = GzipFakeTime()
import base64
import boto3
from datetime import datetime
from datetime import timezone
from datetime import timedelta
import requests
from requests_file import FileAdapter
from collections import defaultdict
import pdb
import debug as Dbg
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core import patch_all
patch_all()
def is_sam_local():
return "AWS_SAM_LOCAL" in os.environ and os.environ["AWS_SAM_LOCAL"] == "true"
import cslog
log = cslog.logger(__name__)
def is_direct_launch():
return len(sys.argv) > 1
def utc_now():
return datetime.now(tz=timezone.utc) # datetime.utcnow()
def epoch():
return datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc)
def seconds_from_epoch_utc(now=None):
if now is None: now = utc_now()
return int((now - epoch()).total_seconds())
def seconds2utc(seconds):
return datetime.utcfromtimestamp(int(seconds)).replace(tzinfo=timezone.utc)
def str2utc(s, default=None):
try:
return datetime.fromisoformat(s)
except:
return default
return None
def sha256(s):
m = hashlib.sha256()
m.update(bytes(s,"utf-8"))
return m.hexdigest()
def abs_or_percent(value, default, max_value):
v = default
try:
if value.endswith("%"):
v = math.ceil(float(value[:-1])/100.0 * max_value)
else:
v = int(value)
except:
pass
return v
def str2duration_seconds(s):
try:
return int(s)
except:
# Parse timedelta metadata
meta = s.split(",")
metas = {}
for m in meta:
k, v = m.split("=")
metas[k] = float(v)
return timedelta(**metas).total_seconds()
def decode_json(value):
if value is None:
return None
if value.startswith("b'"):
value = value[2:][:-1]
try:
uncompress = gzip.decompress(base64.b64decode(value))
value = str(uncompress, "utf-8")
except:
pass
return json.loads(value)
def encode_json(value, compress=False):
value_j = json.dumps(value, sort_keys=True, default=str)
if compress:
compressed = gzip.compress(bytes(value_j, "utf-8"), compresslevel=9)
value_j = str(base64.b64encode(compressed), "utf-8")
return value_j
def Session():
s = requests.Session()
s.mount('file://', FileAdapter())
return s
url_cache = {}
def get_url(url):
global url_cache
if url is None or url == "":
return None
if url in url_cache:
return url_cache[url]
# internal: protocol management
internal_str = "internal:"
if url.startswith(internal_str):
filename = url[len(internal_str):]
paths = [os.getcwd(), "/opt" ]
if "LAMBDA_TASK_ROOT" in os.environ:
paths.insert(0, os.environ["LAMBDA_TASK_ROOT"])
if "CLONESQUAD_DIR" in os.environ:
paths.append(os.environ["CLONESQUAD_DIR"])
for path in paths:
for sub_path in [".", "custo", "resources" ]:
try:
f = open("%s/%s/%s" % (path, sub_path, filename), "rb")
except:
continue
url_cache[url] = f.read()
return url_cache[url]
log.warning("Fail to read internal url '%s'!" % url)
return None
# s3:// protocol management
if url.startswith("s3://"):
m = re.search("^s3://([-.\w]+)/(.*)", url)
if len(m.groups()) != 2:
return None
bucket, key = [m.group(1), m.group(2)]
client = boto3.client("s3")
try:
response = client.get_object(
Bucket=bucket,
Key=key)
url_cache[url] = response["Body"].read()
return url_cache[url]
except Exception as e:
log.warning("Failed to fetch S3 url '%s' : %s" % (url, e))
return None
# <other>:// protocols management
s = Session()
try:
response = s.get(url)
except Exception as e:
log.warning("Failed to fetch url '%s' : %s" % (url, e))
return None
if response is not None:
url_cache[url] = response.content
return url_cache[url]
return None
def parse_line_as_list_of_dict(string, leading_keyname="_", default=None):
if string is None:
return default
def _remove_escapes(s):
return s.replace("\\;", ";").replace("\\,", ",").replace("\\=", "=")
l = []
for d in re.split("(?<!\\\\);", string):
if d == "": continue
el = re.split("(?<!\\\\),", d)
key = el[0]
if key == "": continue
dct = defaultdict(str)
dct[leading_keyname] = _remove_escapes(key) #.replace("\\,", ",")
for item in el[1:]:
i_el = re.split("(?<!\\\\)=", item, maxsplit=1)
dct[i_el[0]] = _remove_escapes(i_el[1]) if len(i_el) > 1 else True
l.append(dct)
return l
def dynamodb_table_scan(client, table_name, max_size=32*1024*1024):
xray_recorder.begin_subsegment("misc.dynamodb_table_scan")
items = []
size = 0
response = None
while response is None or "LastEvaluatedKey" in response:
query = {
"TableName": table_name,
"ConsistentRead": True
}
if response is not None and "LastEvaluatedKey" in response: query["ExclusiveStartKey"] = response["LastEvaluatedKey"]
response = client.scan(**query)
if "Items" not in response: raise Exception("Failed to scan table '%s'!" % self.table_name)
# Flatten the structure to make it more useable
for i in response["Items"]:
item = {}
for k in i:
item[k] = i[k][list(i[k].keys())[0]]
# Do not manage expired records
if "ExpirationTime" in item:
expiration_time = int(item["ExpirationTime"])
if seconds_from_epoch_utc() > expiration_time:
continue
if max_size != -1:
item_size = 0
for k in item: item_size += len(item[k])
if size + item_size > max_size:
break # Truncate too big DynamoDB table
else:
size += item_size
items.append(item)
log.debug("Table scan of '%s' returned %d items." % (table_name, len(items)))
xray_recorder.end_subsegment()
return items
| [
"jeancharlesjorel@gmail.com"
] | jeancharlesjorel@gmail.com |
97a1c88e30cf7f7e198ab1dfadc80c3db447a9ba | 1a324f9aefc9cc6f858effea02501f0885611c28 | /search_engine_demo/googlesearch/urls.py | 5b1087f1554c44a6d214a1fd5274cc1b42dc6ba4 | [] | no_license | AmamiRena/py | 72c55180209b1c18a5484fa37f4b4d6abac746f9 | 0f9f3b05fed09b2fff329a103426dde718f798cf | refs/heads/master | 2021-10-25T17:30:19.419609 | 2021-10-22T01:26:08 | 2021-10-22T01:26:08 | 143,627,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from django.urls import path
from .views import *
urlpatterns = [
path('', index),
path('search', SearchView.as_view(), name="google-search-view")
] | [
"mayuzumi159@gmail.com"
] | mayuzumi159@gmail.com |
8cb2376ed52ba4138dc95464f109798211500d6a | 4d9b71dc822dd62cade383629ea8ef469d2e83ae | /planning/SpCoNavi0.1.py | d05de2b52e4530add0ef3afd16f9a86a6519b889 | [
"MIT"
] | permissive | sunnySKYwhy/SpCoNavi | cb2eaded8de5c0d5ec254d415dcc3418783db7f1 | 88edac8b204ad58380a00685f7d5159d5d937271 | refs/heads/master | 2023-03-19T23:52:29.411030 | 2020-02-19T11:57:54 | 2020-02-19T11:57:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,383 | py | #coding:utf-8
###########################################################
# SpCoNavi: Spatial Concept-based Path-Planning Program (開発中)
# Akira Taniguchi 2018/12/13-2019/3/10-
###########################################################
##########---遂行タスク---##########
#テスト実行・デバッグ
#ムダの除去・さらなる高速化
##########---作業終了タスク---##########
##文字コードをsjisのままにした
##現状、Xtは2次元(x,y)として計算(角度(方向)θは考慮しない)
##配列はlistかnumpy.arrayかを注意
##地図が大きいとメモリを大量に消費する・処理が重くなる恐れがある
##状態遷移確率(動作モデル)は確定モデルで近似計算する
##range() -> xrange()
##numbaのjitで高速化(?)and並列化(?)
##PathはROSの座標系と2次元配列上のインデックスの両方を保存する
##ViterbiPathの計算でlogを使う:PathWeightMapは確率で計算・保存、Transitionはlogで計算・保存する
##事前計算できるものはできるだけファイル読み込みする形にもできるようにした
###(単語辞書生成、単語認識結果(N-best)、事前計算可能な確率値、Transition(T_horizonごとに保持)、・・・)
##Viterbiの計算処理をTransitionをそのまま使わないように変更した(ムダが多く、メモリ消費・処理時間がかかる要因)
##Viterbiのupdate関数を一部numpy化(高速化)
#sum_i_GaussMultiがnp.arrayになっていなかった(?)⇒np.array化したが計算上変わらないはず (2019/02/17)⇒np.arrayにすると、numbaがエラーを吐くため元に戻した.
###未確認・未使用
#pi_2_pi
#Prob_Triangular_distribution_pdf
#Motion_Model_Odometry
#Motion_Model_Odometry_No_theta
###確認済み
#ReadParameters
#ReadSpeech
#SpeechRecognition
#WordDictionaryUpdate2
#SavePath
#SaveProbMap
#ReadMap
#ReadCostMap
#PathPlanner
#ViterbiPath
##########---保留---##########
#状態遷移確率(動作モデル)を確率モデルで計算する実装
#状態数の削減のための近似手法の実装
#並列処理
#SendPath
#SendProbMap
#PathDistance
#PostProbXt
##############################################
import os
import sys
import glob
import time
import random
import numpy as np
import scipy as sp
#from numpy.random import multinomial #,uniform #,dirichlet
from scipy.stats import multivariate_normal,multinomial #,t,invwishart,rv_discrete
#from numpy.linalg import inv, cholesky
from math import pi as PI
from math import cos,sin,sqrt,exp,log,degrees,radians,atan2 #,gamma,lgamma,fabs,fsum
from __init__ import *
from JuliusNbest_dec import *
from submodules import *
from numba import jit, njit, prange
from scipy.io import mmwrite, mmread
from scipy.sparse import lil_matrix, csr_matrix
from itertools import izip
#マップを読み込む⇒確率値に変換⇒2次元配列に格納
def ReadMap(outputfile):
    """Load the occupancy grid map saved as CSV into a 2-D numpy array.

    Expected location: outputfolder + trialname + navigation_folder + "map.csv".
    """
    map_path = outputfile + "map.csv"
    gridmap = np.loadtxt(map_path, delimiter=",")
    print("Read map: " + map_path)
    return gridmap
#コストマップを読み込む⇒確率値に変換⇒2次元配列に格納
def ReadCostMap(outputfile):
    """Load the navigation cost map saved as CSV into a 2-D numpy array.

    Expected location: outputfolder + trialname + navigation_folder + "costmap.csv".
    """
    costmap_path = outputfile + "costmap.csv"
    costmap = np.loadtxt(costmap_path, delimiter=",")
    # Fix: the log used to say "contmap.csv", which did not match the file actually read.
    print("Read costmap: " + costmap_path)
    return costmap
#場所概念の学習済みパラメータを読み込む
def ReadParameters(particle_num, filename):
    """Load the learned spatial-concept model parameters of one particle.

    Reads the CSV files written by the learning step ('index', 'W_list', 'mu',
    'sig', 'phi', 'pi', 'W', each suffixed with the particle number) and packs
    them into:
        THETA = [W, W_index, Mu, Sig, Pi, Phi_l, K, L]
    where L is the number of spatial concepts and K the number of position
    distributions.
    """
    #THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
    r = particle_num
    i = 0
    # Recover the model sizes from 'index<r>.csv': row 0 -> L, row 1 -> K.
    # len(itemList)-1 presumably drops a trailing empty field -- TODO confirm.
    for line in open(filename + 'index' + str(r) + '.csv', 'r'): ## read
        itemList = line[:-1].split(',')
        #print itemList
        if (i == 0):
            L = len(itemList) -1
        elif (i == 1):
            K = len(itemList) -1
        i += 1
    print "L:",L,"K:",K

    W_index = []
    i = 0
    # Read the word list (only the first row holds the words)
    for line in open(filename + 'W_list' + str(r) + '.csv', 'r'):
        itemList = line[:-1].split(',')
        if(i == 0):
            for j in xrange(len(itemList)):
                if (itemList[j] != ""):
                    W_index = W_index + [itemList[j]]
        i = i + 1

    ##### Containers for the parameters W, mu, Sigma, phi, pi #####
    Mu = [ np.array([ 0.0, 0.0 ]) for i in xrange(K) ] #[ np.array([[ 0.0 ],[ 0.0 ]]) for i in xrange(K) ] # mean (x,y) of each position distribution [K]
    Sig = [ np.array([ [0.0, 0.0],[0.0, 0.0] ]) for i in xrange(K) ] # 2x2 covariance of each position distribution [K]
    W = [ [0.0 for j in xrange(len(W_index))] for c in xrange(L) ] # place-name word multinomial (W_index-dim) per concept [L]
    #theta = [ [0.0 for j in xrange(DimImg)] for c in xrange(L) ]
    Pi = [ 0.0 for c in xrange(L)] # multinomial over spatial-concept indices (L-dim)
    Phi_l = [ [0.0 for i in xrange(K)] for c in xrange(L) ] # multinomial over position-distribution indices (K-dim) per concept [L]

    i = 0
    ## Read Mu (one "x,y" row per position distribution)
    for line in open(filename + 'mu' + str(r) + '.csv', 'r'):
        itemList = line[:-1].split(',')
        Mu[i] = np.array([ float(itemList[0]) , float(itemList[1]) ])
        #Mu[i] = np.array([[ float(itemList[0]) ],[ float(itemList[1]) ]])
        i = i + 1

    i = 0
    ## Read Sig (one row of 4 values per 2x2 covariance, row-major)
    for line in open(filename + 'sig' + str(r) + '.csv', 'r'):
        itemList = line[:-1].split(',')
        Sig[i] = np.array([[ float(itemList[0]), float(itemList[1]) ], [ float(itemList[2]), float(itemList[3]) ]])
        i = i + 1

    ## Read phi (one row per concept; empty fields are skipped)
    c = 0
    for line in open(filename + 'phi' + str(r) + '.csv', 'r'):
        itemList = line[:-1].split(',')
        for i in xrange(len(itemList)):
            if itemList[i] != "":
                Phi_l[c][i] = float(itemList[i])
        c = c + 1

    ## Read Pi (single row; empty fields are skipped)
    for line in open(filename + 'pi' + str(r) + '.csv', 'r'):
        itemList = line[:-1].split(',')
        for i in xrange(len(itemList)):
            if itemList[i] != '':
                Pi[i] = float(itemList[i])

    ## Read W (one row per concept; empty fields are skipped)
    c = 0
    for line in open(filename + 'W' + str(r) + '.csv', 'r'):
        itemList = line[:-1].split(',')
        for i in xrange(len(itemList)):
            if itemList[i] != '':
                #print c,i,itemList[i]
                W[c][i] = float(itemList[i])
        c = c + 1

    """
    ##thetaの読み込み
    c = 0
    #テキストファイルを読み込み
    for line in open(filename + 'theta' + str(r) + '.csv', 'r'):
        itemList = line[:-1].split(',')
        for i in xrange(len(itemList)):
            if itemList[i] != '':
                #print c,i,itemList[i]
                theta[c][i] = float(itemList[i])
        c = c + 1
    """

    THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
    return THETA
#音声ファイルを読み込み
def ReadSpeech(num):
    """Return the num-th WAV file (in sorted order) matching the global
    speech_folder_go glob pattern."""
    wav_files = sorted(glob.glob(speech_folder_go))
    return wav_files[num]
#音声データを受け取り、音声認識を行う⇒文字列配列を渡す・保存
def SpeechRecognition(speech_file, W_index, step, trialname, outputfile):
    """Recognize one utterance and return it as a bag-of-words over W_index.

    Runs Julius N-best recognition (RecogNbest) with the learned word
    dictionary, strips the recognizer's sentence/pause markers, counts how
    often each dictionary word appears across the N-best hypotheses, and
    saves the cleaned recognition strings to
    <outputfile>N<N_best>G<speech_num>_St.csv (Shift-JIS encoded).

    Returns:
        Otb_B: list of word counts aligned with W_index.
    """
    ## N-best speech recognition with the learned word dictionary -> bag-of-words
    St = RecogNbest( speech_file, step, trialname )
    #print St
    Otb_B = [0 for i in xrange(len(W_index))] #[[] for j in xrange(len(St))]
    for j in xrange(len(St)):
        # Strip Julius markers; repeated 5 times so nested/repeated markers
        # are removed as well.
        for i in xrange(5):
            St[j] = St[j].replace("<s>", "")
            St[j] = St[j].replace("</s>", "")
            St[j] = St[j].replace(" <s> ", "")
            St[j] = St[j].replace("<sp>", "")
            St[j] = St[j].replace(" </s>", "")
            St[j] = St[j].replace(" ", " ")  # NOTE(review): no-op as written; possibly meant to collapse double spaces -- confirm
            St[j] = St[j].replace("\n", "")
        print j,St[j]
        # Count dictionary-word occurrences in this hypothesis (bag-of-words)
        Otb = St[j].split(" ")
        for j2 in xrange(len(Otb)):
            #print n,j,len(Otb_Samp[r][n])
            for i in xrange(len(W_index)):
                #print W_index[i].decode('sjis'),Otb[j]
                if (W_index[i].decode('sjis') == Otb[j2] ): #'utf8'
                    Otb_B[i] = Otb_B[i] + 1
                    #print W_index[i].decode('sjis'),Otb[j]
    print Otb_B

    # Save the recognition results to file
    f = open( outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_St.csv" , "w") # , "sjis" )
    for i in xrange(len(St)):
        f.write(St[i].encode('sjis'))
        f.write('\n')
    f.close()
    return Otb_B
#角度を[-π,π]に変換(参考:https://github.com/AtsushiSakai/PythonRobotics)
def pi_2_pi(angle):
    """Normalize an angle (radians) into the interval [-pi, pi)."""
    wrapped = (angle + PI) % (2 * PI)
    return wrapped - PI
#三角分布の確率密度関数
def Prob_Triangular_distribution_pdf(a, b):
    """Triangular distribution density at a (width parameter b), clipped at 0."""
    peak = 1 / (sqrt(6) * b)
    slope = abs(a) / (6 * (b ** 2))
    return max(0, peak - slope)
#確率分布の選択
def Motion_Model_Prob(a, b):
    """Evaluate the motion-noise density at a with variance parameter b.

    The distribution is selected by the module-level MotionModelDist:
    "Gauss" (zero-mean normal) or "Triangular".
    """
    if (MotionModelDist == "Gauss"):
        density = multivariate_normal.pdf(a, 0, b)
    elif (MotionModelDist == "Triangular"):
        density = Prob_Triangular_distribution_pdf(a, b)
    # NOTE: any other MotionModelDist value leaves `density` unbound, exactly
    # as the original code raised for an unknown setting.
    return density
#オドメトリ動作モデル(確率ロボティクスp.122) #現状、不使用
def Motion_Model_Odometry(xt,ut,xt_1):
    """Odometry motion model p(xt | ut, xt-1) (Probabilistic Robotics, p.122).

    Currently unused.

    Args:
        xt: current pose (x', y', theta').
        xt_1: previous pose (x, y, theta).
        ut: odometry reading (xt_1_bar, xt_bar), where
            xt_1_bar = (x_bar, y_bar, theta_bar) and
            xt_bar = (x_dash_bar, y_dash_bar, theta_dash_bar).

    Returns:
        Density of xt as the product of the rot1 / trans / rot2 noise terms.
    """
    x_dash, y_dash, theta_dash = xt
    x, y, theta = xt_1
    xt_1_bar, xt_bar = ut
    x_dash_bar, y_dash_bar, theta_dash_bar = xt_bar
    x_bar, y_bar, theta_bar = xt_1_bar
    # Decompose the odometry reading into rotation-translation-rotation
    delta_rot1 = atan2(y_dash_bar - y_bar, x_dash_bar - x_bar) - theta_bar
    delta_trans = sqrt( (x_dash_bar - x_bar)**2 + (y_dash_bar - y_bar)**2 )
    delta_rot2 = theta_dash_bar - theta_bar - delta_rot1
    # Same decomposition for the hypothesized motion xt_1 -> xt
    delta_rot1_hat = atan2(y_dash - y, x_dash - x) - theta
    delta_trans_hat = sqrt( (x_dash - x)**2 + (y_dash - y)**2 )
    delta_rot2_hat = theta_dash - theta - delta_rot1_hat
    # Noise parameters odom_alpha1..4 come from module-level configuration
    p1 = Motion_Model_Prob(pi_2_pi(delta_rot1 - delta_rot1_hat), odom_alpha1*(delta_rot1_hat**2) + odom_alpha2*(delta_trans_hat**2))
    p2 = Motion_Model_Prob(delta_trans - delta_trans_hat, odom_alpha3*(delta_trans_hat**2) + odom_alpha4*(delta_rot1_hat**2+delta_rot2_hat**2))
    p3 = Motion_Model_Prob(pi_2_pi(delta_rot2 - delta_rot2_hat), odom_alpha1*(delta_rot2_hat**2) + odom_alpha2*(delta_trans_hat**2))
    return p1*p2*p3
#オドメトリ動作モデル(簡略版) #角度は考慮せず、移動量に応じて確率が決まる(ドーナツ型分布)
def Motion_Model_Odometry_No_theta(xt, ut, xt_1):
    """Simplified odometry model without heading: the probability depends only
    on the travelled distance (a doughnut-shaped density around xt_1).

    ut may be relative; the commanded translation per step is taken from the
    module-level cmd_vel instead of ut.
    """
    x_dash, y_dash = xt
    x, y = xt_1
    delta_trans = cmd_vel  # commanded translation (global), not derived from ut
    dx = x_dash - x
    dy = y_dash - y
    delta_trans_hat = sqrt(dx ** 2 + dy ** 2)
    # p(observed translation | commanded translation), variance scaled by odom_alpha3
    return Motion_Model_Prob(delta_trans - delta_trans_hat, odom_alpha3 * (delta_trans_hat ** 2))
#動作モデル(独自) #角度は考慮せず、移動先位置に応じて確率が決まる(ガウス分布)
def Motion_Model_Original(xt, ut, xt_1):
    """Custom motion model: independent noise on x and y around the commanded
    destination xt_1 + ut (heading is ignored)."""
    cur = np.array(xt)
    #ut = np.array(ut)
    prev = np.array(xt_1)
    sq_dist = np.sum((cur - prev) ** 2)  # squared displacement scales the noise
    px = Motion_Model_Prob(cur[0] - (prev[0] + ut[0]), odom_alpha3 * sq_dist)
    py = Motion_Model_Prob(cur[1] - (prev[1] + ut[1]), odom_alpha3 * sq_dist)
    return px * py
#ROSの地図座標系をPython内の2次元配列のインデックス番号に対応付ける
def Map_coordinates_To_Array_index(X):
    """Convert ROS map coordinates to 2-D array indices.

    Uses the module-level map origin and resolution; rounds to the nearest cell.
    """
    pos = np.array(X)
    return np.round((pos - origin) / resolution).astype(int)
#Python内の2次元配列のインデックス番号からROSの地図座標系への変換
def Array_index_To_Map_coordinates(Index):
    """Convert 2-D array indices back to ROS map coordinates (inverse of
    Map_coordinates_To_Array_index, up to rounding)."""
    cell = np.array(Index)
    return np.array((cell * resolution) + origin)
#gridmap and costmap から確率の形のCostMapProbを得ておく
@jit(parallel=True)
def CostMapProb_jit(gridmap, costmap):
    """Turn the ROS cost map into a probability map, masked to free cells.

    A cell keeps (100 - cost) / 100 only where gridmap == 0 (free space);
    occupied (100) and unexplored (-1) cells get probability 0.
    """
    cost_prob = (100.0 - costmap) / 100.0  # cost 0 -> prob 1, cost 100 -> prob 0
    free_mask = 1 * (gridmap == 0)  # 1 on free cells, 0 elsewhere
    return cost_prob * free_mask
#@jit(nopython=True, parallel=True)
@jit(parallel=True) # does not seem to be parallelized? only 1 CPU is used
def PostProbMap_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K):
    """Compute the position posterior over every free map cell.

    For each cell with non-zero CostMapProb, sums the Gaussian position
    distributions (Mu, Sig) weighted by Phi_l and LookupTable_ProbCt over all
    L concepts and K position distributions, then masks the result with
    CostMapProb.
    """
    PostProbMap = np.zeros((map_length,map_width))
    # Naive implementation (heavy use of for loops)
    # memo: np.vectorize or np.frompyfunc might be faster?
    for length in prange(map_length):
        for width in prange(map_width):
            if (CostMapProb[length][width] != 0.0): # skip cells that are obstacles (100) or unexplored (-1)
                X_temp = Array_index_To_Map_coordinates([width, length]) # NOTE(review): confirm the x/y axis order matches the map
                #print X_temp,Mu
                sum_i_GaussMulti = [ np.sum([multivariate_normal.pdf(X_temp, mean=Mu[k], cov=Sig[k]) * Phi_l[c][k] for k in xrange(K)]) for c in xrange(L) ]
                #sum_c_ProbCtsum_i = np.sum( LookupTable_ProbCt * sum_i_GaussMulti )
                PostProbMap[length][width] = np.sum( LookupTable_ProbCt * sum_i_GaussMulti ) #sum_c_ProbCtsum_i
    return CostMapProb * PostProbMap
@jit(parallel=True)
def PostProb_ij(Index_temp,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K):
    """Posterior weight of a single cell (Index_temp = [width, length]).

    NOTE(review): reads the module-global CostMapProb rather than taking it
    as a parameter -- it must be assigned before this function is called.
    """
    if (CostMapProb[Index_temp[1]][Index_temp[0]] != 0.0):
        X_temp = Array_index_To_Map_coordinates(Index_temp)  # check that the map axes match the (x, y) convention
        #print X_temp,Mu
        sum_i_GaussMulti = [ np.sum([multivariate_normal.pdf(X_temp, mean=Mu[k], cov=Sig[k]) * Phi_l[c][k] for k in xrange(K)]) for c in xrange(L) ]  ########## np.array( ) !!! numba raises an error if this is wrapped in np.array
        PostProb = np.sum( LookupTable_ProbCt * sum_i_GaussMulti )  #sum_c_ProbCtsum_i
    else:
        PostProb = 0.0
    return PostProb
#@jit(parallel=True)  # does not seem to parallelize; only one CPU is used
def PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K): #,IndexMap):
    """Same as PostProbMap_jit but builds the map in one nested list
    comprehension over all cells via PostProb_ij."""
    PostProbMap = np.array([ [ PostProb_ij([width, length],Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) for width in xrange(map_width) ] for length in xrange(map_length) ])
    return CostMapProb * PostProbMap
#@jit(nopython=True, parallel=True)
#@jit #(parallel=True)  # raises an error for some reason
def Transition_log_jit(state_num,IndexMap_one_NOzero,MoveIndex_list):
    """Build the dense log transition matrix over the 1-D state space.

    Entries default to approx_log_zero; a transition gets log-prob 0.0 only
    when the destination cell is one of the 8 neighbours (or the cell
    itself) of the source cell AND is itself a movable (non-zero) state.
    """
    #Transition = np.ones((state_num,state_num)) * approx_log_zero
    Transition = [[approx_log_zero for j in range(state_num)] for i in range(state_num)]
    print "Memory OK"
    #print IndexMap_one_NOzero
    # Only transitions from the assumed cell to its 8 adjacent cells (and itself) need to be considered.
    for n in prange(state_num):
        #Index_2D = IndexMap_one_NOzero[n] #.tolist()
        MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n] #.tolist() #Index_2D  # to absolute coordinates
        MoveIndex_list_n_list = MoveIndex_list_n.tolist()
        for c in prange(len(MoveIndex_list_n_list)):
            #print c
            if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):
                m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c])  # c is not necessarily a movable state (cell)
                Transition[n][m] = 0.0 #1  # these indices are state-to-state transitions (not map x,y)
                # print n,m,c
    return Transition
def Transition_sparse_jit(state_num,IndexMap_one_NOzero,MoveIndex_list):
    """Sparse (CSR) variant of Transition_log_jit: entry (n, m) is 1 when m
    is a movable 8-neighbour (or the same cell) of n, otherwise implicit 0."""
    Transition = lil_matrix((state_num,state_num))  #[[0 for j in range(state_num)] for i in range(state_num)]
    print "Memory OK"
    # Only transitions from the assumed cell to its 8 adjacent cells (and itself) need to be considered.
    for n in xrange(state_num):
        #Index_2D = IndexMap_one_NOzero[n] #.tolist()
        MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n] #.tolist() #Index_2D  # to absolute coordinates
        MoveIndex_list_n_list = MoveIndex_list_n.tolist()
        for c in xrange(len(MoveIndex_list_n_list)):
            if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero): #try:
                m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c])  # c is not necessarily a movable state (cell)
                Transition[n,m] = 1  # these indices are state-to-state transitions (not map x,y)
                # print n,m,c
    #Transition_csr = Transition.tocsr()
    #print "Transformed sparse csr format OK"
    return Transition.tocsr()  #Transition_csr
# Global path estimation by dynamic programming (the SpCoNavi computation).
def PathPlanner(S_Nbest, X_init, THETA, CostMapProb): #gridmap, costmap):
    """Plan a path for the spoken command S_Nbest from the start cell X_init.

    Computes (or reads from cache) the per-cell posterior weight map,
    restricts the state space to cells reachable within T_horizon, runs the
    Viterbi algorithm over the 1-D compacted state space, and returns
    (path as 2-D array indices, path in ROS map coordinates, PathWeightMap).
    """
    print "[RUN] PathPlanner"
    # Unpack THETA.
    W, W_index, Mu, Sig, Pi, Phi_l, K, L = THETA
    # Convert the current ROS-frame position into 2-D array indices.
    X_init_index = X_init ###TEST #Map_coordinates_To_Array_index(X_init)
    print "Initial Xt:",X_init_index
    # Measure the length and width (in cells) of the map.
    map_length = len(CostMapProb) #len(costmap)
    map_width = len(CostMapProb[0]) #len(costmap[0])
    print "MAP[length][width]:",map_length,map_width
    # Precompute what can be precomputed.
    LookupTable_ProbCt = np.array([multinomial.pmf(S_Nbest, sum(S_Nbest), W[c])*Pi[c] for c in xrange(L)])  # per-concept probability p(St|W_Ct) x p(Ct|Pi)
    ###SaveLookupTable(LookupTable_ProbCt, outputfile)
    ###LookupTable_ProbCt = ReadLookupTable(outputfile)  # read the precomputed result from file (may not be much faster than recomputing)
    print "Please wait for PostProbMap"
    output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
    if (os.path.isfile(output) == False) or (UPDATE_PostProbMap == 1):  # do not recreate if the file already exists
        #PathWeightMap = PostProbMap_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K)  # might be sped up with multiple CPUs  #CostMapProb * PostProbMap  # not in log form yet, for later processing
        PathWeightMap = PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #,IndexMap)
        # [TEST] save the computed result first.
        SaveProbMap(PathWeightMap, outputfile)
    else:
        PathWeightMap = ReadProbMap(outputfile)
        #print "already exists:", output
    print "[Done] PathWeightMap."
    # [Memory/computation reduction] Drop every cell farther than T_horizon from the initial cell (leaving a [(2*T_horizon)+1][(2*T_horizon)+1] array).
    Bug_removal_savior = 0  # flag to avoid a bug in the coordinate conversion
    x_min = X_init_index[0] - T_horizon
    x_max = X_init_index[0] + T_horizon
    y_min = X_init_index[1] - T_horizon
    y_max = X_init_index[1] + T_horizon
    if (x_min>=0 and x_max<=map_width and y_min>=0 and y_max<=map_length):
        PathWeightMap = PathWeightMap[x_min:x_max+1, y_min:y_max+1] # X[-T+I[0]:T+I[0],-T+I[1]:T+I[1]]
        X_init_index = [T_horizon, T_horizon]
        # Measure the map size (length and width) again.
        map_length = len(PathWeightMap)
        map_width = len(PathWeightMap[0])
    else:
        print "[WARNING] The initial position (or init_pos +/- T_horizon) is outside the map."
        Bug_removal_savior = 1  # avoid the bug (1)
        #print X_init, X_init_index
    # Reduce the number of states (flatten the state space to 1-D and drop zero elements).
    #PathWeight = np.ravel(PathWeightMap)
    PathWeight_one_NOzero = PathWeightMap[PathWeightMap!=0.0]
    state_num = len(PathWeight_one_NOzero)
    print "PathWeight_one_NOzero state_num:", state_num
    # Keep the correspondence between 2-D map indices and the 1-D array.
    IndexMap = np.array([[(i,j) for j in xrange(map_width)] for i in xrange(map_length)])
    IndexMap_one_NOzero = IndexMap[PathWeightMap!=0.0].tolist()  # convert to a list up front  # implemented as a nested list rather than np.array
    print "IndexMap_one_NOzero"
    # Initial position within the 1-D array.
    if (X_init_index in IndexMap_one_NOzero):
        X_init_index_one = IndexMap_one_NOzero.index(X_init_index)
    else:
        print "[ERROR] The initial position is not a movable position on the map."
        #print X_init, X_init_index
        X_init_index_one = 0
    print "Initial index", X_init_index_one
    # List of candidate destination indices (relative coordinates).
    MoveIndex_list = MovePosition_2D([0,0]) #.tolist()
    #MoveIndex_list = np.round(MovePosition(X_init_index)).astype(int)
    print "MoveIndex_list"
    """
    #状態遷移確率(動作モデル)の計算
    print "Please wait for Transition"
    output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse.mtx" # + "_Transition_log.csv"
    if (os.path.isfile(output_transition) == False): #すでにファイルがあれば作成しない
      #IndexMap_one_NOzero内の2次元配列上のインデックスと一致した要素のみ確率1を持つようにする
      #Transition = Transition_log_jit(state_num,IndexMap_one_NOzero,MoveIndex_list)
      Transition = Transition_sparse_jit(state_num,IndexMap_one_NOzero,MoveIndex_list)
      #[TEST]計算結果を先に保存
      #SaveTransition(Transition, outputfile)
      SaveTransition_sparse(Transition, outputfile)
    else:
      Transition = ReadTransition_sparse(state_num, outputfile) #ReadTransition(state_num, outputfile)
      #print "already exists:", output_transition
    Transition_one_NOzero = Transition #[PathWeightMap!=0.0]
    print "[Done] Transition distribution."
    """
    # Run the Viterbi algorithm.
    Path_one = ViterbiPath(X_init_index_one, np.log(PathWeight_one_NOzero), state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init, Bug_removal_savior) #, Transition_one_NOzero)
    # Convert the 1-D array indices to 2-D array indices => ROS coordinate frame.
    Path_2D_index = np.array([ IndexMap_one_NOzero[Path_one[i]] for i in xrange(len(Path_one)) ])
    if ( Bug_removal_savior == 0):
        Path_2D_index_original = Path_2D_index + np.array(X_init) - T_horizon
    else:
        Path_2D_index_original = Path_2D_index
    Path_ROS = Array_index_To_Map_coordinates(Path_2D_index_original)  # ideally this would be emitted in the ROS path format
    #Path = Path_2D_index_original #Path_ROS  # return whichever form is needed as Path
    print "Init:", X_init
    print "Path:\n", Path_2D_index_original
    return Path_2D_index_original, Path_ROS, PathWeightMap
# Candidate next positions: the 8-neighbourhood of the current cell plus the cell itself.
def MovePosition_2D(Xt):
    """Return the nine candidate destination indices around Xt (relative
    offsets scaled by cmd_vel, added to the current 2-D array index)."""
    offsets = np.array([[dx, dy] for dx in (-1, 0, 1) for dy in (-1, 0, 1)])
    return offsets * cmd_vel + np.array(Xt)
# Helper for the Viterbi path computation (cf. https://qiita.com/kkdd/items/6cbd949d03bc56e33e8e)
#@jit(parallel=True)
def update(cost, trans, emiss):
    """One Viterbi forward step for a single state.

    cost  -- list of (accumulated_cost, back_pointer) tuples per state
    trans -- per-state transition scores into this state
    emiss -- emission score of this state
    Returns (best_total + emiss, index_of_best_predecessor); ties resolve
    to the lowest predecessor index.
    """
    totals = []
    for previous, transition in zip(cost, trans):
        totals.append(previous[0] + transition)
    best = max(totals)
    best_index = totals.index(best)
    return best + emiss, best_index
# Unexpectedly slow and T barely advances, so this variant is not adopted.
def update_sparse(cost, trans, emiss):
    """Viterbi forward step that reads the transition row from a sparse
    matrix: zero entries are mapped to approx_log_zero, non-zero to 0.0."""
    COST = 0 #COST, INDEX = range(2) #0,1
    trans_log = [(trans[0,i]==0)*approx_log_zero for i in xrange(trans.get_shape()[1])] #trans.toarray()
    arr = [c[COST]+t for c, t in zip(cost, trans_log)]
    #index = [i for i in xrange(trans.get_shape()[1])]
    #arr = [c[COST]+np.log(trans[0,t]) for c, t in zip(cost, index)]
    max_arr = max(arr)
    #print max_arr + emiss, arr.index(max_arr)
    return max_arr + emiss, arr.index(max_arr)
@jit  # note: @jit can raise errors depending on the code it decorates
def update_lite(cost, n, emiss, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition):
    """Memory-light Viterbi forward step for state n.

    Rebuilds the (shared, mutated in place) Transition row on the fly:
    only the 8 neighbours of cell n (and n itself) that are movable states
    get log-prob 0.0, everything else approx_log_zero.
    """
    #Transition = np.array([approx_log_zero for j in prange(state_num)])  # would need to be rebuilt per emission index
    for i in prange(len(Transition)):
        Transition[i] = approx_log_zero
    # Only transitions to the 8 cells adjacent to the assumed cell (and itself) need to be considered.
    #Index_2D = IndexMap_one_NOzero[n] #.tolist()
    MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n] #Index_2D  # to absolute coordinates
    MoveIndex_list_n_list = MoveIndex_list_n.tolist()
    count_t = 0
    for c in prange(len(MoveIndex_list_n_list)):  # prange is faster than xrange here
        if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):
            m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c])  # c is not necessarily a movable state (cell)
            Transition[m] = 0.0 #1  # these indices are state-to-state transitions (not map x,y)
            count_t += 1
    # Report when the result is computationally suspicious.
    if (count_t == 0):  # every transition is zero: the robot cannot move at all
        print "[ERROR] All transition is approx_log_zero."
    elif (count_t == 1):  # only one transition exists: a single movable cell
        print "[WARNING] One transition is zero."
    #trans = Transition #np.array(Transition)
    arr = cost + Transition #trans
    #max_arr = np.max(arr)
    max_arr_index = np.argmax(arr)
    #return max_arr + emiss, np.where(arr == max_arr)[0][0] #np.argmax(arr)#arr.index(max_arr)
    return arr[max_arr_index] + emiss, max_arr_index
#def transition(m, n):
#    return [[1.0 for i in xrange(m)] for j in xrange(n)]
#def emission(n):
#    return [random.random() for j in xrange(n)]
# Compute the Viterbi path and return the Path (trajectory).
#@jit(parallel=True)  # temporarily disabled because of a print-related(?) error
def ViterbiPath(X_init, PathWeight, state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init_original, Bug_removal_savior): #, Transition):
    """Run the Viterbi algorithm for T_horizon steps over the compacted
    1-D state space and return the most probable path (list of 1-D state
    indices, excluding the initial position and the artificial last step).

    Supports restarting from a saved trellis (T_restart) and periodically
    saving intermediate paths/trellises (SAVE_T_temp / SAVE_Trellis).
    """
    #Path = [[0,0] for t in xrange(T_horizon)]  # cell index [x,y] at each t
    print "Start Viterbi Algorithm"
    INDEX = 1 #COST, INDEX = range(2) #0,1
    INITIAL = (approx_log_zero, X_init) # (cost, index)  # index holds the 1-D array index of the initial position
    #print "Initial:",X_init
    cost = [INITIAL for i in prange(len(PathWeight))]
    cost[X_init] = (0.0, X_init)  # the initial position is given uniquely (probability log(1.0))
    trellis = []
    e = PathWeight #emission(nstates[i])
    m = [i for i in prange(len(PathWeight))] #Transition #transition(nstates[i-1], nstates[i])  # transition from the previous step to the current one
    Transition = np.array([approx_log_zero for j in prange(state_num)])  # shared buffer (would otherwise be passed by reference)
    temp = 1
    #Forward
    print "Forward"
    for i in prange(T_horizon): #len(nstates)):  # advance one cell at a time up to the planning horizon
        # do not reuse i as another indicator inside this for loop
        print "T:",i+1
        if (i+1 == T_restart):
            outputname_restart = outputfile + "T"+str(T_restart)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
            trellis = ReadTrellis(outputname_restart, i+1)
            cost = trellis[-1]
        if (i+1 >= T_restart):
            #cost = [update(cost, t, f) for t, f in zip(m, e)]
            #cost = [update_sparse(cost, Transition[t], f) for t, f in zip(m, e)]  # unexpectedly slow
            cost_np = np.array([cost[c][0] for c in prange(len(cost))])
            #Transition = np.array([approx_log_zero for j in prange(state_num)])  # would be passed by reference
            #cost = [update_lite(cost_np, t, e[t], state_num,IndexMap_one_NOzero,MoveIndex_list) for t in prange(len(e))]
            cost = [update_lite(cost_np, t, f, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition) for t, f in izip(m, e)]  # izip is more memory-efficient; zip runs at the same speed
            trellis.append(cost)
            #print "i", i, [(c[COST], c[INDEX]) for c in cost]  # records where each state came from (the previous node)
            if (SAVE_T_temp == temp):
                #Backward temp
                last = [trellis[-1][j][0] for j in xrange(len(trellis[-1]))]
                path_one = [last.index(max(last))]  # not needed in the end, but required for the computation => could also keep the max-value index of the last node
                #print "last",last,"max",path
                for x in reversed(trellis):
                    path_one = [x[path_one[0]][INDEX]] + path_one
                    #print "x", len(x), x
                path_one = path_one[1:len(path_one)]  # remove the initial position and the extra final transition added for the computation
                SavePathTemp(X_init_original, path_one, i+1, outputname, IndexMap_one_NOzero, Bug_removal_savior)
                if (SAVE_Trellis == 1):
                    SaveTrellis(trellis, outputname, i+1)
                temp = 0
        temp += 1
    # The final transition probability can simply be uniform.
    e_last = [0.0]
    m_last = [[0.0 for i in range(len(PathWeight))]]
    cost = [update(cost, t, f) for t, f in zip(m_last, e_last)]
    trellis.append(cost)
    #Backward
    print "Backward"
    #last = [trellis[-1][i][0] for i in xrange(len(trellis[-1]))]
    path = [0] #[last.index(max(last))]  # not needed in the end, but required for the computation
    #print "last",last,"max",path
    for x in reversed(trellis):
        path = [x[path[0]][INDEX]] + path
        #print "x", len(x), x
    path = path[1:len(path)-1]  # remove the initial position and the extra final transition added for the computation
    print 'Maximum prob path:', path
    return path
# Send the estimated path (via a topic or a service).
#def SendPath(Path):
# Save the path to file (format undecided).
def SavePath(X_init, Path, Path_ROS, outputname):
    """Save the final path (and optionally the initial position) as CSV,
    both as array indices and in ROS map coordinates."""
    print "PathSave"
    if (SAVE_X_init == 1):
        # Save the robot initial position to file (array index).
        np.savetxt(outputname + "_X_init.csv", X_init, delimiter=",")
        # Save the robot initial position to file (ROS coordinates).
        np.savetxt(outputname + "_X_init_ROS.csv", Array_index_To_Map_coordinates(X_init), delimiter=",")
    # Save the result to file (array index).
    np.savetxt(outputname + "_Path.csv", Path, delimiter=",")
    # Save the result to file (ROS coordinates).
    np.savetxt(outputname + "_Path_ROS.csv", Path_ROS, delimiter=",")
    print "Save Path: " + outputname + "_Path.csv and _Path_ROS.csv"
# Save an intermediate path to file (format undecided).
def SavePathTemp(X_init, Path_one, temp, outputname, IndexMap_one_NOzero, Bug_removal_savior):
    """Save the in-progress path at step *temp*: converts 1-D state indices
    back to 2-D array indices and ROS coordinates, then writes both CSVs."""
    print "PathSaveTemp"
    # Convert 1-D array indices to 2-D array indices => ROS coordinate frame.
    Path_2D_index = np.array([ IndexMap_one_NOzero[Path_one[i]] for i in xrange(len(Path_one)) ])
    if ( Bug_removal_savior == 0):
        Path_2D_index_original = Path_2D_index + np.array(X_init) - T_horizon
    else:
        Path_2D_index_original = Path_2D_index
    Path_ROS = Array_index_To_Map_coordinates(Path_2D_index_original) #
    #Path = Path_2D_index_original #Path_ROS  # return whichever form is needed as Path
    # Save the result to file (array index).
    np.savetxt(outputname + "_Path" + str(temp) + ".csv", Path_2D_index_original, delimiter=",")
    # Save the result to file (ROS coordinates).
    np.savetxt(outputname + "_Path_ROS" + str(temp) + ".csv", Path_ROS, delimiter=",")
    print "Save Path: " + outputname + "_Path" + str(temp) + ".csv and _Path_ROS" + str(temp) + ".csv"
def SaveTrellis(trellis, outputname, temp):
    """Save the Viterbi trellis at step *temp* as a .npy file."""
    print "SaveTrellis"
    # Save the result to file.
    np.save(outputname + "_trellis" + str(temp) + ".npy", trellis) #, delimiter=",")
    print "Save trellis: " + outputname + "_trellis" + str(temp) + ".npy"
def ReadTrellis(outputname, temp):
    """Load the Viterbi trellis saved at step *temp* from its .npy file."""
    print "ReadTrellis"
    # Read the result from file.
    trellis = np.load(outputname + "_trellis" + str(temp) + ".npy") #, delimiter=",")
    print "Read trellis: " + outputname + "_trellis" + str(temp) + ".npy"
    return trellis
# Save the LookupTable_ProbCt used for the path computation to file.
def SaveLookupTable(LookupTable_ProbCt, outputfile):
    """Save the per-concept probability lookup table as CSV."""
    # Save the result to file.
    output = outputfile + "LookupTable_ProbCt.csv"
    np.savetxt( output, LookupTable_ProbCt, delimiter=",")
    print "Save LookupTable_ProbCt: " + output
# Read the LookupTable_ProbCt used for the path computation from file.
def ReadLookupTable(outputfile):
    """Load the per-concept probability lookup table from its CSV file."""
    # Read the result from file.
    output = outputfile + "LookupTable_ProbCt.csv"
    LookupTable_ProbCt = np.loadtxt(output, delimiter=",")
    print "Read LookupTable_ProbCt: " + output
    return LookupTable_ProbCt
# Save the probability-form cost map used for the path computation.
def SaveCostMapProb(CostMapProb, outputfile):
    """Save the probability cost map as CSV."""
    # Save the result to file.
    output = outputfile + "CostMapProb.csv"
    np.savetxt( output, CostMapProb, delimiter=",")
    print "Save CostMapProb: " + output
# Read the probability-form cost map used for the path computation.
def ReadCostMapProb(outputfile):
    """Load the probability cost map from its CSV file."""
    # Read the result from file.
    output = outputfile + "CostMapProb.csv"
    CostMapProb = np.loadtxt(output, delimiter=",")
    print "Read CostMapProb: " + output
    return CostMapProb
# Send the probability map used for the path computation (via a topic or a service).
#def SendProbMap(PathWeightMap):
# Save the probability map used for the path computation to file.
def SaveProbMap(PathWeightMap, outputfile):
    """Save the per-cell path weight map as CSV (file name encodes N_best
    and the speech command number)."""
    # Save the result to file.
    output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
    np.savetxt( output, PathWeightMap, delimiter=",")
    print "Save PathWeightMap: " + output
# Read the probability map used for the path computation from file.
def ReadProbMap(outputfile):
    """Load the per-cell path weight map from its CSV file."""
    # Read the result from file.
    output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
    PathWeightMap = np.loadtxt(output, delimiter=",")
    print "Read PathWeightMap: " + output
    return PathWeightMap
def SaveTransition(Transition, outputfile):
# 結果をファイル保存
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_log.csv"
#np.savetxt(outputfile + "_Transition_log.csv", Transition, delimiter=",")
f = open( output_transition , "w")
for i in xrange(len(Transition)):
for j in xrange(len(Transition[i])):
f.write(str(Transition[i][j]) + ",")
f.write('\n')
f.close()
print "Save Transition: " + output_transition
def ReadTransition(state_num, outputfile):
    """Load the dense log transition matrix written by SaveTransition.

    Cells missing from the CSV keep the default approx_log_zero; empty
    fields (from the trailing comma per row) are skipped.
    """
    Transition = [[approx_log_zero for j in xrange(state_num)] for i in xrange(state_num)]
    # Read the result from file.
    output_transition = outputfile + "T"+str(T_horizon) + "_Transition_log.csv"
    #Transition = np.loadtxt(outputfile + "_Transition_log.csv", delimiter=",")
    i = 0
    # Read the text file line by line.
    for line in open(output_transition, 'r'):
        itemList = line[:-1].split(',')
        for j in xrange(len(itemList)):
            if itemList[j] != '':
                Transition[i][j] = float(itemList[j])
        i = i + 1
    print "Read Transition: " + output_transition
    return Transition
def SaveTransition_sparse(Transition, outputfile):
    """Save the sparse transition matrix in MatrixMarket (.mtx) format."""
    # Save the result to file (.mtx format).
    output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse"
    mmwrite(output_transition, Transition)
    print "Save Transition: " + output_transition
def ReadTransition_sparse(state_num, outputfile):
    """Load the sparse transition matrix from its MatrixMarket file as CSR."""
    #Transition = [[0 for j in xrange(state_num)] for i in xrange(state_num)]
    # Read the result from file.
    output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse.mtx"
    Transition = mmread(output_transition).tocsr() #.todense()
    print "Read Transition: " + output_transition
    return Transition
## Read the base word dictionary and write it back with the new words added.
def WordDictionaryUpdate2(step, filename, W_list):
    """Build the Julius word dictionary file WDnavi.htkdic.

    Reads the base dictionary ('./lang_m/' + lang_init), looks up the
    phoneme transcription (hatsuon) for each word in W_list (shift-jis,
    matching up to 3 characters at a time), optionally rewrites the phoneme
    position markers (_S/_B/_I/_E) for the Julius v4.4 DNN acoustic model,
    and writes the first 3 header lines of the base dictionary plus one
    noun entry per new, non-duplicate word.
    """
    LIST = []
    LIST_plus = []
    i_best = len(W_list)
    hatsuon = [ "" for i in xrange(i_best) ]
    TANGO = []
    ## Read the base word dictionary.
    for line in open('./lang_m/' + lang_init, 'r'):
        itemList = line[:-1].split(' ')
        LIST = LIST + [line]
        for j in xrange(len(itemList)):
            itemList[j] = itemList[j].replace("[", "")
            itemList[j] = itemList[j].replace("]", "")
        TANGO = TANGO + [[itemList[1],itemList[2]]]
    #print TANGO
    if (1):
        ## Process each word in W_list in turn.
        for c in xrange(i_best): # i_best = len(W_list)
            #W_list_sj = unicode(MI_best[c][i], encoding='shift_jis')
            W_list_sj = unicode(W_list[c], encoding='shift_jis')
            if len(W_list_sj) != 1:  ## exclude one-character words
                #for moji in xrange(len(W_list_sj)):
                moji = 0
                while (moji < len(W_list_sj)):
                    flag_moji = 0
                    #print len(W_list_sj),str(W_list_sj),moji,W_list_sj[moji]#,len(unicode(W_list[i], encoding='shift_jis'))
                    # First try a 3-character pattern of the form "X_Y".
                    for j in xrange(len(TANGO)):
                        if (len(W_list_sj)-2 > moji) and (flag_moji == 0):
                            #print TANGO[j],j
                            #print moji
                            if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]+"_"+W_list_sj[moji+2]) and (W_list_sj[moji+1] == "_"):
                                ###print moji,j,TANGO[j][0]
                                hatsuon[c] = hatsuon[c] + TANGO[j][1]
                                moji = moji + 3
                                flag_moji = 1
                    # Then try a 2-character pattern.
                    for j in xrange(len(TANGO)):
                        if (len(W_list_sj)-1 > moji) and (flag_moji == 0):
                            #print TANGO[j],j
                            #print moji
                            if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]+W_list_sj[moji+1]):
                                ###print moji,j,TANGO[j][0]
                                hatsuon[c] = hatsuon[c] + TANGO[j][1]
                                moji = moji + 2
                                flag_moji = 1
                    #print len(W_list_sj),moji
                    # Finally fall back to a single character.
                    for j in xrange(len(TANGO)):
                        if (len(W_list_sj) > moji) and (flag_moji == 0):
                            #else:
                            if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]):
                                ###print moji,j,TANGO[j][0]
                                hatsuon[c] = hatsuon[c] + TANGO[j][1]
                                moji = moji + 1
                                flag_moji = 1
                print W_list_sj,hatsuon[c]
            else:
                print W_list_sj, "(one name)" #W_list[c]
    print JuliusVer,HMMtype
    if (JuliusVer == "v4.4" and HMMtype == "DNN"):
        # Rewrite every phoneme marker in hatsuon to "*_I".
        for i in xrange(len(hatsuon)):
            hatsuon[i] = hatsuon[i].replace("_S","_I")
            hatsuon[i] = hatsuon[i].replace("_B","_I")
            hatsuon[i] = hatsuon[i].replace("_E","_I")
        # Rewrite the first phoneme of each word to "*_B".
        for i in xrange(len(hatsuon)):
            #onsohyoki_index = onsohyoki.find(target)
            hatsuon[i] = hatsuon[i].replace("_I","_B", 1)
            # Rewrite the last phoneme of each word to "*_E".
            hatsuon[i] = hatsuon[i][0:-2] + "E "
            # Exception handling for the N and q phonemes.
            hatsuon[i] = hatsuon[i].replace("q_S","q_I")
            hatsuon[i] = hatsuon[i].replace("q_B","q_I")
            hatsuon[i] = hatsuon[i].replace("N_S","N_I")
            #print type(hatsuon),hatsuon,type("N_S"),"N_S"
    ## Part-of-speech tag appended to each place-name word.
    meishi = u'名詞'
    meishi = meishi.encode('shift-jis')
    ## Generate the word dictionary file.
    fp = open( filename + '/WDnavi.htkdic', 'w')
    for list in xrange(len(LIST)):
        if (list < 3):
            fp.write(LIST[list])
    #if (UseLM == 1):
    if (1):
        ## Append the new words.
        c = 0
        for mi in xrange(i_best): # i_best = len(W_list)
            if hatsuon[mi] != "":
                if ((W_list[mi] in LIST_plus) == False):  # exclude duplicate words
                    flag_tango = 0
                    for j in xrange(len(TANGO)):
                        if(W_list[mi] == TANGO[j][0]):
                            flag_tango = -1
                    if flag_tango == 0:
                        LIST_plus = LIST_plus + [W_list[mi]]
                        fp.write(LIST_plus[c] + "+" + meishi +" [" + LIST_plus[c] + "] " + hatsuon[mi])
                        fp.write('\n')
                        c = c+1
    fp.close()
########################################
if __name__ == '__main__':
    # Entry point: read learned SpCoSLAM parameters, recognize the spoken
    # command, plan the path, and save the results (optionally timing each
    # phase when SAVE_time == 1).
    print "[START] SpCoNavi."
    # Folder name of the learned parameters (trial).
    trialname = sys.argv[1]
    #print trialname
    #trialname = raw_input("trialname?(folder) >")
    # Particle number to read.
    particle_num = sys.argv[2] #0
    # Candidate number of the robot initial position.
    init_position_num = sys.argv[3] #0
    # File number of the speech command.
    speech_num = sys.argv[4] #0
    i = 0
    # Read the weight file; its first line holds the max-likelihood sample index.
    for line in open(datafolder + trialname + '/'+ str(step) + '/weights.csv', 'r'): ## read
        if (i == 0):
            MAX_Samp = int(line)
        i += 1
    # Use the particle with the maximum likelihood.
    particle_num = MAX_Samp
    if (SAVE_time == 1):
        # Keep the start time.
        start_time = time.time()
    ## Full path of the parameter folder.
    filename = datafolder + trialname + "/" + str(step) +"/"
    print filename, particle_num
    outputfile = outputfolder + trialname + navigation_folder
    outputname = outputfile + "T"+str(T_horizon)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
    #Makedir( outputfolder + trialname )
    Makedir( outputfile )
    #Makedir( outputname )
    # Read the learned parameters.  #THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
    THETA = ReadParameters(particle_num, filename)
    W_index = THETA[1]
    ## Register the word dictionary.
    if (os.path.isfile(filename + '/WDnavi.htkdic') == False):  # do not create it if the dictionary file already exists
        WordDictionaryUpdate2(step, filename, W_index)
    else:
        print "Word dictionary already exists:", filename + '/WDnavi.htkdic'
    if (os.path.isfile(outputfile + "CostMapProb.csv") == False):  # do not recompute if the file already exists
        ## Read the occupancy grid map.
        gridmap = ReadMap(outputfile)
        ## Read the cost map.
        costmap = ReadCostMap(outputfile)
        # Convert the cost map into probability form.
        CostMapProb = CostMapProb_jit(gridmap, costmap)
        # Write out the probability-form cost map.
        SaveCostMapProb(CostMapProb, outputfile)
    else:
        # Read the probability-form cost map.
        CostMapProb = ReadCostMapProb(outputfile)
    ## Read the speech file.
    speech_file = ReadSpeech(int(speech_num))
    if (SAVE_time == 1):
        # Keep the speech-recognition start time (= end of initialization/reading).
        start_recog_time = time.time()
        time_init = start_recog_time - start_time
        fp = open( outputname + "_time_init.txt", 'w')
        fp.write(str(time_init)+"\n")
        fp.close()
    # Speech recognition.
    S_Nbest = SpeechRecognition(speech_file, W_index, step, trialname, outputfile)
    if (SAVE_time == 1):
        # Keep the speech-recognition end time (= path-planning start time).
        end_recog_time = time.time()
        time_recog = end_recog_time - start_recog_time
        fp = open( outputname + "_time_recog.txt", 'w')
        fp.write(str(time_recog)+"\n")
        fp.close()
    # Path planning.
    Path, Path_ROS, PathWeightMap = PathPlanner(S_Nbest, X_candidates[int(init_position_num)], THETA, CostMapProb) #gridmap, costmap)
    if (SAVE_time == 1):
        # Keep the path-planning end time.
        end_pp_time = time.time()
        time_pp = end_pp_time - end_recog_time
        fp = open( outputname + "_time_pp.txt", 'w')
        fp.write(str(time_pp)+"\n")
        fp.close()
    # Travel distance of the path.
    #Distance = PathDistance(Path)
    # Send the path.
    #SendPath(Path)
    # Save the path.
    SavePath(X_candidates[int(init_position_num)], Path, Path_ROS, outputname)
    # Send the probability map.
    #SendProbMap(PathWeightMap)
    # Save the probability map (done inside PathPlanner).
    #####SaveProbMap(PathWeightMap, outputname)
    print "[END] SpCoNavi."
########################################
| [
"a.taniguchi@em.ci.ritsumei.ac.jp"
] | a.taniguchi@em.ci.ritsumei.ac.jp |
9102058651fbf91cbac1b616a121c35f0eb0973e | 8ab173ee437170afd5e4179f4e44d46b829f3ab0 | /Validation/RecoTrack/python/plotting/html.py | 04c09289f18ce2745bf5d1c2b56af89db89b9cc2 | [
"Apache-2.0"
] | permissive | suchandradutta/cmssw | 6b085313fe15868bd3f7dfddfb850debe111410e | ed3aa96ca24548294076d466db17b0bca44d1359 | refs/heads/Phase2Digitizer_91X_170420 | 2023-06-25T09:47:56.111691 | 2017-04-20T09:59:31 | 2017-04-20T09:59:31 | 12,500,444 | 1 | 1 | null | 2018-11-06T10:34:46 | 2013-08-31T04:15:48 | C++ | UTF-8 | Python | false | false | 25,818 | py | import os
import collections
def _lowerFirst(s):
return s[0].lower()+s[1:]
_sampleName = {
"RelValMinBias": "Min Bias",
"RelValTTbar": "TTbar",
"RelValQCD_Pt_600_800": "QCD Pt 600 to 800",
"RelValQCD_Pt_3000_3500": "QCD Pt 3000 to 3500",
"RelValQCD_FlatPt_15_3000": "QCD Flat Pt 15 to 3000",
"RelValZMM": "ZMuMu",
"RelValWjet_Pt_3000_3500": "Wjet Pt 3000 to 3500",
"RelValH125GGgluonfusion": "Higgs to gamma gamma",
"RelValSingleElectronPt35": "Single Electron Pt 35",
"RelValSingleElectronPt35Extended": "Single Electron Pt 35 (extended eta)",
"RelValSingleElectronPt10": "Single Electron Pt 10",
"RelValSingleMuPt10": "Single Muon Pt 10",
"RelValSingleMuPt10Extended": "Single Muon Pt 10 (extended eta)",
"RelValSingleMuPt100": "Single Muon Pt 100",
"RelValTenMuE_0_200": "Ten muon Pt 0-200",
}
_sampleFileName = {
"RelValMinBias": "minbias",
"RelValTTbar": "ttbar",
"RelValQCD_Pt_600_800": "qcd600",
"RelValQCD_Pt_3000_3500": "qcd3000",
"RelValQCD_FlatPt_15_3000": "qcdflat",
"RelValZMM": "zmm",
"RelValWjet_Pt_3000_3500": "wjet3000",
"RelValH125GGgluonfusion": "hgg",
"RelValSingleElectronPt35": "ele35",
"RelValSingleElectronPt35Extended": "ele35ext",
"RelValSingleElectronPt10": "ele10",
"RelValSingleMuPt10": "mu10",
"RelValSingleMuPt10Extended": "mu10ext",
"RelValSingleMuPt100": "mu100",
"RelValTenMuE_0_200": "tenmu200",
}
_allTPEfficName = "All tracks (all TPs)"
_fromPVName = "Tracks from PV"
_fromPVAllTPName = "Tracks from PV (all TPs)"
_conversionName = "Tracks for conversions"
_gsfName = "Electron GSF tracks"
def _toHP(s):
    """Prefix *s* with "High purity ", lower-casing its first letter."""
    prefix = "High purity "
    return prefix + _lowerFirst(s)
def _allToHP(s):
return s.replace("All", "High purity")
def _ptCut(s):
return s.replace("Tracks", "Tracks pT > 0.9 GeV").replace("tracks", "tracks pT > 0.9 GeV")
# Ordered mapping from track-quality suffix (as used in DQM folder names)
# to the human-readable section title; the order defines page ordering.
_trackQualityNameOrder = collections.OrderedDict([
    ("seeding_seeds", "Seeds"),
    ("seeding_seedsa", "Seeds A"),
    ("seeding_seedsb", "Seeds B"),
    ("seeding_seedstripl", "Seeds triplets"),
    ("seeding_seedspair", "Seeds pairs"),
    ("building_", "Built tracks"),
    ("", "All tracks"),
    ("highPurity", "High purity tracks"),
    ("Pt09", "Tracks pT > 0.9 GeV"),
    ("highPurityPt09", "High purity tracks pT > 0.9 GeV"),
    ("ByOriginalAlgo", "All tracks by originalAlgo"),
    ("highPurityByOriginalAlgo", "High purity tracks by originalAlgo"),
    ("ByAlgoMask", "All tracks by algoMask"),
    ("highPurityByAlgoMask", "High purity tracks by algoMask"),
    ("btvLike", "BTV-like"),
    ("ak4PFJets", "AK4 PF jets"),
    ("allTPEffic_", _allTPEfficName),
    ("allTPEffic_highPurity", _allToHP(_allTPEfficName)),
    ("fromPV_", _fromPVName),
    ("fromPV_highPurity", _toHP(_fromPVName)),
    ("fromPV_Pt09", _ptCut(_fromPVName)),
    ("fromPV_highPurityPt09", _toHP(_ptCut(_fromPVName))),
    ("fromPVAllTP_", _fromPVAllTPName),
    ("fromPVAllTP_highPurity", _toHP(_fromPVAllTPName)),
    ("fromPVAllTP_Pt09", _ptCut(_fromPVAllTPName)),
    ("fromPVAllTP_highPurityPt09", _toHP(_ptCut(_fromPVAllTPName))),
    ("fromPVAllTP2_", _fromPVAllTPName.replace("PV", "PV v2")),
    ("fromPVAllTP2_highPurity", "High purity "+_lowerFirst(_fromPVAllTPName).replace("PV", "PV v2")),
    ("fromPVAllTP2_Pt09", _fromPVAllTPName.replace("Tracks", "Tracks pT > 0.9 GeV").replace("PV", "PV v2")),
    ("fromPVAllTP2_highPurityPt09", _toHP(_ptCut(_fromPVAllTPName)).replace("PV", "PV v2")),
    ("conversion_", _conversionName),
    ("gsf_", _gsfName),
])
# Display names for the legacy iterX track algorithm labels; other
# algorithm names are shown as-is.
_trackAlgoName = {
    "ootb": "Out of the box",
    "iter0" : "Iterative Step 0",
    "iter1" : "Iterative Step 1",
    "iter2" : "Iterative Step 2",
    "iter3" : "Iterative Step 3",
    "iter4" : "Iterative Step 4",
    "iter5" : "Iterative Step 5",
    "iter6" : "Iterative Step 6",
    "iter7" : "Iterative Step 7",
    "iter9" : "Iterative Step 9",
    "iter10": "Iterative Step 10",
}
# Canonical ordering of tracking algorithms/iterations on the pages.
_trackAlgoOrder = [
    'ootb',
    'initialStepPreSplitting',
    'initialStep',
    'highPtTripletStep',
    'detachedQuadStep',
    'detachedTripletStep',
    'lowPtQuadStep',
    'lowPtTripletStep',
    'pixelPairStep',
    'mixedTripletStep',
    'pixelLessStep',
    'tobTecStep',
    'jetCoreRegionalStep',
    'muonSeededStepInOut',
    'muonSeededStepOutIn',
    'duplicateMerge',
    'convStep',
    'conversionStep',
    'ckfInOutFromConversions',
    'ckfOutInFromConversions',
    'electronGsf',
    'iter0',
    'iter1',
    'iter2',
    'iter3',
    'iter4',
    'iter5',
    'iter6',
    'iter7',
    'iter9',
    'iter10',
]
# Display titles of the top-level pages.
_pageNameMap = {
    "summary": "Summary",
    "vertex": "Vertex",
    "v0": "V0",
    "miniaod": "MiniAOD",
    "timing": "Timing",
    "hlt": "HLT",
}
# Ordered mapping from section key to its display title (summary page,
# vertex collections, and V0 sections).
_sectionNameMapOrder = collections.OrderedDict([
    # These are for the summary page
    ("seeding_seeds", "Seeds"),
    ("building", "Built tracks"),
    ("", "All tracks"),
    ("highPurity", "High purity tracks"),
    ("btvLike", "BTV-like"),
    ("ak4PFJets", "AK4 PF jets"),
    ("allTPEffic", _allTPEfficName),
    ("allTPEffic_highPurity", _allTPEfficName.replace("All", "High purity")),
    ("fromPV", _fromPVName),
    ("fromPV_highPurity", "High purity "+_lowerFirst(_fromPVName)),
    ("fromPVAllTP", _fromPVAllTPName),
    ("fromPVAllTP_highPurity", "High purity "+_lowerFirst(_fromPVAllTPName)),
    ("conversion", _conversionName),
    ("gsf", _gsfName),
    # These are for vertices
    ("genvertex", "Gen vertices"),
    ("pixelVertices", "Pixel vertices"),
    ("selectedPixelVertices", "Selected pixel vertices"),
    ("firstStepPrimaryVerticesPreSplitting", "firstStepPrimaryVerticesPreSplitting"),
    ("firstStepPrimaryVertices", "firstStepPrimaryVertices"),
    ("offlinePrimaryVertices", "All vertices (offlinePrimaryVertices)"),
    ("selectedOfflinePrimaryVertices", "Selected vertices (selectedOfflinePrimaryVertices)"),
    ("offlinePrimaryVerticesWithBS", "All vertices with BS constraint"),
    ("selectedOfflinePrimaryVerticesWithBS", "Selected vertices with BS constraint"),
    # These are for V0
    ("k0", "K0"),
    ("lambda", "Lambda"),
])
# Longer legend texts explaining the track-selection/TP-matching conventions.
_allTPEfficLegend = "All tracks, efficiency denominator contains all TrackingParticles"
_fromPVLegend = "Tracks from reco PV vs. TrackingParticles from gen PV (fake rate includes pileup tracks)"
_fromPVPtLegend = "Tracks (pT > 0.9 GeV) from reco PV vs. TrackingParticles from gen PV (fake rate includes pileup tracks)"
_fromPVAllTPLegend = "Tracks from reco PV, fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTPPtLegend = "Tracks (pT > 0.9 GeV) from reco PV, fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTP2Legend = "Tracks from reco PV (another method), fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTPPt2Legend = "Tracks (pT > 0.9 GeV) from reco PV (another method), fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
def _sectionNameLegend():
    """Return the mapping from section key to the legend text shown on the page."""
    return {
        "btvLike": "BTV-like selected tracks",
        "ak4PFJets": "Tracks from AK4 PF jets (jet corrected pT > 10 GeV)",
        "allTPEffic": _allTPEfficLegend,
        "allTPEffic_": _allTPEfficLegend,
        "allTPEffic_highPurity": _allToHP(_allTPEfficLegend),
        "fromPV": _fromPVLegend,
        "fromPV_": _fromPVLegend,
        "fromPV_highPurity": _toHP(_fromPVLegend),
        "fromPV_Pt09": _fromPVPtLegend,
        "fromPV_highPurity_Pt09": _toHP(_fromPVPtLegend),
        "fromPVAllTP": _fromPVAllTPLegend,
        "fromPVAllTP_": _fromPVAllTPLegend,
        "fromPVAllTP_highPurity": _toHP(_fromPVAllTPLegend),
        "fromPVAllTP_Pt09": _fromPVAllTPPtLegend,
        "fromPVAllTP_highPurityPt09": _toHP(_fromPVAllTPPtLegend),
        "fromPVAllTP2_": _fromPVAllTP2Legend,
        "fromPVAllTP2_highPurity": _toHP(_fromPVAllTP2Legend),
        "fromPVAllTP2_Pt09": _fromPVAllTPPt2Legend,
        "fromPVAllTP2_highPurityPt09": _toHP(_fromPVAllTPPt2Legend),
    }
class Table:
    """Holder for a 2-D table of values stored column-major
    (table[column][row]) together with its row/column headers and the
    purpose/page/section it belongs to."""
    # table [column][row]
    def __init__(self, columnHeaders, rowHeaders, table, purpose, page, section):
        # Validate that the headers agree with the table shape before storing.
        if len(columnHeaders) != len(table):
            raise Exception("Got %d columnHeaders for table with %d columns for page %s, section %s" % (len(columnHeaders), len(table), page, section))
        expectedRows = len(table[0])
        for icol in range(len(table)):
            if len(table[icol]) != expectedRows:
                raise Exception("Got non-square table, first column has %d rows, column %d has %d rows" % (expectedRows, icol, len(table[icol])))
        if len(rowHeaders) != expectedRows:
            raise Exception("Got %d rowHeaders for table with %d rows" % (len(rowHeaders), expectedRows))
        (self._columnHeaders, self._rowHeaders, self._table) = (columnHeaders, rowHeaders, table)
        (self._purpose, self._page, self._section) = (purpose, page, section)
    def getPurpose(self):
        """Return the PlotPurpose marker of this table."""
        return self._purpose
    def getPage(self):
        return self._page
    def getSection(self):
        return self._section
    def ncolumns(self):
        return len(self._table)
    def nrows(self):
        return len(self._table[0])
    def columnHeaders(self):
        return self._columnHeaders
    def rowHeaders(self):
        return self._rowHeaders
    def tableAsColumnRow(self):
        """Return the data column-major, exactly as stored."""
        return self._table
    def tableAsRowColumn(self):
        """Return the data transposed to row-major form."""
        return map(list, zip(*self._table))
class PlotPurpose:
    """Namespace of marker classes used to tag plots/tables by purpose.

    The nested classes carry no behaviour; they serve as unique tag values
    (e.g. dictionary keys in IndexSection) to route content to the right
    page set.
    """
    class TrackingIteration: pass
    class TrackingSummary: pass
    class Vertexing: pass
    class MiniAOD: pass
    class Timing: pass
    class HLT: pass
class Page(object):
    """One HTML page of the validation report.

    Plot-file links (addPlotSet) and Table objects (addTable) are collected
    per section and rendered to a file in write().
    """
    def __init__(self, title, sampleName):
        self._content = [
            '<html>',
            ' <head>',
            '  <title>%s</title>' % title,
            ' </head>',
            ' <body>',
            '  '+sampleName,
            '  <br/>',
            '  <br/>',
        ]

        self._plotSets = {}
        self._tables = {}

    def addPlotSet(self, section, plotSet):
        """Add the list of plot files *plotSet* to *section*, appending if the
        section already exists."""
        if section in self._plotSets:
            self._plotSets[section].extend(plotSet)
        else:
            self._plotSets[section] = plotSet

    def addTable(self, section, table):
        """Set the Table shown for *section* (at most one table per section)."""
        self._tables[section] = table

    def isEmpty(self):
        """True if the page has neither plot files nor tables."""
        for plotSet in self._plotSets.itervalues():
            if len(plotSet) > 0:
                return False

        if len(self._tables) > 0:
            return False
        return True

    def write(self, fileName):
        """Render the page content and write it to *fileName*."""
        self._legends = []
        self._sectionLegendIndex = {}
        self._columnHeaders = []
        self._columnHeadersIndex = {}

        self._formatPlotSets()
        self._formatTables()
        self._formatLegend()

        self._content.extend([
            ' </body>',
            '</html>',
        ])

        #print "Writing HTML report page", fileName
        # 'with' guarantees the handle is closed even if a write fails
        with open(fileName, "w") as f:
            for line in self._content:
                f.write(line)
                f.write("\n")

    def _appendLegend(self, section):
        """Return the superscript marker for *section*'s legend, registering
        the legend text on first use."""
        leg = ""
        legends = _sectionNameLegend()
        if section in legends:
            if section in self._sectionLegendIndex:
                leg = self._sectionLegendIndex[section]
            else:
                legnum = len(self._legends)+1
                leg = "<sup>%d</sup>" % legnum
                leg2 = "<sup>%d)</sup>" % legnum
                self._legends.append("%s %s" % (leg2, legends[section]))
                self._sectionLegendIndex[section] = leg
        return leg

    def _formatPlotSets(self):
        """Emit a table with one column per section; each row links the plot
        files sharing the same basename."""
        # One list element per output HTML line (missing commas previously
        # merged some of these string literals into a single line).
        self._content.extend([
            '  <table>',
            '   <tr>',
        ])

        # fileTable rows: [basename, link-or-None per section so far, ...]
        fileTable = []

        sections = self._orderSets(self._plotSets.keys())
        for isec, section in enumerate(sections):
            leg = self._appendLegend(section)

            self._content.extend([
                '   <td>%s%s</td>' % (self._mapSectionName(section), leg),
            ])
            files = [(os.path.basename(f), f) for f in self._plotSets[section]]
            for row in fileTable:
                found = False
                for i, (bsf, f) in enumerate(files):
                    if bsf == row[0]:
                        row.append(f)
                        found = True
                        del files[i]
                        break
                if not found:
                    row.append(None)
            for bsf, f in files:
                # file not seen in any earlier section: pad earlier columns
                fileTable.append( [bsf] + [None]*isec + [f] )

        self._content.extend([
            '  </tr>',
        ])
        for row in fileTable:
            self._content.append('  <tr>')
            bs = row[0]
            for elem in row[1:]:
                if elem is not None:
                    self._content.append('   <td><a href="%s">%s</a></td>' % (elem, bs))
                else:
                    self._content.append('   <td></td>')
            self._content.append('  </tr>')
        self._content.extend([
            '  </table>',
        ])

    def _appendColumnHeader(self, header):
        """Return the single-letter replacement for a long column header,
        registering the expansion for the footnote list."""
        leg = ""
        if header in self._columnHeadersIndex:
            leg = self._columnHeadersIndex[header]
        else:
            leg = str(chr(ord('A')+len(self._columnHeaders)))
            self._columnHeaders.append("%s: %s" % (leg, header))
            self._columnHeadersIndex[header] = leg
        return leg

    def _formatTables(self):
        """Emit one HTML table per section stored in self._tables."""
        def _allNone(row):
            # True if every cell in the row is missing
            for item in row:
                if item is not None:
                    return False
            return True

        sections = self._orderSets(self._tables.keys())
        for isec, section in enumerate(sections):
            leg = self._appendLegend(section)
            table = self._tables[section]
            self._content.extend([
                '  <br/>',
                '  %s%s' % (self._mapSectionName(section), leg),
                '  <table border="1">'
            ])

            # table is stored in column-row, need to transpose
            data = table.tableAsRowColumn()

            # header row; the leading empty cell sits above the row headers
            # (a missing comma previously merged these two literals)
            self._content.extend([
                '   <tr>',
                '   <td></td>',
            ])
            heads = table.columnHeaders()
            if max(map(lambda h: len(h), heads)) > 20:
                # long headers are shortened to letters, explained below the table
                heads = [self._appendColumnHeader(h) for h in heads]
            for head in heads:
                self._content.append('    <td>%s</td>' % head)
            self._content.append('   </tr>')

            for irow, row in enumerate(data):
                # Skip row if all values are non-existent
                if _allNone(row):
                    continue

                self._content.extend([
                    '   <tr>',
                    '   <td>%s</td>' % table.rowHeaders()[irow],
                ])
                # align the number columns to right
                for icol, item in enumerate(row):
                    formatted = str(item) if item is not None else ""
                    self._content.append('    <td align="right">%s</td>' % formatted)
                self._content.append('   </tr>')

            self._content.append('  </table>')

            for shortenedColumnHeader in self._columnHeaders:
                self._content.append('   %s<br/>' % shortenedColumnHeader)
            self._columnHeaders = []
            self._columnHeadersIndex = {}

    def _formatLegend(self):
        """Emit the 'Details' footnote block collected by _appendLegend()."""
        if len(self._legends) > 0:
            self._content.extend([
                '  <br/>',
                '  Details:<br/>',  # was invalid '</br>' merged onto the previous line
            ])
            for leg in self._legends:
                self._content.append('  %s<br/>' % leg)

    def _mapSectionName(self, section):
        """Translate a section key into its display name."""
        return _sectionNameMapOrder.get(section, section)

    def _orderSets(self, keys):
        """Return keys with the known sections first (canonical order), then
        the remaining ones sorted."""
        keys_sorted = sorted(keys)
        ret = []
        for section in _sectionNameMapOrder.keys():
            if section in keys_sorted:
                ret.append(section)
                # was keys.remove(section), which left the section also in
                # keys_sorted and duplicated it in the returned list
                keys_sorted.remove(section)
        ret.extend(keys_sorted)
        return ret
class PageSet(object):
    """A group of HTML pages for one sample, sharing a file-name prefix.

    Pages are created lazily via _getPage() and written out together in
    write(); the prefix encodes the sample properties (fastsim, scenario,
    pileup, ...).
    """
    def __init__(self, title, sampleName, sample, fastVsFull, pileupComparison, dqmSubFolderTranslatedToSectionName=None):
        self._title = title
        self._sampleName = sampleName
        self._pages = collections.OrderedDict()
        self._dqmSubFolderTranslatedToSectionName = dqmSubFolderTranslatedToSectionName

        # Assemble the common file-name prefix from the sample properties.
        pieces = []
        if sample.fastsim():
            pieces.append("fast_")
            if fastVsFull:
                pieces.append("full_")
        pieces.append(_sampleFileName.get(sample.label(), sample.label()) + "_")
        if hasattr(sample, "hasScenario") and sample.hasScenario():
            pieces.append(sample.scenario() + "_")
        if hasattr(sample, "hasPileup"):
            if sample.hasPileup():
                pieces.append("pu" + str(sample.pileupNumber()) + "_" + sample.pileupType() + "_")
            else:
                pieces.append("nopu_")
            if pileupComparison:
                pieces.append("vspu_")
        self._prefix = "".join(pieces)

    def _getPage(self, key, pageClass):
        """Return the page stored under *key*, creating it with *pageClass*
        on first access."""
        try:
            return self._pages[key]
        except KeyError:
            page = pageClass(self._title, self._sampleName)
            self._pages[key] = page
            return page

    def addPlotSet(self, plotterFolder, dqmSubFolder, plotFiles):
        """File the plot set on the page/section derived from the folder."""
        pageKey = plotterFolder.getPage()
        if pageKey is None:
            pageKey = dqmSubFolder.translated if dqmSubFolder is not None else plotterFolder.getName()
        page = self._getPage(pageKey, Page)

        sectionName = plotterFolder.getSection()
        if sectionName is None:
            sectionName = ""
            if plotterFolder.getPage() is not None and dqmSubFolder is not None:
                translate = self._dqmSubFolderTranslatedToSectionName
                if translate is not None:
                    sectionName = translate(dqmSubFolder.translated)
                else:
                    sectionName = dqmSubFolder.translated

        page.addPlotSet(sectionName, plotFiles)

    def addTable(self, table):
        """File *table* on the page/section it names; None is ignored."""
        if table is None:
            return
        self._getPage(table.getPage(), Page).addTable(table.getSection(), table)

    def write(self, baseDir):
        """Write all non-empty pages into *baseDir*; return a list of
        (displayName, fileName) pairs for the index."""
        written = []
        for key in self._orderPages(self._pages.keys()):
            page = self._pages[key]
            if page.isEmpty():
                continue
            fileName = "%s%s.html" % (self._prefix, key)
            page.write(os.path.join(baseDir, fileName))
            written.append((self._mapPagesName(key), fileName))
        return written

    def _mapPagesName(self, name):
        """Translate a page key into its display name."""
        return _pageNameMap.get(name, name)

    def _orderPages(self, keys):
        """Hook for subclasses; the base class keeps insertion order."""
        return keys
class TrackingIterPage(Page):
    """Page for one tracking iteration; sections are ordered by track quality."""
    def __init__(self, *args, **kwargs):
        super(TrackingIterPage, self).__init__(*args, **kwargs)

    def _mapSectionName(self, quality):
        # Translate the quality flag into its display name.
        return _trackQualityNameOrder.get(quality, quality)

    def _orderSets(self, qualities):
        # Emit the known qualities in canonical order first, then whatever is
        # left in its incoming order.
        ordered = []
        for known in _trackQualityNameOrder.keys():
            if known in qualities:
                ordered.append(known)
                qualities.remove(known)
        ordered.extend(qualities)
        return ordered
class TrackingPageSet(PageSet):
    """PageSet specialisation grouping tracking plots per algorithm (page)
    and per track quality (section)."""
    def __init__(self, *args, **kwargs):
        super(TrackingPageSet, self).__init__(*args, **kwargs)

    def addPlotSet(self, plotterFolder, dqmSubFolder, plotFiles):
        (algo, quality) = dqmSubFolder.translated

        pageName = algo
        sectionName = quality

        # Everything that is not an iterative step is collected under the
        # "ootb" (out-of-the-box) page, keyed by the algorithm name.
        # Reaching into plotterFolder._plotFolder this way is a bit of a
        # hack, but it is simple and it works.
        if algo != "ootb" and not plotterFolder._plotFolder.isAlgoIterative(algo):
            pageName = "ootb"
            sectionName = algo

        folderName = plotterFolder.getName()
        if folderName != "":
            sectionName = folderName + "_" + sectionName

        page = self._getPage(pageName, TrackingIterPage)
        page.addPlotSet(sectionName, plotFiles)

    def _mapPagesName(self, algo):  # algo doubles as the page name here
        return _trackAlgoName.get(algo, algo)

    def _orderPages(self, algos):
        # Known algorithms first, in their canonical order; the rest keep
        # their incoming order.
        ordered = [known for known in _trackAlgoOrder if known in algos]
        for known in ordered:
            algos.remove(known)
        return ordered + algos
class IndexSection:
    """All report pages belonging to one sample.

    Content added via addPlots()/addTable() is routed to one of the PageSets
    according to the PlotPurpose tag of its plotter folder / table; anything
    with an unknown purpose goes to self._otherPages.
    """
    def __init__(self, sample, title, fastVsFull, pileupComparison):
        self._sample = sample

        # Human-readable sample description used in the page headers
        self._sampleName = ""
        if sample.fastsim():
            self._sampleName += "FastSim "
            if fastVsFull:
                self._sampleName += "vs FullSim "

        pileup = ""
        if hasattr(sample, "hasPileup"):
            pileup = "with no pileup"
            if sample.hasPileup():
                pileup = "with %d pileup (%s)" % (sample.pileupNumber(), sample.pileupType())
            if pileupComparison is not None:
                pileup += " "+pileupComparison
        if hasattr(sample, "customPileupLabel"):
            pileup = sample.customPileupLabel()

        scenario = ""
        if hasattr(sample, "hasScenario") and sample.hasScenario():
            scenario = " (\"%s\")" % sample.scenario()
        self._sampleName += "%s sample%s %s" % (_sampleName.get(sample.name(), sample.name()), scenario, pileup)

        params = [title, self._sampleName, sample, fastVsFull, pileupComparison is not None]
        self._summaryPage = PageSet(*params)
        self._iterationPages = TrackingPageSet(*params)
        self._vertexPage = PageSet(*params)
        self._miniaodPage = PageSet(*params)
        self._timingPage = PageSet(*params)
        self._hltPages = PageSet(*params, dqmSubFolderTranslatedToSectionName=lambda algoQuality: algoQuality[0])
        self._otherPages = PageSet(*params)

        self._purposePageMap = {
            PlotPurpose.TrackingIteration: self._iterationPages,
            PlotPurpose.TrackingSummary: self._summaryPage,
            PlotPurpose.Vertexing: self._vertexPage,
            PlotPurpose.MiniAOD: self._miniaodPage,
            PlotPurpose.Timing: self._timingPage,
            PlotPurpose.HLT: self._hltPages,
        }

    def addPlots(self, plotterFolder, dqmSubFolder, plotFiles):
        """Route the plot files to the PageSet matching the folder's purpose."""
        page = self._purposePageMap.get(plotterFolder.getPurpose(), self._otherPages)
        page.addPlotSet(plotterFolder, dqmSubFolder, plotFiles)

    def addTable(self, table):
        """Route *table* to the PageSet matching its purpose; None is ignored."""
        if table is None:
            return

        page = self._purposePageMap.get(table.getPurpose(), self._otherPages)
        page.addTable(table)

    # (a stray, never-read class attribute 'params = []' was removed here;
    # it was dead left-over code)

    def write(self, baseDir):
        """Write all page sets under *baseDir*; return the index.html fragment
        (a list of HTML lines)."""
        ret = [
            "  "+self._sampleName,
            "  <br/>",
            "  <ul>",
        ]

        for pages in [self._summaryPage, self._iterationPages, self._vertexPage, self._miniaodPage, self._timingPage, self._hltPages, self._otherPages]:
            labelFiles = pages.write(baseDir)
            for label, fname in labelFiles:
                ret.append('   <li><a href="%s">%s</a></li>' % (fname, label))

        ret.extend([
            '  </ul>',
            '  <br/>',
        ])

        return ret
class HtmlReport:
    """Top-level object assembling index.html and all per-sample pages."""
    def __init__(self, validationName, newBaseDir):
        self._title = "Tracking validation "+validationName
        self._newBaseDir = newBaseDir

        self._index = [
            '<html>',
            ' <head>',
            '  <title>%s</title>' % self._title,
            ' </head>',
            ' <body>',
        ]

        self._sections = collections.OrderedDict()

    def addNote(self, note):
        """Append a free-form note paragraph to the index page."""
        self._index.append('  <p>%s</p>'%note)

    def beginSample(self, sample, fastVsFull=False, pileupComparison=None):
        """Open (or resume) the IndexSection collecting content for *sample*."""
        # Fast vs. Full becomes just after the corresponding Fast
        # Same for PU
        rightAfterRefSample = fastVsFull or (pileupComparison is not None)

        key = (sample.digest(), rightAfterRefSample)
        if key in self._sections:
            self._currentSection = self._sections[key]
        else:
            self._currentSection = IndexSection(sample, self._title, fastVsFull, pileupComparison)
            self._sections[key] = self._currentSection

    def addPlots(self, *args, **kwargs):
        """Forward plots to the section opened by beginSample()."""
        self._currentSection.addPlots(*args, **kwargs)

    def addTable(self, *args, **kwargs):
        """Forward a table to the section opened by beginSample()."""
        self._currentSection.addTable(*args, **kwargs)

    def write(self):
        """Write index.html plus all section pages to the output directory."""
        # Reorder sections such that Fast vs. Full becomes just after the
        # corresponding Fast (same for pileup comparisons)
        keys = self._sections.iterkeys()
        newkeys = []
        for key in keys:
            if not key[1]:
                newkeys.append(key)
                continue
            # comparison section: splice it right after its reference sample
            ind_fast = newkeys.index( (key[0], False) )
            newkeys.insert(ind_fast+1, key)

        for key in newkeys:
            section = self._sections[key]
            self._index.extend(section.write(self._newBaseDir))

        self._index.extend([
            " </body>",
            "</html>",
        ])

        # 'with' guarantees the file handle is closed even if a write fails
        with open(os.path.join(self._newBaseDir, "index.html"), "w") as f:
            for line in self._index:
                f.write(line)
                f.write("\n")
class HtmlReportDummy:
    """Drop-in replacement for HtmlReport that silently ignores everything.

    Used when HTML report generation is disabled so callers do not need to
    special-case a missing report object.
    """
    def __init__(self):
        pass
    def beginSample(self, *args, **kwargs):
        # intentionally a no-op
        pass
    def addPlots(self, *args, **kwargs):
        # intentionally a no-op
        pass
    def addTable(self, *args, **kwargs):
        # intentionally a no-op
        pass
| [
"matti.kortelainen@cern.ch"
] | matti.kortelainen@cern.ch |
22eb63305890280ff00427e395dc7ee12f3f314c | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4330/codes/1594_1800.py | 0302eb5caf63f16066aa6406b53455d42458aa87 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | a=int(input("Insira o valor de A"))
b=int(input("Insira o valor de B"))
c=int(input("Insira o valor de C"))
# ratio of the sum of squares to the plain sum of the three inputs
x = (a**2)+(b**2)+(c**2)
y = a+b+c
t=x/y
# print the ratio rounded to 7 decimal places
print(round(t,7))
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
aea6c3de83c23b1dc7a355b74d2d31cefade985e | dd9f643d5833a3a4707a08eac38e30e03819a5f8 | /scomp/migrations/0014_blogmodel_blogparamodel.py | 8b5a0b8680eb54ca0f8280f9415cd36845bdfcdc | [] | no_license | Ashwary-Jharbade/services | e65e99be5508c9854797124f0392c2d32477ee7a | 2e514117e374fee4feef908e85cf8853f830f390 | refs/heads/master | 2023-03-04T10:10:35.499854 | 2021-02-18T19:12:16 | 2021-02-18T19:12:16 | 304,116,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | # Generated by Django 3.0.4 on 2020-08-27 07:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration creating the BlogModel and BlogParaModel tables."""

    dependencies = [
        ('scomp', '0013_servicemodel_desc'),
    ]

    operations = [
        migrations.CreateModel(
            name='BlogModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('image', models.FileField(blank=True, null=True, upload_to='blogimages')),
                ('blogauthor', models.CharField(max_length=30)),
                ('aboutauthor', models.CharField(max_length=200)),
                ('intropara', models.CharField(max_length=150)),
                ('content', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='BlogParaModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('image', models.FileField(blank=True, null=True, upload_to='blogimages')),
                ('content', models.CharField(max_length=500)),
                ('date', models.DateTimeField(auto_now_add=True)),
                # each paragraph belongs to one blog post
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scomp.BlogModel')),
            ],
        ),
    ]
| [
"ashwary.jharbade999@gmail.com"
] | ashwary.jharbade999@gmail.com |
7b4f54884801a64b393131a5a772f15a7ccfe5aa | 45a70554091ea06afc63d86ddb2724053513189b | /dataset/__init__.py | 6db9f41c3b178cc49530593183e883df9f08deb2 | [] | no_license | mponza/keyphrase-annotation | d10705c2ccf9ae7b2d2e3d8aa1901460de564976 | 14abbd4ebcf449f65f1b1c392235b55eb051005b | refs/heads/master | 2021-06-13T00:21:49.108263 | 2017-01-04T14:09:00 | 2017-01-04T14:09:00 | 77,946,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from duc import DUCDataset
from icsi import ICSIASRDataset, ICSIHumanTranscriptDataset
from inspec import InspectTrainingDataset, InspectValidationDataset, \
InspectTestDataset
from nus import NUSDataset
def make_dataset(dataset_name):
    """
    Factory-style method for getting dataset from string name.

    Raises KeyError for an unknown name (same as before).
    """
    # Map names to classes and instantiate only the requested dataset; the
    # previous version constructed every dataset on each call just to
    # return one of them.
    factories = {
        'duc': DUCDataset,
        'icsi-asr': ICSIASRDataset,
        'icsi-ht': ICSIHumanTranscriptDataset,
        'inspec-train': InspectTrainingDataset,
        'inspec-val': InspectValidationDataset,
        'inspec-test': InspectTestDataset,
        'nus': NUSDataset,
    }
    return factories[dataset_name]()
| [
"mponza@gmail.com"
] | mponza@gmail.com |
abc7888375db7b5790e14fedfa8dedb11c05d33e | d2e6823851e236312e4b99acca53a293dff5b1a7 | /BlogApp/managers.py | eedf596bd527a60656aea2a3e09bf9a12dcf89b5 | [
"Apache-2.0"
] | permissive | devvspaces/nimrodage | f061378692e94b8dc9b15ae2f3fdcd587bfdfe1d | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | refs/heads/main | 2023-05-09T03:41:22.345841 | 2021-06-04T03:47:56 | 2021-06-04T03:47:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from django.db import models
class PostQuery(models.QuerySet):
    """QuerySet with post-specific filtering helpers."""
    def get_authors_post(self, username):
        # All posts whose author has the given username.
        return self.filter(author__username=username)
class PostManager(models.Manager):
    """Manager exposing the PostQuery helpers on the model's manager."""
    def get_queryset(self):
        # Use the custom QuerySet so its helpers chain off manager calls.
        return PostQuery(model=self.model, using=self._db)
    def get_posts(self, username):
        """Return all posts written by *username*."""
        return self.get_queryset().get_authors_post(username)
"netrobeweb@gmail.com"
] | netrobeweb@gmail.com |
0efac6e3d7417f91494c5e8208d5faffb89d643d | 5edf72a6e86f35fb00a34670a6f9ca61d96c2db8 | /kmeans.py | 11b764d161a4957b8d65b733c9e8080f2df3de7d | [] | no_license | isabellekazarian/kmeans-algorithm | b94008ed80ec198f2d8cb7def960025dfdfad53e | 7378cdf56f1c51c953c34f1e11c76499850d07c1 | refs/heads/master | 2023-03-11T19:37:57.830724 | 2021-02-08T18:39:13 | 2021-02-08T18:39:13 | 337,162,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,487 | py |
import matplotlib.pyplot as plt
import numpy
import csv
import random
from collections import defaultdict
# Input CSV: one row per country (name, birth rate, life expectancy).
DATA_FILE = './dataBoth.csv'
# k (number of clusters) and the number of k-means refinement iterations.
NUM_CLUSTERS = 4
NUM_ITERATIONS = 4
# returns distance between two data points
def distance(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return (dx * dx + dy * dy) ** 0.5
# returns data from the csv file as a list of values
# 2D list with values: [country, birth_rate, life_expectancy]
def readCsv(file_name):
    """Parse the dataset CSV into [country, birth_rate, life_expectancy] rows.

    The header row is skipped; the two rates are converted to float.
    """
    records = []
    with open(file_name) as handle:
        reader = csv.reader(handle, delimiter=',')
        next(reader)  # skip the header line
        for row in reader:
            records.append([row[0], float(row[1]), float(row[2])])
        return records
# returns the closest centroid (and the squared distance to it) for the point
# takes a list of [x, y] centroids and a point as a [label, x, y] record
def getClosestCentroid(centroids, point):
    """Return (closest_centroid, squared_distance) for *point*.

    Ties are broken in favour of the centroid appearing first in the list.
    The old short-circuit ``if point in centroids: return point`` was removed:
    it returned a single value where every caller unpacks a 2-tuple, and it
    could never match anyway because points are [label, x, y] records while
    centroids are [x, y] pairs.
    """
    px, py = point[1], point[2]

    best_centroid = None
    best_sq_dist = None
    for centroid in centroids:
        dx = centroid[0] - px
        dy = centroid[1] - py
        sq_dist = dx * dx + dy * dy
        if best_sq_dist is None or sq_dist < best_sq_dist:
            best_sq_dist = sq_dist
            best_centroid = centroid
    return best_centroid, best_sq_dist
# returns randomly generated list of centroids
def initializeCentroids(data, num_clusters=None):
    """Pick *num_clusters* random data points and return their [x, y] pairs.

    num_clusters defaults to the module-level NUM_CLUSTERS so existing call
    sites keep working unchanged.
    """
    if num_clusters is None:
        num_clusters = NUM_CLUSTERS
    samples = random.sample(list(data), num_clusters)
    # keep only the coordinates of each sampled [label, x, y] record
    return [[sample[1], sample[2]] for sample in samples]
# returns mean-calculated list of centroids
def calculateNewCentroidsFromMean(clusters):
    """Compute each cluster's new centroid as the mean of its points.

    Every cluster is a list of [label, x, y] records; the result is a list
    of [mean_x, mean_y] pairs, one per cluster.
    """
    centroids = []
    for cluster in clusters:
        size = len(cluster)
        mean_x = sum(record[1] for record in cluster) / size
        mean_y = sum(record[2] for record in cluster) / size
        centroids.append([mean_x, mean_y])
    return centroids
# scatter plot of all clusters
def plotClusters(clusters, centroids):
    """Scatter-plot each cluster in its own colour with centroids in black.

    Blocks until the matplotlib window is closed.
    """
    # (an unused 'points' list that was never read has been removed)
    colors = ["Blue", "Green", "Pink", "Red", "Orange", "Purple", "Gray"]

    # plot each point by cluster
    for cluster in range(0, len(clusters)):
        for point in clusters[cluster]:
            plt.scatter(point[1], point[2], c = colors[cluster])

    # Plot centroids
    centroids_x, centroids_y = zip(*centroids)
    plt.scatter(centroids_x, centroids_y, s=80, c='black')

    plt.show()
# read data file
data = readCsv(DATA_FILE)

# initialize
centroids = initializeCentroids(data)
# NOTE(review): this initial cluster list is rebuilt at the top of every
# iteration below, so this assignment only matters if NUM_ITERATIONS == 0.
clusters = [[] for x in range(NUM_CLUSTERS)]

# run iterations
for i in range(0, NUM_ITERATIONS):
    # initialize
    clusters = [[] for x in range(NUM_CLUSTERS)]
    dist_sum = 0

    # for each point find closest centroid and add to cluster
    for point in data:
        # get closest centroid index
        closest_centroid, dist_sq = getClosestCentroid(centroids, point)
        # dist_sum accumulates the within-cluster sum of squared distances
        dist_sum += dist_sq

        # add point to the cluster for corresponding centroid
        closest_centroid_index = centroids.index(closest_centroid)
        clusters[closest_centroid_index].append(point)

    # visualize clusters
    plotClusters(clusters, centroids)

    # print distance sum
    print("Sum of distances: " + str(dist_sum))

    # get new centroids
    centroids = calculateNewCentroidsFromMean(clusters)

# print results ---------------------------------
for cluster in range(0, len(clusters)):
    countries = []
    num_points = 0
    sum_life_expectancy = 0
    sum_birth_rate = 0
    for point in clusters[cluster]:
        num_points += 1
        sum_birth_rate += point[1]
        sum_life_expectancy += point[2]
        countries.append(point[0])
    print()
    print("Cluster: " + str(cluster))
    print("Mean life expectancy: " + str(sum_life_expectancy / num_points))
    print("Mean birth rate: " + str(sum_birth_rate / num_points))
    print("Number of countries: " + str(num_points))
    print()
    print("Countries:")
    for country in countries:
        print(country)
| [
"rosekaz13@gmail.com"
] | rosekaz13@gmail.com |
49735df185c393c2ec9eacd961fb3a5fade967e1 | b3a9740a5099c490c2f21ca9a9bbf507ad2bd2bf | /blog/apps/aricle/migrations/0002_auto_20190725_1259.py | edfa89f55b029a422eb06d9951b52e75e5453e68 | [] | no_license | JummyWu/drf_blog | c27d39237fa0953650d092f40cfcc73beb238652 | b192485ad460eb1f05322dd09d0e97a63c476d4f | refs/heads/master | 2022-12-08T11:40:41.594820 | 2019-08-24T02:13:26 | 2019-08-24T02:13:26 | 164,213,480 | 1 | 0 | null | 2022-12-08T01:48:34 | 2019-01-05T12:36:00 | Python | UTF-8 | Python | false | false | 1,515 | py | # Generated by Django 2.1.5 on 2019-07-25 12:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding owner/category/tag relations for the
    aricle app."""

    initial = True

    dependencies = [
        ('aricle', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='category',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='aricle',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='aricle.Category', verbose_name='分类'),
        ),
        migrations.AddField(
            model_name='aricle',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='aricle',
            name='tags',
            field=models.ManyToManyField(related_name='posts', to='aricle.Tag', verbose_name='标签'),
        ),
    ]
| [
"929440925@qq.com"
] | 929440925@qq.com |
f931f93487dee0b1d116ef38d52fa5222198b620 | b6c09a1b87074d6e58884211ce24df8ec354da5c | /345. 反转字符串中的元音字母.py | f259c3af854c1e4b250ef47b593bf61f4f86067c | [] | no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | """
编写一个函数,以字符串作为输入,反转该字符串中的元音字母。
"""
def reverseVowels(s):
    """Return *s* with the positions of its vowels reversed (case preserved)."""
    vowels = set("aeiouAEIOU")
    chars = list(s)
    lo, hi = 0, len(chars) - 1
    # classic two-pointer sweep: walk inwards, swapping vowel pairs
    while lo < hi:
        if chars[lo] not in vowels:
            lo += 1
        elif chars[hi] not in vowels:
            hi -= 1
        else:
            chars[lo], chars[hi] = chars[hi], chars[lo]
            lo += 1
            hi -= 1
    return "".join(chars)
# quick manual checks: expect "holle" and "leotcede"
print(reverseVowels("hello"))
print(reverseVowels("leetcode"))
"xlfeng886@163.com"
] | xlfeng886@163.com |
f2ab8dfb0b4f100d21c732dc63482a3816c4a33e | 9629daa92c3a002dcfb5e81ba1870c8bf22c4ae3 | /Shop/forms.py | 721799ae0e2fa7dfb43d4f8c56df47d172623992 | [
"MIT"
] | permissive | forhadsidhu/Django-E-Commerce-App | ce61e15836a9dd4d808b52768ab4d592c0f7890f | 5c07b0c2e562fc0bb8dcc6803a7595b889ea8954 | refs/heads/master | 2023-03-16T14:58:06.391665 | 2021-03-15T10:50:00 | 2021-03-15T10:50:00 | 248,206,652 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | from django import forms
from .models import ImageUploadModel,Post
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
# Customized user creation form, e.g. for adding an email field on top of
# Django's default UserCreationForm.
class CreateUserform(UserCreationForm):
    # Meta is simply the inner configuration class of the form.
    # add image field (currently disabled):
    # image = forms.ImageField()
    class Meta:
        model = User
        fields = ['username','email','password1','password2']
class Rev(forms.ModelForm):
    """Form for submitting a review on a Post."""
    class Meta:
        model = Post
        fields=['review']
"forhadsidhu@gmail.com"
] | forhadsidhu@gmail.com |
f316549e5a2ecc6bd4a40922f52af9d83adf665c | 55e79a84cc8f416ef354c9457f53ba0ddf1dde09 | /tweets/migrations/0003_auto_20200120_1407.py | 107537f578f2235605ad5eb7b08cfa7cced2601d | [] | no_license | montionugera/twitter-api-drf | fdb9935b924ca406a4d472b0d38a13c06988bd7d | e1a6e7d4e88b8946548f7c7a301061871e65206c | refs/heads/master | 2020-12-14T23:50:41.866101 | 2020-01-20T15:01:34 | 2020-01-20T15:01:34 | 234,916,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # Generated by Django 3.0.2 on 2020-01-20 14:07
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration ordering tweets by creation time."""

    dependencies = [
        ('tweets', '0002_remove_tweet_pub_date'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='tweet',
            options={'ordering': ['created_at']},
        ),
    ]
| [
"montionugera@gmail.com"
] | montionugera@gmail.com |
80bd00d96dd1eb06ab47528bd9e4e22aa23bbe46 | 152eae1f0febe35268c65b80ac218480486f2123 | /py/my_collections/test/test_sorting.py | 44a05787466dec2edb9df712b10793728e81a659 | [] | no_license | Crescent617/code-practice | a02b65516d296db15e72b2c2d1412f5befd7034f | f3dd8a3cf0bc9b1d00ed37793d02f1a89d8d5a96 | refs/heads/master | 2023-06-28T02:30:30.987862 | 2023-06-22T14:41:08 | 2023-06-22T14:41:08 | 218,738,764 | 0 | 0 | null | 2023-06-22T14:41:09 | 2019-10-31T10:17:55 | Python | UTF-8 | Python | false | false | 1,378 | py | from sorting import *
from dllist import DoubleLinkedList
from random import randint
from cProfile import Profile
# Size of the randomized inputs used by every test below.
max_num = 5000

def random_list(count):
    """Build a DoubleLinkedList of *count* random integers in [0, 10000]."""
    numbers = DoubleLinkedList()
    for i in range(0, count):
        numbers.shift(randint(0, 10000))
    return numbers
def is_sorted(numbers):
    """Return True if the linked list's values are in non-decreasing order."""
    # start at the second node, comparing each value with its predecessor
    node = numbers.begin.next
    while node is not None:
        if node.prev.value > node.value:
            return False
        node = node.next
    return True
def test_bubble():
    """bubble_sort must leave a random DoubleLinkedList in sorted order."""
    numbers = random_list(max_num)
    bubble_sort(numbers)
    assert is_sorted(numbers)

def test_merge():
    """merge_sort must leave a random DoubleLinkedList in sorted order."""
    numbers = random_list(max_num)
    merge_sort(numbers)
    assert is_sorted(numbers)
def test_quick():
    """quick_sort must leave the plain Python list in non-decreasing order."""
    values = [randint(0, 10000) for _ in range(max_num)]
    quick_sort(values, 0, max_num - 1)
    for left, right in zip(values, values[1:]):
        assert left <= right
def test_all():
    """Run all three sorts on identical random data (used for profiling)."""
    numbers = [randint(0, 10000) for i in range(max_num)]
    # copy the same values into two linked lists so each sort sees equal input
    numbers_m = DoubleLinkedList()
    numbers_b = DoubleLinkedList()
    for i in numbers:
        numbers_m.shift(i)
        numbers_b.shift(i)
    quick_sort(numbers, 0, max_num-1)
    merge_sort(numbers_m)
    bubble_sort(numbers_b)
if __name__ == '__main__':
    import pstats

    prof = Profile()
    prof.enable()
    test_all()
    prof.create_stats()
    # cProfile.Profile.print_stats() only accepts a sort key, so the original
    # call print_stats('sorting.py', sort="cumulative") raised TypeError;
    # restricting the report to sorting.py requires pstats.Stats directly.
    pstats.Stats(prof).sort_stats("cumulative").print_stats('sorting.py')
| [
"lihuaru617@outlook.com"
] | lihuaru617@outlook.com |
d2feecf8c86df8ebfbb1a826ff254b1b98455ddf | 4ac6c008882c1a7321bf9477ba532b88bb113741 | /ThiNet_TPAMI/ResNet50/analysis_reid.py | 9e4300e6213e6eaf5645e641d6a0213350804a8f | [
"MIT"
] | permissive | QQQYang/ThiNet_Code | cbd67470838b0d1d0a1803ae66a9a74a899adf89 | 850525c8ca85b63e5f7cec1a73b1b681178a5786 | refs/heads/master | 2020-09-18T05:36:23.093997 | 2019-12-05T11:54:39 | 2019-12-05T11:54:39 | 224,130,726 | 0 | 0 | MIT | 2019-11-26T07:37:29 | 2019-11-26T07:37:28 | null | UTF-8 | Python | false | false | 6,649 | py | #coding=utf-8
'''
This file is used for analysing the filters and activations of a network, which inspire us of new ideas about network pruning
Author: yqian@aibee.com
'''
# import ptvsd
# ptvsd.enable_attach(address = ('0.0.0.0', 5678))
# ptvsd.wait_for_attach()
import caffe
import numpy as np
from PIL import Image
import cv2
from net_generator import solver_and_prototxt
import random
import time
import os
import argparse
import json
def cal_corrcoef(act):
    """Correlation between the channel-index fraction and the cumulative
    fraction of (descending-sorted) activation mass.

    Values near 1 mean the activation energy is spread evenly across the
    channels; lower values mean it is concentrated in a few channels.
    """
    act_sum = np.sum(act)
    act = np.sort(act)[::-1]
    n = len(act)
    # cumulative sum in one O(n) pass; the old per-prefix sum() was O(n^2)
    y = np.cumsum(act) / act_sum
    x = np.arange(1, n + 1) / float(n)
    coef = np.corrcoef(np.array([x, y]))
    return coef[0, 1]
def resize_image_with_padding(im, new_dims, interp_order=1):
    """
    Resize an image array, preserving its aspect ratio and zero-padding the
    remainder of the target canvas.

    Parameters
    ----------
    im : (H x W x K) ndarray
    new_dims : (height, width) tuple of new dimensions.
    interp_order : interpolation order, default is linear.
        NOTE(review): currently unused -- cv2.resize is called with its
        default interpolation regardless; confirm before relying on it.

    Returns
    -------
    im : resized ndarray with shape (new_dims[0], new_dims[1], K), float32.
    """
    ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
                   dtype=np.float32)
    ret.fill(0)
    # aspect ratios are width/height here
    target_as = new_dims[1] / float(new_dims[0])
    aspect_ratio = im.shape[1] / float(im.shape[0])
    if target_as < aspect_ratio:
        # image is wider than the target: fit to the target width
        scale = new_dims[1] / float(im.shape[1])
        scaled_width = int(new_dims[1])
        scaled_height = min(int(new_dims[0]), int(scale* im.shape[0]))
        resized_img = cv2.resize(im, (scaled_width, scaled_height))
        # despite the names, start_x is the row offset and start_y the column offset
        start_x = 0
        start_y = 0
        ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
    else:
        # image is taller than the target: fit to the target height,
        # centering horizontally
        scale = new_dims[0] / float(im.shape[0])
        scaled_width = min(int(new_dims[1]), int(scale* im.shape[1]))
        scaled_height = int(new_dims[0])
        resized_img = cv2.resize(im, (scaled_width, scaled_height))
        start_x = 0
        start_y = int((new_dims[1] - scaled_width) / 2)
        ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
    return ret.astype(np.float32)
def collect_activation(selected_layer, selected_block):
    """Collect per-channel mean activations of a Caffe ReID network.

    Runs <sample_num> randomly chosen evaluation images through the network,
    records the channel-wise mean activation of the chosen residual branches,
    then summarizes each layer with cal_corrcoef and dumps the result to
    'act_mean.json'.

    :param selected_layer: indices into the <layers> list below (which
        residual blocks to inspect)
    :param selected_block: values 1 or 2 selecting branch2a vs branch2b
    """
    # Hard-coded model/data paths — this is a one-off analysis script.
    model_def = '/ssd/yqian/prune/model/reid/deploy_baseline.prototxt'
    model_weights = '/ssd/yqian/prune/model/body_reid_general_npair_caffe_cpu_ctf_20190925_v010002/npair_may_to_aug_ctf_all_stores_finetune_full_year_iter_44000.caffemodel'
    # load net
    caffe.set_device(0)
    caffe.set_mode_gpu()
    net = caffe.Net(model_def, model_weights, caffe.TEST)
    # load the mean ImageNet image (as distributed with Caffe) for subtraction
    mean_value = np.array([104, 117, 123], dtype=float)
    sample_num = 2000
    # act_mean maps layer name -> list of per-image channel-mean vectors.
    act_mean = {}
    layers = ['2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c']
    data_list = np.loadtxt('/ssd/yqian/prune/dataset/data/test_data/eval_CTF_beijing_xhm_20181207_label_finish_revision.txt', dtype=str)
    img_index = random.sample(range(len(data_list)), sample_num)
    # f = open('/ssd/yqian/prune/dataset/data/train_all_new.txt')
    for file_index in img_index:
        # offset = random.randrange(2e7)
        # f.seek(offset, 0)
        # line = f.readline()
        # time_start = time.time()
        # while len(line) < 2:
        #     offset = random.randrange(2e7)
        #     f.seek(offset, 0)
        #     line = f.readline()
        # try:
        #     file_path = '/ssd/yqian/prune/dataset/data/' + line.split()[0]
        # except IndexError:
        #     print('error: ', len(line))
        # im = cv2.imread(file_path)
        # while im is None:
        #     offset = random.randrange(2e7)
        #     f.seek(offset, 0)
        #     line = f.readline()
        #     while len(line) < 2:
        #         offset = random.randrange(2e7)
        #         f.seek(offset, 0)
        #         line = f.readline()
        #     try:
        #         file_path = '/ssd/yqian/prune/dataset/data/' + line.split()[0]
        #     except IndexError:
        #         print('error: ', len(line))
        #     im = cv2.imread(file_path)
        # print(line.split()[0])
        file_path = '/ssd/yqian/prune/dataset/data/test_data/all/' + data_list[file_index][0]
        im = cv2.imread(file_path)
        # Preprocess: pad-resize to the network's input size, mean-subtract,
        # and reorder to NCHW as Caffe expects.
        im = resize_image_with_padding(im, (384, 128))
        im -= mean_value
        im = np.transpose(im, (2,0,1)) # HWC -> CHW
        im = np.reshape(im, (1, 3, 384, 128)) #CHW ->NCHW
        # shape for input (data blob is N x C x H x W), set data
        # center crop
        # im = im[:, 16:240, 16:240]
        net.blobs['data'].reshape(*im.shape)
        net.blobs['data'].data[...] = im
        # run net and take argmax for prediction
        net.forward()
        for i in range(len(selected_layer)):
            for j in range(len(selected_block)):
                # Block value 1 selects branch2a, anything else branch2b.
                if selected_block[j] == 1:
                    output_layer = 'res' + layers[selected_layer[i]] + '_branch2a'
                else:
                    output_layer = 'res' + layers[selected_layer[i]] + '_branch2b'
                activation = net.blobs[output_layer].data
                # Mean over batch and spatial dims -> one value per channel.
                if output_layer not in act_mean:
                    act_mean[output_layer] = [np.mean(activation, axis=(0, 2, 3)).tolist()]
                else:
                    act_mean[output_layer].append(np.mean(activation, axis=(0, 2, 3)).tolist())
    # Reduce each layer's per-image vectors to a single correlation score.
    for key in act_mean:
        layer_act = act_mean[key]
        act_mean[key] = np.sum(np.abs(np.array(layer_act)), axis=0).tolist()
        act_mean[key] = float(cal_corrcoef(act_mean[key]))
    print(act_mean)
    with open('act_mean.json','w') as f:
        json.dump(act_mean, f)
def get_opt():
    """Build and parse the command-line options for this script."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--selected_layer", type=int, nargs='+',
                        default=list(range(16)))
    parser.add_argument("--selected_block", type=int, nargs='+',
                        default=[1, 2], help='range from 1 to 2')
    parser.add_argument("--gpu", type=int, default=4)
    return parser.parse_args()
if __name__ == "__main__":
    # Poll nvidia-smi until a GPU with more than 5000 MiB free memory is
    # available, then pin this process to it via CUDA_VISIBLE_DEVICES.
    while True:
        os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
        # Use a context manager so the temp file handle is not leaked
        # (the original opened 'tmp' without ever closing it).
        with open('tmp', 'r') as f:
            memory_gpu = [int(x.split()[2]) for x in f.readlines()]
        if max(memory_gpu) > 5000:
            gpu = np.argmax(memory_gpu)  # compute once, reuse below
            os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
            os.system('rm tmp')
            print('Find vacant GPU: %d' % gpu)
            break
    opt = get_opt()
collect_activation(opt.selected_layer, opt.selected_block) | [
"yqian@gpu002.aibee.cn"
] | yqian@gpu002.aibee.cn |
d839e4467adb97c603f1bbf720207d83942d87d2 | 46267e38d63bb487ccef4612593676412ea956d7 | /astraeus/core.py | 268d58bf9ad346c038f6b1a1989ccc7a00c0339b | [
"MIT"
] | permissive | eos-sns/astraeus | 17f63fc02e27b8b40b8470fb8202b9bb4b50e3d6 | bbbe820bdc02d7c0209854b80b1f952bfaaf984a | refs/heads/master | 2020-04-25T12:56:35.666259 | 2019-09-18T12:15:04 | 2019-09-18T12:15:04 | 172,793,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,379 | py | # -*- coding: utf-8 -*-
import abc
import datetime
import uuid
from astraeus.models.memcache import MemcacheClientBuilder, MemcacheFacade
from astraeus.models.mongodb import MongoDBBuilder
class Hasher(abc.ABC):
    """Strategy interface for turning a key into a hash string.

    Deriving from abc.ABC is required for @abc.abstractmethod to have any
    effect: without an ABC metaclass the decorator is silently ignored and
    Hasher could be instantiated with its do-nothing hash_key.
    """

    @abc.abstractmethod
    def hash_key(self, key):
        """Return a hash for *key*; must be overridden by subclasses."""
        return 0
class UUIDHasher(Hasher):
    """Hasher that ignores the key and returns a fresh random UUID4 as a
    32-character hex string (i.e. the canonical form with dashes removed)."""

    def hash_key(self, key=None):
        """Return a random UUID4 hex digest; *key* is unused."""
        return uuid.uuid4().hex
class Astraeus(object):
    """In-memory store: saves values in memcache under generated hash keys."""

    MEMCACHE_PORT = 11211  # default memcache port
    EXPIRE_SECONDS = 60 * 60 * 24 * 14  # 14 days

    def __init__(self,
                 port=MEMCACHE_PORT,
                 expire_seconds=EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        """
        :param port: port where memcache runs
        :param expire_seconds: values in memcache will be null after that
        :param hash_function: function to compute hash of key
        """
        backend = (MemcacheClientBuilder()
                   .with_server('localhost')
                   .with_port(port)
                   .build())
        self.memcache = MemcacheFacade(backend, expire_seconds)
        self.hasher = hash_function  # function used to derive storage keys

    def _get_key(self, val):
        """Derive the storage key by hashing the string form of *val*."""
        return self.hasher(str(val))  # todo better jsonify ?

    def save(self, val):
        """Store *val* in memcache.

        :param val: value to save (must not be None)
        :return: the memcache key, or None if the write failed
        """
        assert val is not None
        key = self._get_key(val)
        return key if self.memcache.set(key, val) else None

    def retrieve(self, key):
        """Return the value stored under *key* (must not be None)."""
        assert key is not None
        return self.memcache.get(key)
class MongoAstraeus(Astraeus):
    """Normal Astraeus, but also saves data in MongoDB for redundancy.

    Writes go to memcache first, then MongoDB; reads prefer memcache and
    fall back to the most recent MongoDB record.
    """

    MONGO_DB = 'astraeus'  # todo move to config

    def _get_parent(self):
        """Return a super() proxy bound to the Astraeus base class.

        The class is named explicitly: super(self.__class__, self) recurses
        forever as soon as MongoAstraeus is subclassed, because
        self.__class__ is then the subclass, not this class.
        """
        return super(MongoAstraeus, self)

    def __init__(self,
                 mongo_collection,
                 mongo_db=MONGO_DB,
                 port=Astraeus.MEMCACHE_PORT,
                 expire_seconds=Astraeus.EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        """
        :param mongo_collection: name of the MongoDB collection to use
        :param mongo_db: name of the MongoDB database
        :param port: port where memcache runs
        :param expire_seconds: memcache values expire after this many seconds
        :param hash_function: function to compute hash of key
        """
        super(MongoAstraeus, self).__init__(port, expire_seconds, hash_function)

        mongo = MongoDBBuilder() \
            .with_db(mongo_db) \
            .build()
        self.mongo = mongo[mongo_collection]  # specify collection

    def _try_save_to_memcache(self, val):
        """Best-effort memcache save; return the key or None on failure."""
        try:
            return self._get_parent().save(val)
        except Exception:  # narrowed from bare except: keep SystemExit etc.
            print('Cannot save {} to memcache'.format(val))

        return None

    def _try_save_to_mongodb(self, memcache_key, val):
        """Best-effort MongoDB save under <memcache_key>; the key is
        computed here when the memcache write failed. Return key or None."""
        if not memcache_key:
            memcache_key = self._get_key(val)

        try:
            item = self.build_mongo_item(memcache_key, val)
            self.mongo.insert_one(item)
            return memcache_key
        except Exception:
            print('Cannot save {} to mongodb'.format(val))

        return None

    def save(self, val):
        """Save <val> to both stores; return its key (None if both failed)."""
        key = self._try_save_to_memcache(val)  # first save to memcache ...
        key = self._try_save_to_mongodb(key, val)  # ... then in mongo
        return key

    def _try_retrieve_from_memcache(self, key):
        """Best-effort memcache lookup; return the value or None."""
        try:
            return self._get_parent().retrieve(key)
        except Exception:
            print('Cannot retrieve {} from memcache'.format(key))

        return None

    def _try_retrieve_from_mongodb(self, key):
        """Best-effort MongoDB lookup; return the newest value or None."""
        try:
            results = self.mongo.find({'key': key})
            if results:
                most_recent = max(results, key=lambda x: x['time'])  # sort by date
                return most_recent['val']  # DO NOT check expiration: this is a redundant database
        except Exception:
            print('Cannot retrieve {} from mongodb'.format(key))

        return None

    def retrieve(self, key):
        """Return the value for <key>, preferring memcache over MongoDB."""
        val = self._try_retrieve_from_memcache(key)  # first try with memcache ...
        if not val:
            return self._try_retrieve_from_mongodb(key)  # ... then with mongo
        return val

    @staticmethod
    def build_mongo_item(key, val):
        """Wrap <key>/<val> with the current timestamp for insertion."""
        time_now = datetime.datetime.now()
        return {
            'key': key,
            'val': val,
            'time': time_now
        }
| [
"sirfoga@protonmail.com"
] | sirfoga@protonmail.com |
dab971b8321388507ca5447b0771da8ff6b6cbe4 | 9ecfdfbe098070079c9d96eb41ddb73f95857f93 | /Problems/Fahrenheit/main.py | a79b42503b60d7e0b1dac928368c284aaae7d333 | [] | no_license | sathishkumar8594ys/Simple_Chatty_Bot | 0e850c616bc6dbd1a970596a3a6105d38960f59a | b07c148fa057bd3171a86e6bb456342fbfd38bfe | refs/heads/master | 2023-03-09T21:13:13.150854 | 2021-02-28T04:07:00 | 2021-02-28T04:07:00 | 343,017,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | def fahrenheit_to_celsius(fahrenheit):
cel = ((fahrenheit - 32)*(5/9))
return round(cel, 3)
| [
"sk@kali"
] | sk@kali |
49c8129426eeffff345ae304991e2b9e7e5cf774 | 147b94f148dcaf10dbc3dfbcf571c1fa6d47a115 | /code/enclosure_test.py | 26a9cf737d3f2212aab46b39dd207bbf3572cbeb | [] | no_license | indra-n/env-context-detection | 4ad608af9a7b32920d57339d7b9e450862622b46 | 433d377448c3e807ac0ff833a4f1733e12e6931a | refs/heads/master | 2021-01-22T01:38:33.787931 | 2017-09-02T21:27:39 | 2017-09-02T21:27:39 | 102,221,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,074 | py | import numpy as np
import pandas as pd
from sklearn import ensemble
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import random
import csv
random.seed(42)  # fixed seed so the random train/test sampling below is reproducible
# Tests if location is surrounded by walls
#Load the data
# NOTE(review): these Windows-style paths rely on sequences like '\d', '\i'
# not being valid string escapes; raw strings (r'...') would be safer.
# --- Observation-feature files: indoor sites ---
fp_indoor_cutsark_in1_sp = '..\data_Greenwich\indoor\CuttySark_front\CuttySark_front_P2_(inside)\Obs_features_1'
fp_indoor_cutsark_in2_sp = '..\data_Greenwich\indoor\CuttySark_front\CuttySark_front_P2_(inside)\Obs_features_2'
fp_indoor_market_gr1_sp = '..\data_Greenwich\indoor\GreenwichMarket\\under_glass_roof_P2\Obs_features_1'
fp_indoor_market_gr2_sp = '..\data_Greenwich\indoor\GreenwichMarket\\under_glass_roof_P2\Obs_features_2'
fp_indoor_museum_gr1_sp = '..\data_Greenwich\indoor\MaritimeMuseum\hall_underGlassRoof\Obs_features_1'
fp_indoor_museum_gr2_sp = '..\data_Greenwich\indoor\MaritimeMuseum\hall_underGlassRoof\Obs_features_2'
fp_indoor_museum_lw1_sp = '..\data_Greenwich\indoor\MaritimeMuseum\\under_light_well\Obs_features_1'
fp_indoor_museum_lw2_sp = '..\data_Greenwich\indoor\MaritimeMuseum\\under_light_well\Obs_features_2'
# --- Observation-feature files: intermediate (partially covered) sites ---
fp_inter_path1_sp = '..\data_Greenwich\intermediate\covered_path_byGym\Obs_features_1'
fp_inter_path2_sp = '..\data_Greenwich\intermediate\covered_path_byGym\Obs_features_2'
fp_inter_dept3_sp = '..\data_Greenwich\intermediate\Deptford_TrainStation\P3\Obs_features'
fp_inter_GreenTS_p1_1_sp = '..\data_Greenwich\intermediate\Greenwich_TrainStation\P1\Obs_features_1'
fp_inter_GreenTS_p1_2_sp = '..\data_Greenwich\intermediate\Greenwich_TrainStation\P1\Obs_features_2'
fp_inter_GreenTS_p2_1_sp = '..\data_Greenwich\intermediate\Greenwich_TrainStation\P2\Obs_features_1'
fp_inter_GreenTS_p2_2_sp = '..\data_Greenwich\intermediate\Greenwich_TrainStation\P2\Obs_features_2'
fp_inter_market_aw1_sp = '..\data_Greenwich\intermediate\GreenwichMarket\entrance_archway_P1\Obs_features_1'
fp_inter_market_aw2_sp = '..\data_Greenwich\intermediate\GreenwichMarket\entrance_archway_P1\Obs_features_2'
fp_inter_park_dark1_sp = '..\data_Greenwich\intermediate\GreenwichPark\\tree_cover_dark\Obs_features_1'
fp_inter_park_dark2_sp = '..\data_Greenwich\intermediate\GreenwichPark\\tree_cover_dark\Obs_features_2'
fp_inter_park_light1_sp = '..\data_Greenwich\intermediate\GreenwichPark\\tree_cover_lighter\Obs_features_1'
fp_inter_park_light2_sp = '..\data_Greenwich\intermediate\GreenwichPark\\tree_cover_lighter\Obs_features_2'
fp_inter_queens_arch_sp = '..\data_Greenwich\intermediate\QueensHouse\\archway\Obs_features'
fp_inter_queens_col1_sp = '..\data_Greenwich\intermediate\QueensHouse\colonnade\Obs_features_1'
fp_inter_queens_col2_sp = '..\data_Greenwich\intermediate\QueensHouse\colonnade\Obs_features_2'
# --- Observation-feature files: open-sky sites ---
fp_open_park1_sp = '..\data_Greenwich\open_sky\GreenwichPark\open\Obs_features_1'
fp_open_park2_sp = '..\data_Greenwich\open_sky\GreenwichPark\open\Obs_features_2'
# --- Observation-feature files: urban (outdoor, built-up) sites ---
fp_urban_sl1_sp = '..\data_Greenwich\\urban\\behind_SailLoftPub\Obs_features_1'
fp_urban_sl2_sp = '..\data_Greenwich\\urban\\behind_SailLoftPub\Obs_features_2'
fp_urban_cutsark_out1_sp = '..\data_Greenwich\\urban\CuttySark_front\CuttySark_front_P1_(outside)\Obs_features_1'
fp_urban_cutsark_out2_sp = '..\data_Greenwich\\urban\CuttySark_front\CuttySark_front_P1_(outside)\Obs_features_2'
fp_urban_dept1_sp = '..\data_Greenwich\\urban\Deptford_TrainStation\P1\Obs_features'
fp_urban_dept2_sp = '..\data_Greenwich\\urban\Deptford_TrainStation\P2\Obs_features'
fp_urban_GreenTS_p3_1_sp = '..\data_Greenwich\\urban\Greenwich_TrainStation\P3\Obs_features_1'
fp_urban_GreenTS_p3_2_sp = '..\data_Greenwich\\urban\Greenwich_TrainStation\P3\Obs_features_2'
fp_urban_queens_court_sp = '..\data_Greenwich\\urban\QueensHouse\courtyard\Obs_features'
# Load in dataframe
#######
# Enclosure labels
# 0 - no enclosure
# 1 - light enclosure (glass walls, open side etc.)
# 2 - enclosing walls
# Load each site's observation features and attach its enclosure label
# (the 'true_class' column used as the classification target below).
df_indoor_cutsark_in1 = pd.read_csv(fp_indoor_cutsark_in1_sp)
df_indoor_cutsark_in2 = pd.read_csv(fp_indoor_cutsark_in2_sp)
df_indoor_market_gr1 = pd.read_csv(fp_indoor_market_gr1_sp)
df_indoor_market_gr2 = pd.read_csv(fp_indoor_market_gr2_sp)
df_indoor_museum_gr1 = pd.read_csv(fp_indoor_museum_gr1_sp)
df_indoor_museum_gr2 = pd.read_csv(fp_indoor_museum_gr2_sp)
df_indoor_museum_lw1 = pd.read_csv(fp_indoor_museum_lw1_sp)
df_indoor_museum_lw2 = pd.read_csv(fp_indoor_museum_lw2_sp)
df_indoor_cutsark_in1['true_class'] = 1
df_indoor_cutsark_in2['true_class'] = 1
df_indoor_market_gr1['true_class'] = 2
df_indoor_market_gr2['true_class'] = 2
df_indoor_museum_gr1['true_class'] = 2
df_indoor_museum_gr2['true_class'] = 2
df_indoor_museum_lw1['true_class'] = 1
df_indoor_museum_lw2['true_class'] = 1
df_inter_path1 = pd.read_csv(fp_inter_path1_sp)
df_inter_path2 = pd.read_csv(fp_inter_path2_sp)
df_inter_dept3 = pd.read_csv(fp_inter_dept3_sp)
df_inter_GreenTS_p1_1 = pd.read_csv(fp_inter_GreenTS_p1_1_sp)
df_inter_GreenTS_p1_2 = pd.read_csv(fp_inter_GreenTS_p1_2_sp)
df_inter_GreenTS_p2_1 = pd.read_csv(fp_inter_GreenTS_p2_1_sp)
df_inter_GreenTS_p2_2 = pd.read_csv(fp_inter_GreenTS_p2_2_sp)
df_inter_market_aw1 = pd.read_csv(fp_inter_market_aw1_sp)
df_inter_market_aw2 = pd.read_csv(fp_inter_market_aw2_sp)
df_inter_park_dark1 = pd.read_csv(fp_inter_park_dark1_sp)
df_inter_park_dark2 = pd.read_csv(fp_inter_park_dark2_sp)
df_inter_park_light1 = pd.read_csv(fp_inter_park_light1_sp)
df_inter_park_light2 = pd.read_csv(fp_inter_park_light2_sp)
df_inter_queens_arch = pd.read_csv(fp_inter_queens_arch_sp)
df_inter_queens_col1 = pd.read_csv(fp_inter_queens_col1_sp)
# Truncated to the first 67 rows — presumably to drop bad trailing
# observations; confirm against the raw file.
df_inter_queens_col2 = pd.read_csv(fp_inter_queens_col2_sp).iloc[:67]
df_inter_path1['true_class'] = 1
df_inter_path2['true_class'] = 1
df_inter_dept3['true_class'] = 1
df_inter_GreenTS_p1_1['true_class'] = 1
df_inter_GreenTS_p1_2['true_class'] = 1
df_inter_GreenTS_p2_1['true_class'] = 1
df_inter_GreenTS_p2_2['true_class'] = 1
df_inter_market_aw1['true_class'] = 1
df_inter_market_aw2['true_class'] = 1
df_inter_park_dark1['true_class'] = 1
df_inter_park_dark2['true_class'] = 1
df_inter_park_light1['true_class'] = 0
df_inter_park_light2['true_class'] = 0
df_inter_queens_arch['true_class'] = 2
df_inter_queens_col1['true_class'] = 1
df_inter_queens_col2['true_class'] = 1
df_open_park1 = pd.read_csv(fp_open_park1_sp)
df_open_park2 = pd.read_csv(fp_open_park2_sp)
df_open_park1['true_class'] = 0
df_open_park2['true_class'] = 0
df_urban_sl1 = pd.read_csv(fp_urban_sl1_sp)
df_urban_sl2 = pd.read_csv(fp_urban_sl2_sp)
# Truncated to the first 38 rows — same caveat as above.
df_urban_cutsark_out1 = pd.read_csv(fp_urban_cutsark_out1_sp).iloc[0:38]
df_urban_cutsark_out2 = pd.read_csv(fp_urban_cutsark_out2_sp)
df_urban_dept1 = pd.read_csv(fp_urban_dept1_sp)
df_urban_dept2 = pd.read_csv(fp_urban_dept2_sp)
df_urban_GreenTS_p3_1 = pd.read_csv(fp_urban_GreenTS_p3_1_sp)
df_urban_GreenTS_p3_2 = pd.read_csv(fp_urban_GreenTS_p3_2_sp)
df_urban_queens_court = pd.read_csv(fp_urban_queens_court_sp)
df_urban_sl1['true_class'] = 1
df_urban_sl2['true_class'] = 1
df_urban_cutsark_out1['true_class'] = 0
df_urban_cutsark_out2['true_class'] = 0
df_urban_dept1['true_class'] = 0
df_urban_dept2['true_class'] = 0
df_urban_GreenTS_p3_1['true_class'] = 0
df_urban_GreenTS_p3_2['true_class'] = 0
df_urban_queens_court['true_class'] = 2
# Feature-column subsets tried during experimentation; only the last,
# uncommented assignment is active.
#cols = ['obs_id', 'e_id', 'sv_prn', 'constell_id', 'azimuth', 'elevation', 'CN0']
# cols = ['sv_prn', 'constell_id', 'azimuth', 'elevation', 'CN0']
# cols=['num_sat', 'sum_snr', 'num_sat_25', 'sum_snr_25', 'elev_0_30', 'elev_30_60', 'elev_60_90',
#       'elev_0_30_25', 'elev_30_60_25', 'elev_60_90_25']
# cols=['num_sat', 'sum_snr', 'num_sat_25', 'sum_snr_25',
#       'elev_0_30_25', 'elev_30_60_25', 'elev_60_90_25']
# cols=['num_sat_25', 'sum_snr_25',
#       'elev_0_30_25', 'elev_30_60_25', 'elev_60_90_25']
cols=['num_sat', 'sum_snr', 'num_sat_25', 'sum_snr_25', 'elev_0_30',
      'elev_0_30_25']
#######
# Location values
# Numeric site identifiers, used later to report which locations the
# classifier got wrong (wrong_pred['location'].value_counts()).
df_indoor_cutsark_in1['location'] = 321
df_indoor_cutsark_in2['location'] = 322
df_indoor_market_gr1['location'] = 421
df_indoor_market_gr2['location'] = 422
df_indoor_museum_gr1['location'] = 511
df_indoor_museum_gr2['location'] = 512
df_indoor_museum_lw1['location'] = 521
df_indoor_museum_lw2['location'] = 522
df_inter_path1['location'] = 211
df_inter_path2['location'] = 212
df_inter_dept3['location'] = 931
df_inter_GreenTS_p1_1['location'] = 811
df_inter_GreenTS_p1_2['location'] = 812
df_inter_GreenTS_p2_1['location'] = 821
df_inter_GreenTS_p2_2['location'] = 822
df_inter_market_aw1['location'] = 411
df_inter_market_aw2['location'] = 412
df_inter_park_dark1['location'] = 721
df_inter_park_dark2['location'] = 722
df_inter_park_light1['location'] = 731
df_inter_park_light2['location'] = 732
df_inter_queens_arch['location'] = 631
df_inter_queens_col1['location'] = 611
df_inter_queens_col2['location'] = 612
df_open_park1['location'] = 711
df_open_park2['location'] = 712
df_urban_sl1['location'] = 111
df_urban_sl2['location'] = 112
df_urban_cutsark_out1['location'] = 311
df_urban_cutsark_out2['location'] = 312
df_urban_dept1['location'] = 911
df_urban_dept2['location'] = 921
df_urban_GreenTS_p3_1['location'] = 831
df_urban_GreenTS_p3_2['location'] = 832
df_urban_queens_court['location'] = 621
#######
# Alternative assignments
# 1- indoor
# 2- inbetween
# 3- urban
# 4- open sky
# 5- i don't know
# An alternative 4/5-class labelling scheme, kept commented out for
# reference from earlier experiments.
# df_indoor_cutsark_in1['true_class'] = 1
# df_indoor_cutsark_in2['true_class'] = 1
# df_indoor_market_gr1['true_class'] = 1
# df_indoor_market_gr2['true_class'] = 1
# df_indoor_museum_gr1['true_class'] = 1
# df_indoor_museum_gr2['true_class'] = 1
# df_indoor_museum_lw1['true_class'] = 2
# df_indoor_museum_lw2['true_class'] = 2
# # df_indoor_cutsark_in1['true_class'] = 5
# # df_indoor_cutsark_in2['true_class'] = 5
# # df_indoor_market_gr1['true_class'] = 5
# # df_indoor_market_gr2['true_class'] = 5
# # df_indoor_museum_gr1['true_class'] = 5
# # df_indoor_museum_gr2['true_class'] = 5
# # df_indoor_museum_lw1['true_class'] = 5
# # df_indoor_museum_lw2['true_class'] = 5
#
# df_inter_path1['true_class'] = 3
# df_inter_path2['true_class'] = 3
# df_inter_dept3['true_class'] = 3
# df_inter_GreenTS_p1_1['true_class'] = 3
# df_inter_GreenTS_p1_2['true_class'] = 3
# df_inter_GreenTS_p2_1['true_class'] = 3
# df_inter_GreenTS_p2_2['true_class'] = 3
# df_inter_market_aw1['true_class'] = 2
# df_inter_market_aw2['true_class'] = 2
# # df_inter_market_aw1['true_class'] = 5
# # df_inter_market_aw2['true_class'] = 5
#
# df_inter_park_dark1['true_class'] = 2
# df_inter_park_dark2['true_class'] = 2
# # df_inter_park_dark1['true_class'] = 5
# # df_inter_park_dark2['true_class'] = 5
#
# df_inter_park_light1['true_class'] = 3
# df_inter_park_light2['true_class'] = 3
#
# df_inter_queens_arch['true_class'] = 2
# #df_inter_queens_arch['true_class'] = 5
#
# df_inter_queens_col1['true_class'] = 3
# df_inter_queens_col2['true_class'] = 3
#
# df_urban_sl1['true_class'] = 3
# df_urban_sl2['true_class'] = 3
# df_urban_cutsark_out1['true_class'] = 3
# df_urban_cutsark_out2['true_class'] = 3
# df_urban_dept1['true_class'] = 4
# df_urban_dept2['true_class'] = 3
# df_urban_GreenTS_p3_1['true_class'] = 3
# df_urban_GreenTS_p3_2['true_class'] = 3
# df_urban_queens_court['true_class'] = 2
#
# df_open_park1['true_class'] = 4
# df_open_park2['true_class'] = 4
# Split training and test data
# Sites with two recordings are concatenated, then sampled without
# replacement; test rows are drawn from the remainder (drop by index) so
# train and test never overlap for the same recording.
df_indoor_cutsark_in = pd.concat([df_indoor_cutsark_in1, df_indoor_cutsark_in2])
train_indoor_1 = df_indoor_cutsark_in.sample(60)
test_indoor_1 = df_indoor_cutsark_in.drop(train_indoor_1.index).sample(60)
df_indoor_market_gr = pd.concat([df_indoor_market_gr1, df_indoor_market_gr2])
train_indoor_2 = df_indoor_market_gr.sample(40)
test_indoor_2 = df_indoor_market_gr.drop(train_indoor_2.index).sample(60)
df_indoor_museum_gr = pd.concat([df_indoor_museum_gr1, df_indoor_museum_gr2])
train_indoor_3 = df_indoor_museum_gr.sample(60)
test_indoor_3 = df_indoor_museum_gr2.drop(train_indoor_3.index).sample(60)
train_indoor_4 = df_indoor_museum_lw1.sample(30)
test_indoor_4 = df_indoor_museum_lw2.sample(15)
df_inter_path = pd.concat([df_inter_path1, df_inter_path2])
train_inter_1 = df_inter_path.sample(40)
test_inter_1 = df_inter_path.drop(train_inter_1.index).sample(60)
test_inter_2 = df_inter_dept3.sample(60)
df_inter_GreenTS_p1 = pd.concat([df_inter_GreenTS_p1_1, df_inter_GreenTS_p1_2])
train_inter_2 = df_inter_GreenTS_p1.sample(60)
test_inter_3 = df_inter_GreenTS_p1.drop(train_inter_2.index).sample(60)
train_inter_3 = df_inter_GreenTS_p2_1.sample(60)
test_inter_4 = df_inter_GreenTS_p2_2.sample(60)
train_inter_4 = df_inter_market_aw1.sample(40)
test_inter_5 = df_inter_market_aw2.sample(60)
train_inter_5 = df_inter_park_dark1.sample(40)
test_inter_6 = df_inter_park_dark2.sample(60)
train_inter_6 = df_inter_park_light1.sample(60)
test_inter_9 = df_inter_park_light2.sample(60)
test_inter_7 = df_inter_queens_arch.sample(60)
train_inter_7 = df_inter_queens_col1.sample(60)
test_inter_8 = df_inter_queens_col2.sample(60)
df_urban_sl = pd.concat([df_urban_sl1, df_urban_sl2])
train_urban_1 = df_urban_sl.sample(60)
test_urban_1 = df_urban_sl.drop(train_urban_1.index).sample(60)
df_urban_cutsark_out = pd.concat([df_urban_cutsark_out1, df_urban_cutsark_out2])
train_urban_2 = df_urban_cutsark_out.sample(50)
test_urban_2 = df_urban_cutsark_out2.drop(train_urban_2.index).sample(50)
train_urban_3 = df_urban_dept1.sample(60)
test_urban_3 = df_urban_dept2.sample(60)
train_urban_4 = df_urban_GreenTS_p3_1.sample(40)
test_urban_4 = df_urban_GreenTS_p3_2.sample(60)
train_urban_5 = df_urban_queens_court.sample(60)
train_open = df_open_park1.sample(60)
test_open = df_open_park2.sample(60)
# Splits from an earlier (London) dataset, kept commented for reference.
#########
# train_indoor_bm = df_indoor_bm.sample(100)
# train_indoor_ch2221 = df_indoor_ch2221.sample(100)
# train_indoor_ch103a = df_indoor_ch103a.sample(100)
# train_indoor_jah = df_indoor_jah.sample(100)
#
# test_indoor_bm = df_indoor_bm.drop(train_indoor_bm.index).sample(100)
# test_indoor_ch2221 = df_indoor_ch2221.drop(train_indoor_ch2221.index).sample(100)
# test_indoor_ch103a = df_indoor_ch103a.drop(train_indoor_ch103a.index).sample(100)
# test_indoor_ch103b = df_indoor_ch103b.sample(100)
# test_indoor_jah = df_indoor_jah.drop(train_indoor_jah.index).sample(100)
#
# train_inter = df_inter.sample(100)
# test_inter = df_inter.drop(train_inter.index).sample(100)
#
# train_urban_p1b = df_urban_p1b.sample(100)
# train_urban_p2b = df_urban_p2b.sample(100)
# train_urban_p4b = df_urban_p4b.sample(100)
#
# test_urban_p1b = df_urban_p1b.drop(train_urban_p1b.index).sample(100)
# test_urban_p2b = df_urban_p2b.drop(train_urban_p2b.index).sample(100)
# test_urban_p3b = df_urban_p3b.sample(100)
# test_urban_p4b = df_urban_p4b.drop(train_urban_p4b.index).sample(100)
#
# train_open_reg = df_open_reg.sample(100)
# test_open_hyde = df_open_hyde.sample(100)
# train_df = [train_indoor_bm, train_indoor_ch2221, train_indoor_ch103a, train_indoor_jah, train_inter, train_urban_p1b,
#             train_urban_p2b, train_urban_p4b, train_open_reg]
# Assemble and shuffle the final train/test frames.
train_df = [train_indoor_2, train_indoor_3, train_indoor_4, train_inter_1, train_inter_2, train_inter_3,
            train_inter_4, train_inter_5, train_inter_6, train_inter_7, train_urban_1, train_urban_2, train_urban_3,
            train_urban_4, train_urban_5, train_open, test_urban_3]
# train_df = [train_indoor_1, train_indoor_2, train_indoor_3, train_indoor_4, train_inter_1, train_inter_2, train_inter_3,
#             train_inter_4, train_inter_5, train_inter_6, train_inter_7, train_urban_1, train_urban_2, train_urban_3,
#             train_urban_4, train_urban_5, train_open]
train_data = pd.concat(train_df).sample(frac=1).reset_index(drop=True)
# test_df = [test_indoor_bm, test_indoor_ch2221, test_indoor_ch103a, test_indoor_ch103b, test_indoor_jah, test_inter,
#            test_urban_p1b, test_urban_p2b, test_urban_p3b, test_urban_p4b, test_open_hyde]
test_df = [test_indoor_2, test_indoor_3, test_indoor_4, test_inter_1, test_inter_2, test_inter_3,
           test_inter_4, test_inter_5, test_inter_6, test_inter_7, test_inter_8, test_inter_9, test_urban_1,
           test_urban_2, test_urban_3, test_urban_4, test_open]
# test_df = [test_indoor_1, test_indoor_2, test_indoor_3, test_indoor_4, test_inter_1, test_inter_2, test_inter_3,
#            test_inter_4, test_inter_5, test_inter_6, test_inter_7, test_inter_8, test_inter_9, test_urban_1,
#            test_urban_2, test_urban_3, test_urban_4, test_open]
test_data = pd.concat(test_df).sample(frac=1).reset_index(drop=True)
# Train a random forest on the selected feature columns and evaluate.
forest = ensemble.RandomForestClassifier(n_estimators=100)
forest.fit(train_data[cols], train_data['true_class'])
pred = forest.predict(test_data[cols])
pred_probas = forest.predict_proba(test_data[cols])
# Separate evaluation on the Queen's House archway set (test_inter_7).
pred_probas_dept = forest.predict_proba(test_inter_7[cols])
pred_dept = forest.predict(test_inter_7[cols])
# Accuracy = fraction of predictions with zero difference from the label.
differ_dept = abs(pred_dept - test_inter_7['true_class'])
accu_dept = 1 - np.count_nonzero(differ_dept) / test_inter_7.shape[0]
differ = abs(pred - test_data['true_class'])
accu = 1 - np.count_nonzero(differ) / test_data.shape[0]
print(accu)
print(accu_dept)
# Breakdown of misclassified rows by site identifier.
wrong_pred = test_data[differ != 0]
print(wrong_pred.shape)
print(wrong_pred['location'].value_counts())
# Confusion matrix, raw and row-normalised (3 enclosure classes).
cm = confusion_matrix(test_data['true_class'], pred)
print(cm)
cm_proc = cm / np.sum(cm, axis=1).reshape((3, 1))
print(cm_proc)
# print(pred_probas_dept)
# for i in range(1000):
#     if differ[i] != 0:
#         print(test_data['true_class'][i])
#         print(pred_probas[i])
# Feature importances with their spread over the ensemble's trees.
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(test_data[cols].shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(test_data[cols].shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(test_data[cols].shape[1]), indices)
plt.xlim([-1, test_data[cols].shape[1]])
plt.show() | [
"indra.niedre@gmail.com"
] | indra.niedre@gmail.com |
00ff92f5206a95948024ec7a6e5ea5fa74bdedc7 | d5f31dbe958d5e8ddcf0dd042050925b5206c7c7 | /.vscode/寻找空间中的向量投影.py | 23e5f2dd1e0d29100620dc3deb4baaeee142ffec | [] | no_license | dertaek/a-b-test-projects | b88910ffca421b8b5d3c47d84142ebe6fa6f0239 | 5637b833064b8992d9eb7ba115477b7a9723d492 | refs/heads/master | 2023-03-17T00:45:30.337383 | 2020-10-26T05:26:11 | 2020-10-26T05:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | # 利用矩阵来描述投影。
| [
"w664578037@gmial.com"
] | w664578037@gmial.com |
e5b94ab47de93b8731f125af0e33149260abb4fe | 6a515e3eaec9ddc9a0f5d61a8295ef26b250a520 | /run_tests.py | 88d89d01c2be113fec3bb199ac59b13e114a55df | [] | no_license | EventsExpertsMIEM/backend_19288 | 88146d0e1e0140318040e2f9fc68da14e180a76b | f4a98b325366ef7bbdbe9243f9b742a6998d6da3 | refs/heads/dev | 2020-09-14T14:17:51.939454 | 2020-04-23T14:45:49 | 2020-04-23T14:45:49 | 223,153,250 | 1 | 1 | null | 2020-04-10T10:34:15 | 2019-11-21T10:56:04 | Python | UTF-8 | Python | false | false | 172 | py | from app import db
import bcrypt
import testsuite
# Hash the default password '1234' with a freshly generated salt.
pw = bcrypt.hashpw(str('1234').encode('utf-8'), bcrypt.gensalt())
# Create the database schema; the hash is passed in — presumably to seed a
# default account (confirm against app.db.create_tables).
db.create_tables(pw.decode('utf-8'))
# Run the project's test suite against the freshly created tables.
testsuite.run()
| [
"mvalkhimovich@miem.hse.ru"
] | mvalkhimovich@miem.hse.ru |
315d23bf96cfe201a6c72c58d0333896da2bea03 | 649eabe3d4bef9c866c2884474f58c997a64f8d5 | /papers.py | 4dec4263add2da77a2e1abc163a3cbb8e88b5b83 | [] | no_license | ValeriaFakhrutd/Memory_model | 091c78a537b5ea4f7391c0d686c976711f43d339 | 62d787eb6e5d02899c727d28d401fb0d169ebede | refs/heads/master | 2022-12-07T12:31:54.710748 | 2020-09-03T21:25:13 | 2020-09-03T21:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,178 | py | """
=== Module Description ===
This module contains a new class, PaperTree, which is used to model data on
publications in a particular area of Computer Science Education research.
This data is adapted from a dataset presented at SIGCSE 2019.
You can find the full dataset here: https://www.brettbecker.com/sigcse2019/
Although this data is very different from filesystem data, it is still
hierarchical. This means we are able to model it using a TMTree subclass,
and we can then run it through our treemap visualisation tool to get a nice
interactive graphical representation of this data.
"""
import csv
from typing import List, Dict
from tm_trees import TMTree
# Filename for the dataset
DATA_FILE = 'cs1_papers.csv'
class PaperTree(TMTree):
    """A tree representation of Computer Science Education research paper data.

    === Private Attributes ===
    _authors:
        authors of the paper; empty string for category nodes.
    _doi:
        link of the paper; empty string for category nodes.
    _by_year:
        whether the first level of subtrees is grouped by publication year.

    === Inherited Attributes ===
    rect:
        The pygame rectangle representing this node in the treemap
        visualization.
    data_size:
        The size of the data represented by this tree.
    _colour:
        The RGB colour value of the root of this tree.
    _name:
        The root value of this tree, or None if this tree is empty.
    _subtrees:
        The subtrees of this tree.
    _parent_tree:
        The parent tree of this tree; i.e., the tree that contains this tree
        as a subtree, or None if this tree is not part of a larger tree.
    _expanded:
        Whether or not this tree is considered expanded for visualization.

    === Representation Invariants ===
    - All TMTree RIs are inherited.
    """
    _authors: str
    _doi: str
    _by_year: bool

    def __init__(self, name: str, subtrees: List[TMTree], authors: str = '',
                 doi: str = '', citations: int = 0, by_year: bool = True,
                 all_papers: bool = False) -> None:
        """Initialize a new PaperTree with the given <name> and <subtrees>,
        <authors> and <doi>, and with <citations> as the size of the data.

        If <all_papers> is True, then this tree is to be the root of the paper
        tree. In that case, load data about papers from DATA_FILE to build the
        tree; any <subtrees> passed in are superseded by the loaded data.

        If <all_papers> is False, Do NOT load new data.

        <by_year> indicates whether or not the first level of subtrees should
        be the years, followed by each category, subcategory, and so on. If
        <by_year> is False, then the year in the dataset is simply ignored.
        """
        if all_papers:
            subtrees = _build_tree_from_dict(_get_paper_list(by_year))
        # Initialize exactly once. The original code could run
        # TMTree.__init__ twice (first with the passed-in subtrees, then with
        # the loaded ones) when all_papers was True and subtrees == [].
        TMTree.__init__(self, name, subtrees, citations)
        self._authors = authors
        self._doi = doi
        self._by_year = by_year

    def get_separator(self) -> str:
        """Return the string used to separate names on a path in this tree.
        """
        return ": "

    def get_suffix(self) -> str:
        """Return the final descriptor of this tree: '(paper)' for leaves,
        '(category)' for internal nodes.
        """
        if len(self._subtrees) == 0:
            return '(paper)'
        else:
            return '(category)'
def _build_tree_from_dict(nested_dict: Dict) -> List[PaperTree]:
    """Return a list of trees built from the nested dictionary <nested_dict>.

    Keys mapping to an empty dict are leaf papers, stored as
    (author, title, url, citations) tuples; all other keys are category
    names whose values are built recursively.
    """
    trees = []
    for key, children in nested_dict.items():
        if children == {}:
            author, title, url, citations = key
            trees.append(PaperTree(title, [], author, url, citations,
                                   False, False))
        else:
            subtree = PaperTree(key, _build_tree_from_dict(children),
                                by_year=False, all_papers=False)
            subtree.update_data_sizes()
            trees.append(subtree)
    return trees
def _get_paper_list(by_year: bool) -> dict:
"""
'hey'
"""
dic = {}
with open(DATA_FILE, newline='') as csv_file:
csv_file.readline()
reader = csv.reader(csv_file)
for line in reader:
author, title, year, categories, url, size = line
size = int(size)
# year = int(year)
categories = categories.split(":")
for i in range(len(categories)):
categories[i] = categories[i].strip()
tup1 = (author, title, url, size)
categories.append(tup1)
if by_year:
categories.insert(0, year)
new = categories
dic = _convert_dict(new, dic)
# print(dic)
csv_file.close()
return dic
def _convert_dict(lst: List, dics: Dict) -> Dict:
if len(lst) == 0:
pass
elif len(lst) == 1:
if lst[0] in dics:
pass
else:
d = {lst[0]: {}}
dics.update(d)
else:
if lst[0] in dics:
dics[lst[0]] = _convert_dict(lst[1:], dics[lst[0]])
else:
dics[lst[0]] = _convert_dict(lst[1:], {})
return dics
if __name__ == '__main__':
import python_ta
python_ta.check_all(config={
'allowed-import-modules': ['python_ta', 'typing', 'csv', 'tm_trees'],
'allowed-io': ['_load_papers_to_dict', '_get_paper_list'],
'max-args': 8
})
# x = _get_paper_list()
# y = _build_tree_from_dict(x)
# print(y)
| [
"valeriia.fakhrutdinova@mail.utoronto.ca"
] | valeriia.fakhrutdinova@mail.utoronto.ca |
580cbb2d0c363236cfddb8740feec72eacf3119a | e9db45dc23454e256decaabc697016b18cc79cd1 | /game.py | d06e0fa7b641346b8909d0745de62f4ebd0f241d | [] | no_license | siyan38000/WikiGame | 4d7eebe41546ac7d8e038933b9065c96e0950979 | 5bd3949cddca71c5add7923384caa010754a4490 | refs/heads/main | 2023-03-07T08:53:24.773947 | 2021-02-15T11:03:43 | 2021-02-15T11:03:43 | 326,699,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,233 | py | from bs4 import BeautifulSoup
import requests
import urllib.request
import random
import tkinter as tk
window = tk.Tk()
window.title('Wikigame')
var = tk.StringVar()
global start_links
global startURL
#Definition des deux pages aléatoires
def getRandomPage():
return requests.get('https://fr.wikipedia.org/wiki/Sp%C3%A9cial:Page_au_hasard').content
#Fonction que filtre les liens afin de ne garder que les liens vers d'autree articles
def linksFilter(url):
linksList = []
with urllib.request.urlopen(url) as page:
actualPage = BeautifulSoup(page.read(), 'html.parser')
for anchor in actualPage.find_all('div', {"class":"mw-parser-output"}):
for links in anchor.find_all('a'):
link = formatage(str(links.get('href')))
#On s'assure que le lien pointe bien vers un article et qu'il n'existe pas déja dans la liste
if not ('/w/') in link:
if not ('#') in link:
if not ('Fichier:') in link:
if not ('http:') in link:
if not ('https:') in link:
if not ('Modèle:') in link:
if not ('/API') in link:
if not ('Spécial:') in link:
if not ('Catégorie:') in link:
if not (':') in link:
if not ('None') in link:
if link not in linksList:
linksList.append(link)
return linksList
def formatage(arg):
return arg.replace("%20"," ").replace("%27","'").replace("%C3%A8","è").replace("%C3%A9","é").replace('%C3%AA','ê').replace("%C3%A2","â").replace("%C5%93","œ").replace("%C3%B",'ü').replace("%C3%AC","ì").replace('%C3%A7','ç').replace('%C3%A0','à').replace('%C3%B4','ô').replace('%C3%89','É').replace("%C3%AF","ï")
#Fonction qui s'execute au clic sur un bouton radio et recupere sa valeur
def askForChoice():
choice = var.get()
updateWindow(choice)
depart = BeautifulSoup(getRandomPage(), 'html.parser')
arrive = BeautifulSoup(getRandomPage(), 'html.parser')
url1 = depart.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
url2 = arrive.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
def wikigame(start, end):
startURL = start.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
global endURL
endURL = end.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
updateWindow(startURL)
#Met a jour l'affichage a chaque changement de page
#le paramètre cpt compte le nombre de fois que la fonction est appelée
def updateWindow(url, cpt=[0]):
#Suppression de tout les objets de la fenetre
for widget in window.winfo_children():
widget.destroy()
if url == endURL:
tk.Label(window, text="BRAVO !").pack()
tk.Label(window, text="Page trouvée en {} coups".format(cpt)).pack()
else:
tk.Label(window, text="Page actuelle : {}(URL = https://fr.wikipedia.org{})".format(url.replace("/wiki/",""), url)).pack()
tk.Label(window, text="Page d'arrivée :{} (URL : https://fr.wikipedia.org{})".format(arrive.find(id='firstHeading').text,url2)).pack()
#Ajout de la scrollbar pour la liste des liens
canvas = tk.Canvas(window)
scroll = tk.Scrollbar(window, orient='vertical', command=canvas.yview)
start_links = linksFilter('https://fr.wikipedia.org'+url)
i = 0
for link in start_links:
rb = tk.Radiobutton(canvas, text="{} - {}".format(i, link), variable=var, value = link, command=askForChoice)
canvas.create_window(0, i*50, anchor='nw', window=rb, height=50)
i = i + 1
canvas.configure(scrollregion=canvas.bbox('all'), yscrollcommand=scroll.set)
canvas.pack(fill='both', expand=True, side='left')
scroll.pack(fill='y', side='right')
cpt[0] += 1
wikigame(depart, arrive)
tk.mainloop()
| [
"yanis.petit@epsi.fr"
] | yanis.petit@epsi.fr |
65ece97ccb16002fa54a5cd663cf837dc9ccdf3f | 96bf70c65bbca98f85112e09d51ca749eeeaeb6b | /selftf/tf_job/inception/slim/ops_test.py | fa6b41c009c903fcb6a101d5c69b220604dc8b40 | [] | no_license | MLSysTune/MLTuner | 13f3ad91ce243224bf54e4b1af0a39046c4c45cb | 82fbeadb64a476a6d37afc7f34bd29ca2627740e | refs/heads/master | 2023-08-01T08:17:08.112017 | 2021-07-30T12:19:19 | 2021-07-30T12:19:19 | 407,482,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,284 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from selftf.tf_job.inception.slim import ops
from selftf.tf_job.inception.slim import scopes
from selftf.tf_job.inception.slim import variables
class ConvTest(tf.test.TestCase):
def testCreateConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateSquareConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, 3)
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, images.get_shape()[1:3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateFullyConv(self):
height, width = 6, 6
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
output = ops.conv2d(images, 64, images.get_shape()[1:3], padding='VALID')
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
def testCreateVerticalConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 1])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height, width, 32])
def testCreateHorizontalConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [1, 3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height, width, 32])
def testCreateConvWithStride(self):
height, width = 6, 6
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], stride=2)
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height/2, width/2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
ops.conv2d(images, 32, [3, 3], scope='conv1')
self.assertTrue(variables.get_variables('conv1/weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEquals(output.op.name, 'conv1/Relu')
def testCreateConvWithoutActivation(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], activation=None)
self.assertEquals(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateConvWithWD(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], weight_decay=0.01)
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEquals(wd.op.name,
'Conv/weights/Regularizer/L2Regularizer/value')
sess.run(tf.global_variables_initializer())
self.assertTrue(sess.run(wd) <= 0.01)
def testCreateConvWithoutWD(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], weight_decay=0)
self.assertEquals(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEquals(len(variables.get_variables()), 2)
ops.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEquals(len(variables.get_variables()), 2)
def testNonReuseVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3])
self.assertEquals(len(variables.get_variables()), 2)
ops.conv2d(images, 32, [3, 3])
self.assertEquals(len(variables.get_variables()), 4)
def testReuseConvWithWD(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1')
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1',
reuse=True)
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testConvWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
net = ops.conv2d(images, 32, [3, 3])
net = ops.conv2d(net, 32, [3, 3])
self.assertEquals(len(variables.get_variables()), 8)
self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 3)
def testReuseConvWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
net = ops.conv2d(images, 32, [3, 3], scope='Conv')
net = ops.conv2d(net, 32, [3, 3], scope='Conv', reuse=True)
self.assertEquals(len(variables.get_variables()), 4)
self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 0)
class FCTest(tf.test.TestCase):
def testCreateFC(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = ops.fc(inputs, 32)
self.assertEquals(output.op.name, 'FC/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32])
def testCreateFCWithScope(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = ops.fc(inputs, 32, scope='fc1')
self.assertEquals(output.op.name, 'fc1/Relu')
def testCreateFcCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('fc1/weights'))
self.assertFalse(variables.get_variables('fc1/biases'))
ops.fc(inputs, 32, scope='fc1')
self.assertTrue(variables.get_variables('fc1/weights'))
self.assertTrue(variables.get_variables('fc1/biases'))
def testReuseVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
ops.fc(inputs, 32, scope='fc1')
self.assertEquals(len(variables.get_variables('fc1')), 2)
ops.fc(inputs, 32, scope='fc1', reuse=True)
self.assertEquals(len(variables.get_variables('fc1')), 2)
def testNonReuseVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
ops.fc(inputs, 32)
self.assertEquals(len(variables.get_variables('FC')), 2)
ops.fc(inputs, 32)
self.assertEquals(len(variables.get_variables('FC')), 4)
def testCreateFCWithoutActivation(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = ops.fc(inputs, 32, activation=None)
self.assertEquals(output.op.name, 'FC/xw_plus_b')
def testCreateFCWithWD(self):
height, width = 3, 3
with self.test_session() as sess:
inputs = tf.random_uniform((5, height * width * 3), seed=1)
ops.fc(inputs, 32, weight_decay=0.01)
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEquals(wd.op.name,
'FC/weights/Regularizer/L2Regularizer/value')
sess.run(tf.global_variables_initializer())
self.assertTrue(sess.run(wd) <= 0.01)
def testCreateFCWithoutWD(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
ops.fc(inputs, 32, weight_decay=0)
self.assertEquals(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseFCWithWD(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
ops.fc(inputs, 32, weight_decay=0.01, scope='fc')
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
ops.fc(inputs, 32, weight_decay=0.01, scope='fc', reuse=True)
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height * width * 3), seed=1)
with scopes.arg_scope([ops.fc], batch_norm_params={}):
net = ops.fc(images, 27)
net = ops.fc(net, 27)
self.assertEquals(len(variables.get_variables()), 8)
self.assertEquals(len(variables.get_variables('FC/BatchNorm')), 3)
self.assertEquals(len(variables.get_variables('FC_1/BatchNorm')), 3)
def testReuseFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height * width * 3), seed=1)
with scopes.arg_scope([ops.fc], batch_norm_params={'decay': 0.9}):
net = ops.fc(images, 27, scope='fc1')
net = ops.fc(net, 27, scope='fc1', reuse=True)
self.assertEquals(len(variables.get_variables()), 4)
self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3)
class MaxPoolTest(tf.test.TestCase):
def testCreateMaxPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3])
self.assertEquals(output.op.name, 'MaxPool/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateSquareMaxPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, 3)
self.assertEquals(output.op.name, 'MaxPool/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateMaxPoolWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3], scope='pool1')
self.assertEquals(output.op.name, 'pool1/MaxPool')
def testCreateMaxPoolSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
def testCreateMaxPoolStrideSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalMaxPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class AvgPoolTest(tf.test.TestCase):
def testCreateAvgPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3])
self.assertEquals(output.op.name, 'AvgPool/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateSquareAvgPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, 3)
self.assertEquals(output.op.name, 'AvgPool/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateAvgPoolWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3], scope='pool1')
self.assertEquals(output.op.name, 'pool1/AvgPool')
def testCreateAvgPoolSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
def testCreateAvgPoolStrideSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalAvgPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class OneHotEncodingTest(tf.test.TestCase):
def testOneHotEncodingCreate(self):
with self.test_session():
labels = tf.constant([0, 1, 2])
output = ops.one_hot_encoding(labels, num_classes=3)
self.assertEquals(output.op.name, 'OneHotEncoding/SparseToDense')
self.assertListEqual(output.get_shape().as_list(), [3, 3])
def testOneHotEncoding(self):
with self.test_session():
labels = tf.constant([0, 1, 2])
one_hot_labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
output = ops.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
class DropoutTest(tf.test.TestCase):
def testCreateDropout(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.dropout(images)
self.assertEquals(output.op.name, 'Dropout/dropout/mul')
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutNoTraining(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = ops.dropout(images, is_training=False)
self.assertEquals(output, images)
class FlattenTest(tf.test.TestCase):
def testFlatten4D(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = ops.flatten(images)
self.assertEquals(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlatten3D(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width), seed=1, name='images')
output = ops.flatten(images)
self.assertEquals(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlattenBatchSize(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
inputs = tf.placeholder(tf.int32, (None, height, width, 3))
output = ops.flatten(inputs)
self.assertEquals(output.get_shape().as_list(),
[None, height * width * 3])
output = sess.run(output, {inputs: images.eval()})
self.assertEquals(output.size,
images.get_shape().num_elements())
self.assertEqual(output.shape[0], images.get_shape()[0])
class BatchNormTest(tf.test.TestCase):
def testCreateOp(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.batch_norm(images)
self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm'))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images)
beta = variables.get_variables_by_name('beta')[0]
self.assertEquals(beta.op.name, 'BatchNorm/beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(gamma, [])
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEquals(beta.op.name, 'BatchNorm/beta')
self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithoutCenterWithScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, center=False, scale=True)
beta = variables.get_variables_by_name('beta')
self.assertEquals(beta, [])
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithoutCenterWithoutScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, center=False, scale=False)
beta = variables.get_variables_by_name('beta')
self.assertEquals(beta, [])
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(gamma, [])
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True)
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
update_moving_mean = update_ops[0]
update_moving_variance = update_ops[1]
self.assertEquals(update_moving_mean.op.name,
'BatchNorm/AssignMovingAvg')
self.assertEquals(update_moving_variance.op.name,
'BatchNorm/AssignMovingAvg_1')
def testReuseVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True, scope='bn')
ops.batch_norm(images, scale=True, scope='bn', reuse=True)
beta = variables.get_variables_by_name('beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(len(beta), 1)
self.assertEquals(len(gamma), 1)
moving_vars = tf.get_collection('moving_vars')
self.assertEquals(len(moving_vars), 2)
def testReuseUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scope='bn')
self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2)
ops.batch_norm(images, scope='bn', reuse=True)
self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4)
def testCreateMovingVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
_ = ops.batch_norm(images, moving_vars='moving_vars')
moving_mean = tf.get_collection('moving_vars',
'BatchNorm/moving_mean')
self.assertEquals(len(moving_mean), 1)
self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean')
moving_variance = tf.get_collection('moving_vars',
'BatchNorm/moving_variance')
self.assertEquals(len(moving_variance), 1)
self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance')
def testComputeMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
for _ in range(10):
sess.run([output])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEvalMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assigment from saver restore.
init_assigns = [tf.assign(moving_mean, expected_mean),
tf.assign(moving_variance, expected_var)]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testReuseVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assigment from saver restore.
init_assigns = [tf.assign(moving_mean, expected_mean),
tf.assign(moving_variance, expected_var)]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
if __name__ == '__main__':
tf.test.main()
| [
"fafaoc@me.com"
] | fafaoc@me.com |
b0eab76f789da136a8a281e69b3f5b956c7456b4 | d0a2df49e95881b3e3cdde806e55d5ef7ca84526 | /logistic_regression.py | e8f81f7f32d6b1273b8f2a77b8c6408ec5b2571b | [] | no_license | nhan-dam/svgd-variance-reduction | b288d35f0ad7c342be57043d14104a8cdc905a66 | 7f626a198cf0cf3aab083e1ac720ea58d3c9b7b9 | refs/heads/master | 2022-04-18T07:07:30.940649 | 2020-04-06T08:35:31 | 2020-04-06T08:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,968 | py | from __future__ import print_function, division, absolute_import
import numpy as np
'''
Logistic Regression:
The observed data D = {X, y} consist of N binary class labels,
y_t \in {-1,+1}, and d covariates for each datapoint, X_t \in R^d.
p(y_t = 1| x_t, w) = 1 / (1 + exp(-w^T x_t))
'''
class LogisticRegression:
    # Binary logistic-regression classifier. Externally labels are in {-1, +1};
    # fit() remaps them to {0, 1} for the gradient formulas. Several first-order
    # solvers are supported, including SVRG variance-reduced variants.
    def __init__(self, W, solver='sgd', batchsize=128):
        '''
        Initialise a Logistic Regression model.
        W: (D,) initial weight vector (copied, so the caller's array is never mutated).
        solver: name of the solver. Currently, this class supports 6 solvers: 'sgd', 'adagrad', 'rmsprop',
        'svrg-sgd', 'svrg-adagrad' and 'svrg-rmsprop'. By default, solver = 'sgd'.
        batchsize: mini-batch size used in offline training. By default, batchsize = 128.
        '''
        self.W = np.copy(W)
        self.solver = solver
        self.batchsize = batchsize
    def _sigmoid(self, X, W):
        '''
        Compute the sigmoid function given weights and inputs.
        X: N x D, where N is the number of data points and D is the dimension of each data point.
        W: (D,) array.
        Return: (N,) array of P(y = 1 | x, W) = 1 / (1 + exp(-W^T x)).
        '''
        coeff = -1.0 * np.matmul(X, W)
        return np.divide(np.ones(X.shape[0]), 1 + np.exp(coeff))
    def _fn_J(self, w, x, y, reg):
        # L2-regularised mean negative log-likelihood for labels y in {0, 1}.
        # Only used by the (commented-out) numerical gradient check in fit().
        loss_term = np.sum(-np.multiply(np.dot(x, w), y) + np.log(1 + np.exp(np.dot(x, w))))
        reg_term = reg / 2.0 * np.sum(w ** 2)
        return 1.0 / len(y) * loss_term + reg_term
    def fit(self, X, y, n_updates=128, learning_rate=0.01, regularisation_factor=0.1, n_svrg_updates=128,
            online=False, eval_freq=0, eval_fn=None, momentum_factor=0.9, decay_lr=0, debug=False):
        '''
        Train the model with the solver chosen at construction time.
        X: N x D matrix of training inputs.
        y: (N,) array of labels in {-1, +1} (remapped to {0, 1} internally).
        n_updates: number of (outer) training iterations. By default, n_updates = 128.
        learning_rate: the learning rate. By default, learning_rate = 0.01.
        regularisation_factor: regularisation parameter used in L2 penalty.
        n_svrg_updates: number of training iterations in the inner loop of the SVRG solvers; forced to 1 for
        the non-SVRG solvers. By default, n_svrg_updates = 128.
        online: boolean flag for online learning setting. If online = True, we follow online learning to update
        particles. That means for each training data point, we predict its label before using it for training
        (prequential evaluation) and accumulate the prediction accuracy. By default, online = False.
        eval_freq: the frequency that the performance of the model with current parameters is evaluated.
        If online = True, eval_freq is forced negative (min(-eval_freq, -1)), that means the evaluation is
        executed before training with each data point. Otherwise, if eval_freq <= 0, no evaluation will be
        executed during training and if eval_freq > 0 the evaluation will be executed after every eval_freq
        updates.
        eval_fn: the function to evaluate the performance of the model with the current parameters.
        By default, eval_fn = None.
        momentum_factor: momentum parameter used in RMSProp. By default, momentum_factor = 0.9.
        decay_lr: controls the exponential decay of the learning rate, via the factor exp(-decay_lr * it).
        By default, decay_lr = 0, that means there is no decay.
        debug: boolean flag to determine the mode of this function. In debug mode, the function will print more
        information to the standard output during training. By default, debug = False.
        Return: (self, loss_log) when eval_freq < 0 (online accuracy log);
        (self, eval_log) when eval_fn is not None; otherwise self.
        '''
        # Work on copies so the caller's arrays are untouched; labels become {0, 1}.
        X_train = np.copy(X)
        y_train = np.copy(y)
        y_train[y_train == -1] = 0 # in this function, we use labels 0 and 1.
        num_data = X_train.shape[0]
        n_svrg_updates = 1 if self.solver != 'svrg-sgd' and self.solver != 'svrg-adagrad' and \
                             self.solver != 'svrg-rmsprop' else n_svrg_updates
        if online:
            # Online mode: one point at a time, evaluation before every update,
            # and exactly one pass over the data in total.
            batchsize = 1
            eval_freq = min(-eval_freq, -1)
            n_updates = int(np.ceil(num_data / n_svrg_updates))
        else:
            batchsize = min(self.batchsize, X_train.shape[0])
            eval_freq = n_updates * n_svrg_updates + 1 if eval_freq <= 0 else eval_freq
        # Fixed random permutation; batches wrap around via the modulo below.
        data_idx_perm = np.random.permutation(num_data)
        if eval_freq < 0:
            # Prequential bookkeeping: accumulated_loss actually accumulates the
            # number of correct predictions (predict() returns accuracy here).
            loss_log = np.zeros(int(np.ceil(num_data / (-eval_freq))) + 1)
            accumulated_loss = 0
            cnt = 0
        elif eval_freq > 0:
            eval_log = []
        if self.solver == 'sgd':
            # Plain SGD: W <- W - lr * exp(-decay_lr * it) * grad_J.
            print('Train Logistic Regression with SGD solver.')
            for it in np.arange(n_updates):
                if debug and (it + 1) * batchsize % 1000 == 0:
                    print('iter %d' % (it + 1))
                data_idx = data_idx_perm[np.arange(it * batchsize, (it + 1) * batchsize) % num_data]
                x_batch = X_train[data_idx, :]
                y_batch = y_train[data_idx]
                if eval_freq < 0:
                    # Predict-then-train: accuracy on this point before the update.
                    current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
                                                get_prob=False)
                    accumulated_loss += (current_loss * len(y_batch))
                    if it % (-eval_freq) == 0 or (it + 1) == num_data:
                        loss_log[cnt] = accumulated_loss * 1.0 / (it + 1)
                        cnt += 1
                # Mini-batch gradient of the regularised mean NLL:
                # -mean(x * (y - sigmoid(x, W))) + reg * W.
                grad_J = -np.sum(
                    np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
                                                         (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                         + regularisation_factor * self.W
                # W_prime = self.W + 1.0 / 10000 * np.random.normal(0, 1, len(self.W))
                # numerical_grad_J = (self._fn_J(W_prime, x_batch, y_batch, regularisation_factor) - self._fn_J(self.W, x_batch, y_batch, regularisation_factor)) / (W_prime - self.W)
                # diff = numerical_grad_J - grad_J
                self.W = self.W - learning_rate * np.exp(-decay_lr * it) * grad_J
                # self.W = self.W - learning_rate * np.exp(-decay_lr * it) * numerical_grad_J
                if eval_freq > 0 and (it + 1) % eval_freq == 0:
                    # First evaluation seeds eval_log; later ones are stacked row-wise.
                    if (it + 1) // eval_freq == 1:
                        eval_log = eval_fn()
                    else:
                        eval_log = np.vstack((eval_log, eval_fn()))
        elif self.solver == 'adagrad':
            # AdaGrad: per-coordinate step scaled by 1 / sqrt(sum of squared grads).
            print('Train Logistic Regression with AdaGrad solver.')
            fudge_factor = 1e-6
            historical_grad = 0.0
            for it in np.arange(n_updates):
                if debug and (it + 1) * batchsize % 1000 == 0:
                    print('iter %d' % (it + 1))
                data_idx = data_idx_perm[np.arange(it * batchsize, (it + 1) * batchsize) % num_data]
                x_batch = X_train[data_idx, :]
                y_batch = y_train[data_idx]
                if eval_freq < 0:
                    current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
                                                get_prob=False)
                    accumulated_loss += (current_loss * len(y_batch))
                    if it % (-eval_freq) == 0 or (it + 1) == num_data:
                        loss_log[cnt] = accumulated_loss * 1.0 / (it + 1)
                        cnt += 1
                grad_J = -np.sum(
                    np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
                                                         (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                         + regularisation_factor * self.W
                historical_grad += (grad_J ** 2)
                # fudge_factor guards against division by zero early on.
                adj_grad = np.divide(grad_J, fudge_factor + np.sqrt(historical_grad))
                self.W = self.W - learning_rate * np.exp(-decay_lr * it) * adj_grad
                if eval_freq > 0 and (it + 1) % eval_freq == 0:
                    if (it + 1) // eval_freq == 1:
                        eval_log = eval_fn()
                    else:
                        eval_log = np.vstack((eval_log, eval_fn()))
        elif self.solver == 'rmsprop':
            # RMSProp: like AdaGrad but with an exponential moving average of
            # squared gradients (weight momentum_factor).
            print('Train Logistic Regression with RMSProp solver.')
            fudge_factor = 1e-6
            historical_grad = 0.0
            for it in np.arange(n_updates):
                if debug and (it + 1) * batchsize % 1000 == 0:
                    print('iter %d' % (it + 1))
                data_idx = data_idx_perm[np.arange(it * batchsize, (it + 1) * batchsize) % num_data]
                x_batch = X_train[data_idx, :]
                y_batch = y_train[data_idx]
                if eval_freq < 0:
                    current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
                                                get_prob=False)
                    accumulated_loss += (current_loss * len(y_batch))
                    if it % (-eval_freq) == 0 or (it + 1) == num_data:
                        loss_log[cnt] = accumulated_loss * 1.0 / (it + 1)
                        cnt += 1
                grad_J = -np.sum(
                    np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
                                                         (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                         + regularisation_factor * self.W
                if it == 0:
                    historical_grad = grad_J ** 2
                else:
                    historical_grad = momentum_factor * historical_grad + (1 - momentum_factor) * (grad_J ** 2)
                adj_grad = np.divide(grad_J, fudge_factor + np.sqrt(historical_grad))
                self.W = self.W - learning_rate * np.exp(-decay_lr * it) * adj_grad
                if eval_freq > 0 and (it + 1) % eval_freq == 0:
                    if (it + 1) // eval_freq == 1:
                        eval_log = eval_fn()
                    else:
                        eval_log = np.vstack((eval_log, eval_fn()))
        elif self.solver == 'svrg-sgd':
            # SVRG: each outer iteration snapshots the full-data gradient mu at
            # self.W, then runs an inner loop on w_hat using the variance-reduced
            # gradient grad_J(w_hat) - grad_J(self.W) + mu.
            print('Train Logistic Regression with SVRG-SGD solver.')
            for it in np.arange(n_updates):
                if debug and (it + 1) * batchsize % 1000 == 0:
                    print('iter %d' % (it + 1))
                # Full-data gradient at the current snapshot self.W.
                mu = -np.sum(np.multiply(X_train, np.broadcast_to(np.vstack(y_train - self._sigmoid(X_train, self.W)),
                                                                  (len(y_train), X_train.shape[1]))),
                             axis=0) * 1.0 / len(y_train) \
                     + regularisation_factor * self.W
                w_hat = np.copy(self.W)
                for it_svrg in np.arange(n_svrg_updates):
                    data_idx = data_idx_perm[np.arange((it * n_svrg_updates + it_svrg) * batchsize,
                                                       (it * n_svrg_updates + it_svrg + 1) * batchsize) % num_data]
                    x_batch = X_train[data_idx, :]
                    y_batch = y_train[data_idx]
                    if eval_freq < 0:
                        if it * n_svrg_updates + it_svrg >= num_data:
                            break
                        # Temporarily swap W and w_hat so predict() evaluates the
                        # current inner-loop iterate; swapped back afterwards.
                        self.W, w_hat = w_hat, self.W
                        current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
                                                    get_prob=False)
                        accumulated_loss += (current_loss * len(y_batch))
                        if (it * n_svrg_updates + it_svrg) % (-eval_freq) == 0 or (
                                it * n_svrg_updates + it_svrg + 1) == num_data:
                            loss_log[cnt] = accumulated_loss * 1.0 / (it * n_svrg_updates + it_svrg + 1)
                            cnt += 1
                        self.W, w_hat = w_hat, self.W
                    grad_J_hat = -np.sum(
                        np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, w_hat)),
                                                             (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                                 + regularisation_factor * w_hat
                    grad_J = -np.sum(
                        np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
                                                             (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                             + regularisation_factor * self.W
                    # Variance-reduced update on the inner iterate w_hat.
                    w_hat = w_hat - learning_rate * np.exp(-decay_lr * it) * (grad_J_hat - grad_J + mu)
                    if eval_freq > 0 and (it * n_svrg_updates + it_svrg + 1) % eval_freq == 0:
                        # Swap so eval_fn() sees the inner-loop iterate via self.W.
                        self.W, w_hat = w_hat, self.W
                        if (it * n_svrg_updates + it_svrg + 1) // eval_freq == 1:
                            eval_log = eval_fn()
                        else:
                            eval_log = np.vstack((eval_log, eval_fn()))
                        self.W, w_hat = w_hat, self.W
                # Next snapshot is the last inner iterate.
                self.W = np.copy(w_hat)
        elif self.solver == 'svrg-adagrad':
            # SVRG combined with AdaGrad scaling of the variance-reduced gradient.
            # historical_grad is reset at the start of every outer iteration.
            print('Train Logistic Regression with SVRG-AdaGrad solver.')
            for it in np.arange(n_updates):
                if debug and (it + 1) * batchsize % 1000 == 0:
                    print('iter %d' % (it + 1))
                fudge_factor = 1e-6
                historical_grad = 0.0
                mu = -np.sum(np.multiply(X_train, np.broadcast_to(np.vstack(y_train - self._sigmoid(X_train, self.W)),
                                                                  (len(y_train), X_train.shape[1]))),
                             axis=0) * 1.0 / len(y_train) \
                     + regularisation_factor * self.W
                w_hat = np.copy(self.W)
                for it_svrg in np.arange(n_svrg_updates):
                    data_idx = data_idx_perm[np.arange((it * n_svrg_updates + it_svrg) * batchsize,
                                                       (it * n_svrg_updates + it_svrg + 1) * batchsize) % num_data]
                    x_batch = X_train[data_idx, :]
                    y_batch = y_train[data_idx]
                    if eval_freq < 0:
                        if it * n_svrg_updates + it_svrg >= num_data:
                            break
                        self.W, w_hat = w_hat, self.W
                        current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
                                                    get_prob=False)
                        accumulated_loss += (current_loss * len(y_batch))
                        if (it * n_svrg_updates + it_svrg) % (-eval_freq) == 0 or (
                                it * n_svrg_updates + it_svrg + 1) == num_data:
                            loss_log[cnt] = accumulated_loss * 1.0 / (it * n_svrg_updates + it_svrg + 1)
                            cnt += 1
                        self.W, w_hat = w_hat, self.W
                    grad_J_hat = -np.sum(
                        np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, w_hat)),
                                                             (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                                 + regularisation_factor * w_hat
                    grad_J = -np.sum(
                        np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
                                                             (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                             + regularisation_factor * self.W
                    grad_J_svrg = grad_J_hat - grad_J + mu
                    historical_grad += (grad_J_svrg ** 2)
                    adj_grad = np.divide(grad_J_svrg, fudge_factor + np.sqrt(historical_grad))
                    w_hat = w_hat - learning_rate * np.exp(-decay_lr * it) * adj_grad
                    if eval_freq > 0 and (it * n_svrg_updates + it_svrg + 1) % eval_freq == 0:
                        self.W, w_hat = w_hat, self.W
                        if (it * n_svrg_updates + it_svrg + 1) // eval_freq == 1:
                            eval_log = eval_fn()
                        else:
                            eval_log = np.vstack((eval_log, eval_fn()))
                        self.W, w_hat = w_hat, self.W
                self.W = np.copy(w_hat)
        elif self.solver == 'svrg-rmsprop':
            # SVRG combined with RMSProp scaling; the moving average restarts at
            # the beginning of each inner loop (it_svrg == 0).
            print('Train Logistic Regression with SVRG-RMSProp solver.')
            for it in np.arange(n_updates):
                if debug and (it + 1) * batchsize % 1000 == 0:
                    print('iter %d' % (it + 1))
                fudge_factor = 1e-6
                historical_grad = 0.0
                mu = -np.sum(np.multiply(X_train, np.broadcast_to(np.vstack(y_train - self._sigmoid(X_train, self.W)),
                                                                  (len(y_train), X_train.shape[1]))),
                             axis=0) * 1.0 / len(y_train) \
                     + regularisation_factor * self.W
                w_hat = np.copy(self.W)
                for it_svrg in np.arange(n_svrg_updates):
                    data_idx = data_idx_perm[np.arange((it * n_svrg_updates + it_svrg) * batchsize,
                                                       (it * n_svrg_updates + it_svrg + 1) * batchsize) % num_data]
                    x_batch = X_train[data_idx, :]
                    y_batch = y_train[data_idx]
                    if eval_freq < 0:
                        if it * n_svrg_updates + it_svrg >= num_data:
                            break
                        self.W, w_hat = w_hat, self.W
                        current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
                                                    get_prob=False)
                        accumulated_loss += (current_loss * len(y_batch))
                        if (it * n_svrg_updates + it_svrg) % (-eval_freq) == 0 or (
                                it * n_svrg_updates + it_svrg + 1) == num_data:
                            loss_log[cnt] = accumulated_loss * 1.0 / (it * n_svrg_updates + it_svrg + 1)
                            cnt += 1
                        self.W, w_hat = w_hat, self.W
                    grad_J_hat = -np.sum(
                        np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, w_hat)),
                                                             (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                                 + regularisation_factor * w_hat
                    grad_J = -np.sum(
                        np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
                                                             (batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
                             + regularisation_factor * self.W
                    grad_J_svrg = grad_J_hat - grad_J + mu
                    if it_svrg == 0:
                        historical_grad = grad_J_svrg ** 2
                    else:
                        historical_grad = momentum_factor * historical_grad + (1 - momentum_factor) * (grad_J_svrg ** 2)
                    adj_grad = np.divide(grad_J_svrg, fudge_factor + np.sqrt(historical_grad))
                    w_hat = w_hat - learning_rate * np.exp(-decay_lr * it) * adj_grad
                    if eval_freq > 0 and (it * n_svrg_updates + it_svrg + 1) % eval_freq == 0:
                        self.W, w_hat = w_hat, self.W
                        if (it * n_svrg_updates + it_svrg + 1) // eval_freq == 1:
                            eval_log = eval_fn()
                        else:
                            eval_log = np.vstack((eval_log, eval_fn()))
                        self.W, w_hat = w_hat, self.W
                self.W = np.copy(w_hat)
        else:
            raise ValueError('The requested solver %s is currently not supported.' % (self.solver))
        if eval_freq < 0:
            return self, loss_log
        if eval_fn is not None:
            return self, eval_log
        return self
    def predict(self, X_test, y_test=None, get_label=True, get_prob=False):
        '''
        Predict the labels given observations.
        X_test: one or many new observations. The dimensions of the matrix of observations are N x D, where N
        is the number of observations and D is the dimension of each observation.
        y_test: corresponding labels of the observations. If we pass in y_test, the return values of this
        function will include the accuracy. By default, y_test is None.
        get_label: a boolean flag to determine if we return the predicted labels. get_label has higher precedence
        than get_prob. That means if y_test = None and get_label = False, then get_prob is automatically True.
        By default, get_label = True.
        get_prob: a boolean flag to determine if we return the uncertainty of the prediction. The probability
        returned is the confidence of the predicted class, i.e. 0.5 + |P(y = 1 | x) - 0.5|.
        By default, get_prob = False.
        Return: a subset of (predicted labels, probabilities, accuracy) depending on the flags above.
        '''
        prob = self._sigmoid(X_test, self.W)
        # Threshold at 0.5; predicted labels are in {-1, +1}.
        y_pred = np.ones(len(prob))
        y_pred[prob <= 0.5] = -1
        if y_test is None:
            if not get_label: # get_prob is automatically True
                return 0.5 + np.abs(prob - 0.5)
            if not get_prob:
                return y_pred
            return y_pred, 0.5 + np.abs(prob - 0.5)
        if not get_label:
            if not get_prob:
                # Accuracy only (used by fit() for prequential evaluation).
                return np.sum(y_pred == y_test) * 1.0 / len(y_test)
            return 0.5 + np.abs(prob - 0.5), np.sum(y_pred == y_test) * 1.0 / len(y_test)
        return y_pred, 0.5 + np.abs(prob - 0.5), np.sum(y_pred == y_test) * 1.0 / len(y_test)
| [
"ndam@deakin.edu.au"
] | ndam@deakin.edu.au |
a9812104f466c0374fbccf71d0cd2b8edbf21fb8 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20200601/route_filter.py | 91eecb201ea5a51babd94a74b8238698682e23f2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,170 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteFilterArgs', 'RouteFilter']
# NOTE: auto-generated by the Pulumi SDK generator (see the file header); the
# @pulumi.input_type decorator introspects this exact class shape, so edits
# belong in the upstream schema, not here.
@pulumi.input_type
class RouteFilterArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a RouteFilter resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] route_filter_name: The name of the route filter.
        :param pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]] rules: Collection of RouteFilterRules contained within a route filter.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        # Only the required argument is set unconditionally; optional arguments
        # are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if route_filter_name is not None:
            pulumi.set(__self__, "route_filter_name", route_filter_name)
        if rules is not None:
            pulumi.set(__self__, "rules", rules)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # Property accessors below delegate storage to pulumi.get/pulumi.set; the
    # @pulumi.getter name argument maps the Python name to the wire name.
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="routeFilterName")
    def route_filter_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the route filter.
        """
        return pulumi.get(self, "route_filter_name")
    @route_filter_name.setter
    def route_filter_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "route_filter_name", value)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]:
        """
        Collection of RouteFilterRules contained within a route filter.
        """
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]):
        pulumi.set(self, "rules", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class RouteFilter(pulumi.CustomResource):
    # NOTE: auto-generated by the Pulumi SDK generator (see the file header).
    # The two @overload __init__ signatures exist only for type checkers; the
    # real __init__ dispatches to _internal_init after unpacking args/kwargs.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Route Filter Resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] route_filter_name: The name of the route filter.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]] rules: Collection of RouteFilterRules contained within a route filter.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RouteFilterArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Route Filter Resource.

        :param str resource_name: The name of the resource.
        :param RouteFilterArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used (args-object vs keyword form)
        # and forward to _internal_init accordingly.
        resource_args, opts = _utilities.get_resource_args_opts(RouteFilterArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the input property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RouteFilterArgs.__new__(RouteFilterArgs)

            __props__.__dict__["id"] = id
            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["route_filter_name"] = route_filter_name
            __props__.__dict__["rules"] = rules
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are populated by the engine.
            __props__.__dict__["etag"] = None
            __props__.__dict__["ipv6_peerings"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["peerings"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state from other API versions of this resource type compatible.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20161201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200501:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200701:RouteFilter"), 
pulumi.Alias(type_="azure-native:network/v20200801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20201101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210501:RouteFilter")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(RouteFilter, __self__).__init__(
            'azure-native:network/v20200601:RouteFilter',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilter':
        """
        Get an existing RouteFilter resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from provider state.
        __props__ = RouteFilterArgs.__new__(RouteFilterArgs)

        __props__.__dict__["etag"] = None
        __props__.__dict__["ipv6_peerings"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["peerings"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["rules"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return RouteFilter(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="ipv6Peerings")
    def ipv6_peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
        """
        A collection of references to express route circuit ipv6 peerings.
        """
        return pulumi.get(self, "ipv6_peerings")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
        """
        A collection of references to express route circuit peerings.
        """
        return pulumi.get(self, "peerings")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the route filter resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RouteFilterRuleResponse']]]:
        """
        Collection of RouteFilterRules contained within a route filter.
        """
        return pulumi.get(self, "rules")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | noreply@github.com |
e063920acaa40258008dba8ae5ed79c9bd2b66b7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/ENTERASYS-VLAN-AUTHORIZATION-MIB.py | c846ff1eb1ce291ffe2d355f4fb5cea046a7128a | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 12,071 | py | #
# PySNMP MIB module ENTERASYS-VLAN-AUTHORIZATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-VLAN-AUTHORIZATION-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:04:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
dot1dBasePortEntry, = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePortEntry")
etsysModules, = mibBuilder.importSymbols("ENTERASYS-MIB-NAMES", "etsysModules")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibIdentifier, Bits, NotificationType, IpAddress, TimeTicks, Counter64, iso, Integer32, Counter32, ObjectIdentity, Unsigned32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "NotificationType", "IpAddress", "TimeTicks", "Counter64", "iso", "Integer32", "Counter32", "ObjectIdentity", "Unsigned32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity for the Enterasys VLAN-authorization MIB, generated by pysmi
# from the ASN.1 source (see the file header). Revision/contact metadata is
# attached only when the builder loads texts.
etsysVlanAuthorizationMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48))
etsysVlanAuthorizationMIB.setRevisions(('2004-06-02 19:22',))
# Older pysnmp builders lack setRevisionsDescriptions, hence the version gate.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setRevisionsDescriptions(('The initial version of this MIB module',))
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setLastUpdated('200406021922Z')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setOrganization('Enterasys Networks, Inc')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setContactInfo('Postal: Enterasys Networks, Inc. 50 Minuteman Rd. Andover, MA 01810-1008 USA Phone: +1 978 684 1000 E-mail: support@enterasys.com WWW: http://www.enterasys.com')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setDescription("This MIB module defines a portion of the SNMP MIB under Enterasys Networks' enterprise OID pertaining to proprietary extensions to the IETF Q-BRIDGE-MIB, as specified in RFC2674, pertaining to VLAN authorization, as specified in RFC3580. Specifically, the enabling and disabling of support for the VLAN Tunnel-Type attribute returned from a RADIUS authentication, and how that attribute is applied to the port which initiated the authentication.")
class VlanAuthEgressStatus(TextualConvention, Integer32):
description = 'The possible egress configurations which may be applied in response to a successful authentication. none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. dynamic(4) The authenticating port will use information returned in the authentication response to modify the current egress lists.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("none", 1), ("tagged", 2), ("untagged", 3), ("dynamic", 4))
etsysVlanAuthorizationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1))
etsysVlanAuthorizationSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 1))
etsysVlanAuthorizationPorts = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2))
etsysVlanAuthorizationEnable = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 1, 1), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationEnable.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationEnable.setDescription('The enable/disable state for the VLAN authorization feature. When disabled, no modifications to the VLAN attributes related to packet switching should be enforced.')
etsysVlanAuthorizationTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1), )
if mibBuilder.loadTexts: etsysVlanAuthorizationTable.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationTable.setDescription('Extensions to the table that contains information about every port that is associated with this transparent bridge.')
etsysVlanAuthorizationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1), )
dot1dBasePortEntry.registerAugmentions(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationEntry"))
etsysVlanAuthorizationEntry.setIndexNames(*dot1dBasePortEntry.getIndexNames())
if mibBuilder.loadTexts: etsysVlanAuthorizationEntry.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationEntry.setDescription('A list of extensions that support the management of proprietary features for each port of a transparent bridge. This is indexed by dot1dBasePort.')
etsysVlanAuthorizationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 1), EnabledStatus().clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationStatus.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationStatus.setDescription('The enabled/disabled status for the application of VLAN authorization on this port, if disabled, the information returned in the VLAN-Tunnel-Type from the authentication will not be applied to the port (although it should be represented in this table). If enabled, those results will be applied to the port.')
etsysVlanAuthorizationAdminEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 2), VlanAuthEgressStatus().clone('untagged')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationAdminEgress.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationAdminEgress.setDescription('Controls the modification of the current vlan egress list (of the vlan returned in the VLAN-Tunnel-Type, and reported by etsysVlanAuthorizationVlanID) upon successful authentication in the following manner: none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. dynamic(4) The authenticating port will use information returned in the authentication response to modify the current egress lists. This value is supported only if the device supports a mechanism through which the egress status may be returned through the RADIUS response. Should etsysVlanAuthorizationEnable become disabled, etsysVlanAuthorizationStatus become disabled for a port, or should etsysVlanAuthorizationVlanID become 0 or 4095, all effect on the port egress MUST be removed.')
etsysVlanAuthorizationOperEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 3), VlanAuthEgressStatus().clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysVlanAuthorizationOperEgress.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationOperEgress.setDescription('Reports the current state of modification to the current vlan egress list (of the vlan returned in the VLAN-Tunnel-Type) upon successful authentication, if etsysVlanAuthorizationStatus is enabled, in the following manner: none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. The purpose of this leaf is to report, specifically when etsysVlanAuthorizationAdminEgress has been set to dynamic(4), the currently enforced egress modification. If the port is unauthenticated, or no VLAN-ID has been applied, this leaf should return none(1).')
etsysVlanAuthorizationVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4094), ValueRangeConstraint(4095, 4095), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysVlanAuthorizationVlanID.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationVlanID.setDescription('The 12 bit VLAN identifier for a given port, used to override the PVID of the given port, obtained as a result of an authentication. A value of zero indicates that there is no authenticated VLAN ID for the given port. Should a port become unauthenticated this value MUST be returned to zero. A value of 4095 indicates that a the port has been authenticated, but that the VLAN returned could not be applied to the port (possibly because of resource constraints or misconfiguration). In this instance, the original PVID should still be applied. Should the feature become disabled or the session terminate, all effect on the Port VLAN ID MUST be removed.')
etsysVlanAuthorizationConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2))
etsysVlanAuthorizationGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 1))
etsysVlanAuthorizationCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 2))
etsysVlanAuthorizationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 1, 1)).setObjects(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationEnable"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationStatus"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationAdminEgress"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationOperEgress"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationVlanID"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysVlanAuthorizationGroup = etsysVlanAuthorizationGroup.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationGroup.setDescription('A collection of objects relating to VLAN Authorization.')
etsysVlanAuthorizationCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 2, 1)).setObjects(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysVlanAuthorizationCompliance = etsysVlanAuthorizationCompliance.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationCompliance.setDescription('The compliance statement for devices that support the Enterasys VLAN Authorization MIB.')
mibBuilder.exportSymbols("ENTERASYS-VLAN-AUTHORIZATION-MIB", etsysVlanAuthorizationVlanID=etsysVlanAuthorizationVlanID, etsysVlanAuthorizationGroup=etsysVlanAuthorizationGroup, etsysVlanAuthorizationEnable=etsysVlanAuthorizationEnable, etsysVlanAuthorizationOperEgress=etsysVlanAuthorizationOperEgress, etsysVlanAuthorizationAdminEgress=etsysVlanAuthorizationAdminEgress, etsysVlanAuthorizationConformance=etsysVlanAuthorizationConformance, VlanAuthEgressStatus=VlanAuthEgressStatus, etsysVlanAuthorizationPorts=etsysVlanAuthorizationPorts, etsysVlanAuthorizationStatus=etsysVlanAuthorizationStatus, etsysVlanAuthorizationCompliance=etsysVlanAuthorizationCompliance, etsysVlanAuthorizationMIB=etsysVlanAuthorizationMIB, etsysVlanAuthorizationGroups=etsysVlanAuthorizationGroups, etsysVlanAuthorizationObjects=etsysVlanAuthorizationObjects, etsysVlanAuthorizationTable=etsysVlanAuthorizationTable, etsysVlanAuthorizationSystem=etsysVlanAuthorizationSystem, etsysVlanAuthorizationEntry=etsysVlanAuthorizationEntry, etsysVlanAuthorizationCompliances=etsysVlanAuthorizationCompliances, PYSNMP_MODULE_ID=etsysVlanAuthorizationMIB)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
2f827b2603f3b2e2da3faa274a618d5620244e37 | 6b2794ac7ee275654f753659c83e9c6f115b4bbc | /budget/migrations/0008_auto_20190311_1818.py | d6e2d7ac6e8893eb63a9eb2da9d501d480441d49 | [] | no_license | mtmbutler/simplefi | 5ae667b93a69b77070652ecf6d1808badc68cc46 | e1afd06c525a1231a01dd4760d2aa145c9862be9 | refs/heads/main | 2021-06-25T01:27:32.008217 | 2020-12-22T18:48:30 | 2020-12-22T18:48:30 | 183,545,508 | 1 | 1 | null | 2020-12-24T17:21:16 | 2019-04-26T02:51:31 | Python | UTF-8 | Python | false | false | 1,214 | py | # Generated by Django 2.1.7 on 2019-03-12 01:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``budget`` app.

    Tightens field metadata on ``Account`` (verbose names, default values,
    help text), makes ``Statement`` rows order newest-first by default, and
    enforces at most one statement per account per date.
    Do not edit by hand beyond comments.
    """

    # Must run after the previous budget migration.
    dependencies = [
        ('budget', '0007_auto_20190311_0740'),
    ]

    operations = [
        # Statements list most recent first by default.
        migrations.AlterModelOptions(
            name='statement',
            options={'ordering': ['-date']},
        ),
        # Account.annual_fee: money value in dollars, defaulting to 0.
        migrations.AlterField(
            model_name='account',
            name='annual_fee',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=9, verbose_name='Annual Fee ($)'),
        ),
        # Account.interest_rate: percentage, defaulting to 0.
        migrations.AlterField(
            model_name='account',
            name='interest_rate',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=9, verbose_name='Interest Rate (%)'),
        ),
        # Account.statement_date: day-of-month the statement posts (default 1).
        migrations.AlterField(
            model_name='account',
            name='statement_date',
            field=models.PositiveSmallIntegerField(default=1, help_text='The numbered day of each month that your statement posts.', verbose_name='Statement Date'),
        ),
        # An account may have at most one statement on a given date.
        migrations.AlterUniqueTogether(
            name='statement',
            unique_together={('account', 'date')},
        ),
    ]
| [
"mtmbutler@icloud.com"
] | mtmbutler@icloud.com |
e19cb658c575b3bf49becb8695d95500b966fddc | 0967182e0b2c59448305870aaa193e051dd0eafa | /visualizer.py | 79f1191df0144cfdc55d0ebf0481a7b094b9d15b | [] | no_license | DT021/fake-tradingview | 43dcd483328193fb7d401b783bfa390c02c539d2 | 4c1c2ba1a58263c85545ac11abff555fa747c09a | refs/heads/master | 2023-01-30T22:15:48.790656 | 2020-12-04T15:06:23 | 2020-12-04T15:06:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | import datetime
import logging
import time
import numpy as np
import pyqtgraph as pg
from pyqtgraph import QtCore, QtGui
from pyqtgraph.dockarea import *
from candlestickItem import CandlestickItem
from utils import Worker, logger
from volumeProfileItem import VolumeProfileItem
class Visualizer(DockArea):
    """Dock area hosting the OHLC candlestick chart plus its volume profile.

    ``parent`` must expose the market-data handle as ``parent.db``; the same
    handle is shared with both chart items.
    """

    def __init__(self, parent):
        super().__init__()
        self.db = parent.db

        # Candlestick init
        self.candlestick = CandlestickItem(self.db)
        self.volumeProfile = VolumeProfileItem(self.db)
        # Repaint the candles whenever the volume profile reports an update.
        self.volumeProfile.onUpdate.connect(self.candlestick.update)

        # Plot widget with a date/time bottom axis.
        self.candlestickWidget = pg.PlotWidget(
            self, axisItems={"bottom": pg.DateAxisItem()}
        )
        self.candlestickWidget.addItem(self.candlestick)
        self.candlestickWidget.addItem(self.volumeProfile)
        # Pan/zoom on the time axis only; the price range follows the
        # currently visible data automatically.
        self.candlestickWidget.setMouseEnabled(x=True, y=False)
        self.candlestickWidget.enableAutoRange(x=False, y=True)
        self.candlestickWidget.setAutoVisible(x=False, y=True)
        # Price axis on the right-hand side, trading-chart style.
        self.candlestickWidget.showAxis("right")
        self.candlestickWidget.hideAxis("left")
        self.candlestickWidget.showGrid(True, True, 0.2)
        # Forward scene mouse moves to the chart item (presumably drives a
        # crosshair/hover readout -- confirm in CandlestickItem.onMouseMoved).
        self.candlestickWidget.scene().sigMouseMoved.connect(
            self.candlestick.onMouseMoved
        )

        # Add dock
        self.d = Dock("OHLC", widget=self.candlestickWidget)
        self.addDock(self.d)

    def setIndex(self, index):
        """Switch the chart to *index*; the data load runs on a pool thread."""
        worker = Worker(self.candlestick.setIndex, index)
        QtCore.QThreadPool.globalInstance().start(worker)
        # Existing volume-profile overlays belong to the previous index.
        self.volumeProfile.removeAll()

    def setInterval(self, interval):
        """Change the candle interval; recomputation runs on a pool thread."""
        worker = Worker(self.candlestick.setInterval, interval)
        QtCore.QThreadPool.globalInstance().start(worker)

    def refresh(self):
        """Ask the candlestick item to redraw from its current data."""
        self.candlestick.refresh()
| [
"minh020698@gmail.com"
] | minh020698@gmail.com |
1e4bc752b2c1a4e95cfc85a70366502bdad4f7cf | dfc4dc5d823dada86216bc7df451127bffab00bb | /authors/apps/products/views.py | f7e50a3347930b55f6672439a7e79b8ee513a0c9 | [
"BSD-3-Clause"
] | permissive | hoslack/jua-kali_Backend | 311250360574495052adab9267dc7c07f48ba0e7 | e0e92aa0287c4a17b303fdde941f457b28c51223 | refs/heads/master | 2022-12-21T02:17:35.808370 | 2019-02-05T12:26:05 | 2019-02-05T12:26:05 | 158,075,591 | 0 | 0 | BSD-3-Clause | 2022-12-08T01:20:29 | 2018-11-18T11:13:48 | Python | UTF-8 | Python | false | false | 374 | py | from .models import Product
from .serializers import ProductSerializer
from rest_framework import generics
class ProductList(generics.ListCreateAPIView):
    """List all products (GET) or create a new product (POST)."""

    queryset = Product.objects.all()
    serializer_class = ProductSerializer
class ProductDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve (GET), update (PUT/PATCH) or delete (DELETE) one product by pk."""

    queryset = Product.objects.all()
    serializer_class = ProductSerializer
| [
"hoslackochieng@gmail.com"
] | hoslackochieng@gmail.com |
e8e564dd8a81a7204c2c1219c8828de5d75a5b39 | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-Cocoa/PyObjCTest/test_nsexpression.py | 10aca71722b9813074d199da83ce3d260fed8d3b | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSExpression(TestCase):
    """Verify PyObjC's bindings for Foundation's NSExpression.

    Pins the numeric values of the NSExpressionType constants and checks the
    bridged method signatures (block-typed and printf-style arguments). The
    ``min_os_level`` decorators skip checks on older OS releases.
    """

    def testConstants(self):
        # Expression-type constants available on all supported OS versions.
        self.assertEqual(NSConstantValueExpressionType, 0)
        self.assertEqual(NSEvaluatedObjectExpressionType, 1)
        self.assertEqual(NSVariableExpressionType, 2)
        self.assertEqual(NSKeyPathExpressionType, 3)
        self.assertEqual(NSFunctionExpressionType, 4)
        self.assertEqual(NSUnionSetExpressionType, 5)
        self.assertEqual(NSIntersectSetExpressionType, 6)
        self.assertEqual(NSMinusSetExpressionType, 7)
        # Note: the SDK's numbering skips 8-12 here.
        self.assertEqual(NSSubqueryExpressionType, 13)
        self.assertEqual(NSAggregateExpressionType, 14)

    @min_os_level("10.6")
    def testConstants10_6(self):
        self.assertEqual(NSBlockExpressionType, 19)

    @min_os_level("10.9")
    def testConstants10_9(self):
        self.assertEqual(NSAnyKeyExpressionType, 15)

    @min_os_level("10.11")
    def testConstants10_11(self):
        self.assertEqual(NSConditionalExpressionType, 20)

    @min_os_level("10.6")
    def testMethods10_6(self):
        # b"@@@@" is the Objective-C type encoding for the block: an object
        # return value followed by three object arguments.
        self.assertArgIsBlock(NSExpression.expressionForBlock_arguments_, 0, b"@@@@")
        self.assertResultIsBlock(NSExpression.expressionBlock, b"@@@@")

    @min_os_level("10.6")
    def testMethod10_6_unsupported(self):
        # expressionWithFormat_ takes a printf/format-style argument list.
        self.assertArgIsPrintf(NSExpression.expressionWithFormat_, 0)
# Allow running this test file directly, outside the test runner.
if __name__ == "__main__":
    main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
39b208a82a1ddf4a4bdc0162879768521bb9893d | 30b0b79b5d5258aefbeb4faf129483fae456c9fa | /shoe/models.py | 40a94f5d75503d82fd4a3ab753daf5b3f513b6dc | [] | no_license | arturAdr/easyResource | bcee8fefeffb13bc2de648a2d0220eb3dc1b1d71 | ffff2818c481191a0133b8b44b9b3e53f9de9e94 | refs/heads/master | 2020-06-10T08:44:01.352198 | 2019-06-30T02:19:45 | 2019-06-30T02:19:45 | 193,625,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models
from EasyResource.fields import JSONSchemaField
class Shoe(models.Model):
    """Catalog entry for a shoe, including per-size stock levels."""

    # Stock-keeping unit; unique, so it acts as the natural key.
    sku = models.CharField(max_length=100, unique=True)
    name = models.CharField(max_length=100)
    details = models.CharField(max_length=5000)
    # Free-form extra attributes; no schema is enforced on this field.
    # NOTE(review): field name looks like a typo for "information", but it
    # is kept as-is -- renaming would require a database migration.
    informations = JSONField()
    tags = ArrayField(models.CharField(max_length=200), blank=True)
    price = models.FloatField()
    # Schema-validated JSON: a list of
    # {"size": <int>, "available_quantity": <int>} objects, both keys
    # required, no extra keys allowed.
    sizes = JSONSchemaField(schema = {
        "type": "array",
        "items": {
            "type": "object",
            "required": [
                "size",
                "available_quantity"
            ],
            "additionalProperties": False,
            "properties": {
                "size": {
                    "type": "integer",
                },
                "available_quantity": {
                    "type": "integer"
                }
            }
        }
    })

    class Meta:
        verbose_name = u'Shoe'
        verbose_name_plural = u'Shoes'

    def __str__(self):
        return self.name
"artur.adr@hotmail.com"
] | artur.adr@hotmail.com |
345af0510923871e7c277fa605b5fbb91b36acd5 | 357048e2990a572be19f2816a9890fdb10b4ef71 | /bmips.py | 2b0713eb577cd1cf89d6eeb2eb0ee707691543e2 | [] | no_license | brightp-py/bmips | 444771a8b46edd7bded2e048dc58573cee2007ca | 59308e55f5eb4b56937044e932383216b1a0c8c7 | refs/heads/master | 2023-04-03T01:58:14.517627 | 2021-04-10T19:52:42 | 2021-04-10T19:52:42 | 356,678,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import sys
from src import simulator, parsemips
# Command line: python bmips.py <program> [-d] [-r [reportfile]]
#   -d  enable the simulator's debug output
#   -r  write a run report; if no file name follows (next token is another
#       flag, or -r is the last argument), the report goes to stdout ("print").
if len(sys.argv) < 2:
    print("FAILED: Expected a file name.")
    print("'python bmips.py [filename] (-d)'")
    sys.exit()

simulator.DEBUGPRINT = False
simulator.REPORTPRINT = None

wantreportfile = False
for arg in sys.argv[2:]:
    if wantreportfile:
        wantreportfile = False
        if arg[0] != "-":
            # The token after -r is the report file name; it is consumed.
            simulator.REPORTPRINT = arg
            continue
        # -r was immediately followed by another flag: fall back to stdout
        # reporting and let the flag be processed normally below.
        simulator.REPORTPRINT = "print"
    if arg == "-d":
        simulator.DEBUGPRINT = True
    elif arg == "-r":
        wantreportfile = True
# Fix: a trailing "-r" with no file name after it previously left
# REPORTPRINT unset; treat it like "-r <flag>" and report to stdout.
# (Also removed a redundant second scan of sys.argv for "-d"; the loop
# above already handles every occurrence.)
if wantreportfile:
    simulator.REPORTPRINT = "print"

# Parse the assembly source and run it through the simulator.
with open(sys.argv[1], 'r') as f:
    p = parsemips.parseCode(f.read())
    sim = simulator.Simulator(p)
    sim.run()
"brightp@umich.edu"
] | brightp@umich.edu |
1f3ff6b0e0ffd238536bc4cba66923da5ef896f3 | b8be27aa871f298e9b9a53f417219ebb080378d6 | /deep-dive-convolutional-neural-networks/vgg/vgg.py | b4985404ee4a610b23290e35edbb125bbbe411be | [] | no_license | Bayesian4042/computer-vision | f0d9e010ecf043b72b49a8118cf334310200f031 | afe969a1be2e8f396f2fe6282d0027534f88281d | refs/heads/master | 2023-02-15T20:03:34.237416 | 2021-01-10T16:30:35 | 2021-01-10T16:30:35 | 135,942,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,644 | py |
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from models.model import Model
from preprocessing.imagenet.bgr import resize_crop
from weight_loading.numpyfile import load_weights
from helper.layer import fc, conv
class VGG(Model):
    """
    VGG16 model definition for Tensorflow.

    Builds the standard 13-conv + 3-fc VGG16 graph on top of an input
    tensor and records every intermediate activation in an ordered
    ``endpoints`` dict. Constructor arguments are stored by the ``Model``
    base class, which is also assumed to provide ``is_layer_trainable``
    and the ``self.tensor``/``self.keep_prob``/``self.num_classes``
    attributes used below (defined outside this file -- confirm in
    models/model.py).
    """
    # Expected input resolution; inputs are prepared with `image_prep`
    # (resize + crop, BGR ordering per the preprocessing module path).
    image_size = 224
    image_prep = resize_crop

    def __init__(self, tensor, keep_prob=1.0, num_classes=1000, retrain_layer=None, weights_path='./weights/vgg16.npy'):
        """Create the VGG16 computational graph.

        Args:
            tensor: Input image batch the graph is built on.
            keep_prob: Dropout keep probability for fc6/fc7 (1.0 = no dropout).
            num_classes: Width of the final fc8 logits layer.
            retrain_layer: Layer names to retrain; these are skipped when
                restoring/loading pretrained weights. Defaults to no layers.
            weights_path: Path to pretrained weights stored as a numpy file.
        """
        # Avoid the mutable-default-argument pitfall: the previous default
        # of [] was shared across all instances.
        if retrain_layer is None:
            retrain_layer = []
        # Call the parent class, which will create the graph
        Model.__init__(self, tensor, keep_prob, num_classes, retrain_layer, weights_path)
        # Call the create function to build the computational graph
        self.final, self.endpoints = self.create()

    def get_final_op(self):
        """Return the final logits op (fc8)."""
        return self.final

    def get_endpoints(self):
        """Return the ordered dict of named intermediate activations."""
        return self.endpoints

    def get_restore_vars(self):
        """Variables to restore from a checkpoint: all except retrained layers."""
        return [v for v in tf.global_variables() if not v.name.split('/')[0] in self.retrain_layer]

    def get_retrain_vars(self):
        """Variables to be (re)trained."""
        return tf.trainable_variables()

    def load_initial_weights(self, session):
        """Load pretrained weights from the numpy file, skipping retrained layers."""
        load_weights(session, self.weights_path, self.retrain_layer)

    def create(self):
        """Build the VGG16 graph; return (final logits op, endpoints dict)."""
        # 1st Layer: Conv -> Conv -> Pool
        # conv(tensor, filter_height, filter_width, num_filters, stride_y, stride_x, name, padding)
        conv1_1 = conv(self.tensor, 3, 3, 64, 1, 1, padding='SAME', name='conv1_1', trainable=self.is_layer_trainable('conv1_1'))
        conv1_2 = conv(conv1_1, 3, 3, 64, 1, 1, padding='SAME', name='conv1_2', trainable=self.is_layer_trainable('conv1_2'))
        pool1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

        # 2nd Layer: Conv -> Conv -> Pool
        conv2_1 = conv(pool1, 3, 3, 128, 1, 1, padding='SAME', name='conv2_1', trainable=self.is_layer_trainable('conv2_1'))
        conv2_2 = conv(conv2_1, 3, 3, 128, 1, 1, padding='SAME', name='conv2_2', trainable=self.is_layer_trainable('conv2_2'))
        pool2 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

        # 3rd Layer: Conv -> Conv -> Conv -> Pool
        conv3_1 = conv(pool2, 3, 3, 256, 1, 1, padding='SAME', name='conv3_1', trainable=self.is_layer_trainable('conv3_1'))
        conv3_2 = conv(conv3_1, 3, 3, 256, 1, 1, padding='SAME', name='conv3_2', trainable=self.is_layer_trainable('conv3_2'))
        conv3_3 = conv(conv3_2, 3, 3, 256, 1, 1, padding='SAME', name='conv3_3', trainable=self.is_layer_trainable('conv3_3'))
        pool3 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')

        # 4th Layer: Conv -> Conv -> Conv -> Pool
        conv4_1 = conv(pool3, 3, 3, 512, 1, 1, padding='SAME', name='conv4_1', trainable=self.is_layer_trainable('conv4_1'))
        conv4_2 = conv(conv4_1, 3, 3, 512, 1, 1, padding='SAME', name='conv4_2', trainable=self.is_layer_trainable('conv4_2'))
        conv4_3 = conv(conv4_2, 3, 3, 512, 1, 1, padding='SAME', name='conv4_3', trainable=self.is_layer_trainable('conv4_3'))
        pool4 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')

        # 5th Layer: Conv -> Conv -> Conv -> Pool
        conv5_1 = conv(pool4, 3, 3, 512, 1, 1, padding='SAME', name='conv5_1', trainable=self.is_layer_trainable('conv5_1'))
        conv5_2 = conv(conv5_1, 3, 3, 512, 1, 1, padding='SAME', name='conv5_2', trainable=self.is_layer_trainable('conv5_2'))
        conv5_3 = conv(conv5_2, 3, 3, 512, 1, 1, padding='SAME', name='conv5_3', trainable=self.is_layer_trainable('conv5_3'))
        pool5 = tf.nn.max_pool(conv5_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool5')

        # 6th Layer: FC -> DropOut
        # [1:] cuts away the first element (the batch dimension)
        pool5_out = int(np.prod(pool5.get_shape()[1:]))  # 7 * 7 * 512 = 25088
        pool5_flat = tf.reshape(pool5, [-1, pool5_out])  # shape=(image count, 7, 7, 512) -> shape=(image count, 25088)
        fc6 = fc(pool5_flat, num_out=4096, name='fc6', relu=True, trainable=self.is_layer_trainable('fc6'))
        dropout1 = tf.nn.dropout(fc6, self.keep_prob)

        # 7th Layer: FC
        fc7 = fc(dropout1, num_out=4096, name='fc7', relu=True, trainable=self.is_layer_trainable('fc7'))
        dropout2 = tf.nn.dropout(fc7, self.keep_prob)

        # 8th Layer: FC
        fc8 = fc(dropout2, num_out=self.num_classes, name='fc8', relu=False, trainable=self.is_layer_trainable('fc8'))

        # add layers to the endpoints dict
        endpoints = OrderedDict()
        endpoints['conv1/conv1_1'] = conv1_1
        endpoints['conv1/conv1_2'] = conv1_2
        endpoints['pool1'] = pool1
        endpoints['conv2/conv2_1'] = conv2_1
        endpoints['conv2/conv2_2'] = conv2_2
        endpoints['pool2'] = pool2
        endpoints['conv3/conv3_1'] = conv3_1
        endpoints['conv3/conv3_2'] = conv3_2
        endpoints['conv3/conv3_3'] = conv3_3
        endpoints['pool3'] = pool3
        endpoints['conv4/conv4_1'] = conv4_1
        endpoints['conv4/conv4_2'] = conv4_2
        endpoints['conv4/conv4_3'] = conv4_3
        endpoints['pool4'] = pool4
        endpoints['conv5/conv5_1'] = conv5_1
        endpoints['conv5/conv5_2'] = conv5_2
        endpoints['conv5/conv5_3'] = conv5_3
        endpoints['pool5'] = pool5
        endpoints['pool5/flat'] = pool5_flat  # 25088
        endpoints['fc6'] = fc6  # 4096
        endpoints['fc7'] = fc7  # 4096
        endpoints['fc8'] = fc8  # number of output classes

        return fc8, endpoints
| [
"singhabhilasha4042@gmail.com"
] | singhabhilasha4042@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.