| Column | Type | Observed range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 3 – 616 |
| content_id | string | length 40 |
| detected_licenses | list | lengths 0 – 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 – 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (contains nulls) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (contains nulls) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (contains nulls) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | list | lengths 1 – 1 |
| author_id | string | lengths 1 – 132 |
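The schema above describes one record per source file, with the file text in `content` and repository metadata alongside it. The sketch below is a minimal, illustrative example of streaming a few rows with the Hugging Face `datasets` library; the repository id `user/code-dataset` is a placeholder, not the real dataset name, and streaming support is assumed.

```python
from itertools import islice

from datasets import load_dataset

# "user/code-dataset" is a placeholder id; substitute the actual dataset repository.
ds = load_dataset("user/code-dataset", split="train", streaming=True)

for row in islice(ds, 3):
    # Column names follow the schema table above.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # preview the first 200 characters of the file
```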
a58d2d0f98b28216c8228e5c1ea84e9f433c6285
|
45de3aa97525713e3a452c18dcabe61ac9cf0877
|
/src/primaires/combat/commandes/bander/__init__.py
|
df15806a8f441e1a2a4bf8d37a4d5bfa54f3e48e
|
[
"BSD-3-Clause"
] |
permissive
|
stormi/tsunami
|
95a6da188eadea3620c70f7028f32806ee2ec0d1
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
refs/heads/master
| 2020-12-26T04:27:13.578652
| 2015-11-17T21:32:38
| 2015-11-17T21:32:38
| 25,606,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,123
|
py
|
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'bander'.
"""
from primaires.interpreteur.commande.commande import Commande
from primaires.objet.conteneur import SurPoids
class CmdBander(Commande):
"""Commande 'bander'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "charger", "bend")
self.nom_categorie = "combat"
self.schema = "<jet:nom_objet> (avec/with <projectile:nom_objet>)"
self.aide_courte = "charge une arme de jet"
self.aide_longue = \
"Cette commande permet de charger une arme de jet que " \
"vous équipez. Elle prend en paramètre obligatoire le " \
"nom de l'arme. Si rien n'est précisé ensuite, le système " \
"cherchera le bon projectile dans vos conteneurs équipés " \
"et le placera automatiquement sur l'arme de jet. Sinon, " \
"vous pouvez préciser après le nom de l'arme de jet le " \
"mot-clé |cmd|avec|ff| (ou |cmd|with|ff| en anglais) suivi " \
"du nom du projectile. Vous devez dans tous les cas " \
"posséder le projectile indiqué."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
arme_de_jet = self.noeud.get_masque("jet")
arme_de_jet.proprietes["conteneurs"] = \
"(personnage.equipement.equipes, )"
projectile = self.noeud.get_masque("projectile")
projectile.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt(" \
"True), )"
projectile.proprietes["quantite"] = "True"
projectile.proprietes["conteneur"] = "True"
def interpreter(self, personnage, dic_masques):
"""Interprétation de la commande"""
personnage.agir("charger")
arme_de_jet = dic_masques["jet"].objet
if not arme_de_jet.est_de_type("arme de jet"):
personnage << "|err|Ceci n'est pas une arme de jet.|ff|"
return
if dic_masques["projectile"]:
projectiles = list(dic_masques["projectile"].objets_qtt_conteneurs)
projectile, qtt, conteneur = projectiles[0]
if not projectile.est_de_type("projectile"):
personnage << "|err|Ceci n'est pas un projectile.|ff|"
return
else:
projectile = conteneur = None
for objet in personnage.equipement.inventaire:
if objet.est_de_type("projectile") and objet.cle in \
arme_de_jet.projectiles_autorises:
projectile = objet
conteneur = objet.contenu
break
if projectile is None or conteneur is None:
personnage << "|err|Aucun projectile pour cette arme " \
"de jet ne peut être trouvé sur vous.|ff|"
return
if projectile.cle not in arme_de_jet.projectiles_autorises:
personnage << "|err|Vous ne pouvez utiliser {} avec " \
"{}.|ff|".format(arme_de_jet.get_nom(),
projectile.get_nom())
personnage << "Vous commencez à recharger {}.".format(
arme_de_jet.get_nom())
personnage.etats.ajouter("charger")
yield 1
if "charger" not in personnage.etats:
return
personnage.etats.retirer("charger")
# Si l'arme de jet est déjà chargée
if arme_de_jet.projectile:
ancien_projectile = arme_de_jet.projectile
try:
personnage.ramasser(objet=ancien_projectile)
except SurPoids:
personnage.salle.objets_sol.ajouter(objet=ancien_projectile)
personnage << "{} glisse à terre.".format(
ancien_projectile.get_nom().capitalize())
else:
personnage << "Vous récupérez {}.".format(
ancien_projectile.get_nom())
arme_de_jet.projectile = None
conteneur.retirer(projectile)
arme_de_jet.script["charge"].executer(personnage=personnage,
arme=arme_de_jet, projectile=projectile)
arme_de_jet.projectile = projectile
personnage << "Vous bandez {} avec {}.".format(
arme_de_jet.get_nom(), projectile.get_nom())
personnage.salle.envoyer("{{}} bande {} avec {}.".format(
arme_de_jet.get_nom(), projectile.get_nom()), personnage)
|
[
"kredh@free.fr"
] |
kredh@free.fr
|
712ffc2d8089cd9cdf5177d269fd592d0b46f7db
|
50658c41dd6de3330d3795b116c0e0e1b38a41d4
|
/lib/daal/storages/__init__.py
|
bcaea9583775754402938acf7f54233f020ca15e
|
[] |
no_license
|
nolar/shortener
|
c01223f4d24f794cd5df3eb76a4beca81419c03a
|
05da766aeef7cac4df7a172aefd1d37d360083ac
|
refs/heads/master
| 2021-01-25T05:27:48.011877
| 2011-08-29T18:24:48
| 2011-08-29T18:24:48
| 2,200,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
# coding: utf-8
from ._base import Storage, StorageID
from ._base import StorageExpectationError, StorageItemAbsentError, StorageUniquenessError
from .wrapped import WrappedStorage
from .sdb import SDBStorage
from .mysql import MysqlStorage
|
[
"nolar@nolar.info"
] |
nolar@nolar.info
|
53acc62eb0a66fd52d0aac9c4d02ff50f8eec581
|
574e7b276c83dc3866c0401f51fba38031e1faf1
|
/setup.py
|
386ff893bd50d64a2733f87a81810a0d7977b6bf
|
[
"BSD-3-Clause"
] |
permissive
|
hiclib/iced
|
251805e411a4126a07f186c67b378d4e03320f16
|
53d7a936d841dba6ae0f8fe8d168e7f4553a62eb
|
refs/heads/master
| 2023-01-27T23:34:12.999341
| 2023-01-20T14:15:16
| 2023-01-20T14:15:16
| 29,135,229
| 30
| 38
|
NOASSERTION
| 2023-01-20T14:15:18
| 2015-01-12T12:50:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
import os
import sys
DISTNAME = 'iced'
DESCRIPTION = 'ICE normalization'
MAINTAINER = 'Nelle Varoquaux'
MAINTAINER_EMAIL = 'nelle.varoquaux@gmail.com'
VERSION = "0.6.0a0.dev0"
SCIPY_MIN_VERSION = '0.19.0'
NUMPY_MIN_VERSION = '1.16.0'
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
'alldeps': (
'numpy >= {0}'.format(NUMPY_MIN_VERSION),
'scipy >= {0}'.format(SCIPY_MIN_VERSION),
),
},
)
else:
extra_setuptools_args = dict()
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('iced')
return config
def setup_package():
metadata = dict(
configuration=configuration,
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
version=VERSION,
scripts=['iced/scripts/ice'],
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
|
[
"nelle.varoquaux@gmail.com"
] |
nelle.varoquaux@gmail.com
|
b65eeff263cdcdddf10a579f84650f676024d345
|
b733a463ba1a21ac4c2756e8552ff4b251ce6b55
|
/Inpainting/contextual_attention_baseline/celebahq/model.py
|
2395a67a8b38326d4562a068027be9706d77003e
|
[] |
no_license
|
fengjiran/tensorflow_learning
|
003047cdb396572e3535043dfff8671befcd0f21
|
44dce5bb39c8d6421ea6181338b0ea4b0e5e9797
|
refs/heads/master
| 2023-04-04T07:53:55.707703
| 2022-03-30T09:35:52
| 2022-03-30T09:35:52
| 100,839,372
| 3
| 0
| null | 2023-03-24T22:50:54
| 2017-08-20T04:58:21
|
Python
|
UTF-8
|
Python
| false
| false
| 21,391
|
py
|
from __future__ import print_function
# import numpy as np
import tensorflow as tf
from utils import spatial_discounting_mask
from utils import random_bbox
from utils import bbox2mask
from utils import local_patch
from utils import gan_wgan_loss
from utils import random_interpolates
from utils import gradient_penalty
from utils import images_summary
from utils import gradients_summary
class CompletionModel(object):
"""Construct model."""
def __init__(self):
print('Construct the model')
def coarse_network(self, images, reuse=None):
conv_layers = []
cnum = 32
with tf.variable_scope('coarse', reuse=reuse):
conv1 = tf.layers.conv2d(images, cnum, 5, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv1')
conv2 = tf.layers.conv2d(conv1, 2 * cnum, 3, strides=2, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv2_downsample')
conv3 = tf.layers.conv2d(conv2, 2 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv3')
conv4 = tf.layers.conv2d(conv3, 4 * cnum, 3, strides=2, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv4_downsample')
conv5 = tf.layers.conv2d(conv4, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv5')
conv6 = tf.layers.conv2d(conv5, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv6')
conv7 = tf.layers.conv2d(conv6, 4 * cnum, 3, padding='same', dilation_rate=2, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv7_atrous')
conv8 = tf.layers.conv2d(conv7, 4 * cnum, 3, padding='same', dilation_rate=4, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv8_atrous')
conv9 = tf.layers.conv2d(conv8, 4 * cnum, 3, padding='same', dilation_rate=8, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv9_atrous')
conv10 = tf.layers.conv2d(conv9, 4 * cnum, 3, padding='same', dilation_rate=16, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv10_atrous')
conv11 = tf.layers.conv2d(conv10, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv11')
conv12 = tf.layers.conv2d(conv11, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv12')
conv13 = tf.layers.conv2d(
inputs=tf.image.resize_nearest_neighbor(conv12,
(conv3.get_shape().as_list()[1], conv3.get_shape().as_list()[2])),
filters=2 * cnum,
kernel_size=3,
strides=1,
padding='same',
activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(),
name='conv13_upsample')
conv14 = tf.layers.conv2d(conv13, 2 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv14')
conv15 = tf.layers.conv2d(
inputs=tf.image.resize_nearest_neighbor(conv14,
(conv1.get_shape().as_list()[1], conv1.get_shape().as_list()[2])),
filters=cnum,
kernel_size=3,
strides=1,
padding='same',
activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(),
name='conv15_upsample')
conv16 = tf.layers.conv2d(conv15, int(cnum / 2), 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv16')
conv17 = tf.layers.conv2d(conv16, 3, 3, strides=1, padding='same',
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv17')
conv_output = tf.clip_by_value(conv17, -1., 1.)
for i in range(1, 18):
conv_layers.append(eval('conv{}'.format(i)))
for conv in conv_layers:
print('conv:{}, output_shape:{}'.format(conv_layers.index(conv) + 1, conv.get_shape().as_list()))
return conv_output
def refine_network(self, images, reuse=None):
"""Construct refine network."""
conv_layers = []
cnum = 32
with tf.variable_scope('refine', reuse=reuse):
conv1 = tf.layers.conv2d(images, cnum, 5, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv1')
conv2 = tf.layers.conv2d(conv1, cnum, 3, strides=2, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv2_downsample')
conv3 = tf.layers.conv2d(conv2, 2 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv3')
conv4 = tf.layers.conv2d(conv3, 2 * cnum, 3, strides=2, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv4_downsample')
conv5 = tf.layers.conv2d(conv4, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv5')
conv6 = tf.layers.conv2d(conv5, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv6')
conv7 = tf.layers.conv2d(conv6, 4 * cnum, 3, padding='same', dilation_rate=2, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv7_atrous')
conv8 = tf.layers.conv2d(conv7, 4 * cnum, 3, padding='same', dilation_rate=4, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv8_atrous')
conv9 = tf.layers.conv2d(conv8, 4 * cnum, 3, padding='same', dilation_rate=8, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv9_atrous')
conv10 = tf.layers.conv2d(conv9, 4 * cnum, 3, padding='same', dilation_rate=16, activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv10_atrous')
conv11 = tf.layers.conv2d(conv10, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv11')
conv12 = tf.layers.conv2d(conv11, 4 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv12')
conv13 = tf.layers.conv2d(
inputs=tf.image.resize_nearest_neighbor(conv12,
(conv3.get_shape().as_list()[1], conv3.get_shape().as_list()[2])),
filters=2 * cnum,
kernel_size=3,
strides=1,
padding='same',
activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(),
name='conv13_upsample')
conv14 = tf.layers.conv2d(conv13, 2 * cnum, 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv14')
conv15 = tf.layers.conv2d(
inputs=tf.image.resize_nearest_neighbor(conv14,
(conv1.get_shape().as_list()[1], conv1.get_shape().as_list()[2])),
filters=cnum,
kernel_size=3,
strides=1,
padding='same',
activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(),
name='conv15_upsample')
conv16 = tf.layers.conv2d(conv15, int(cnum / 2), 3, strides=1, padding='same', activation=tf.nn.elu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv16')
conv17 = tf.layers.conv2d(conv16, 3, 3, strides=1, padding='same',
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv17')
conv_output = tf.clip_by_value(conv17, -1., 1.)
for i in range(1, 18):
conv_layers.append(eval('conv{}'.format(i)))
for conv in conv_layers:
print('conv:{}, output_shape:{}'.format(conv_layers.index(conv) + 1, conv.get_shape().as_list()))
return conv_output
def global_discriminator(self, x, reuse=None):
cnum = 64
with tf.variable_scope('global_discriminator', reuse=reuse):
conv1 = tf.layers.conv2d(x, cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv1')
conv2 = tf.layers.conv2d(conv1, 2 * cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv2')
conv3 = tf.layers.conv2d(conv2, 4 * cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv3')
conv4 = tf.layers.conv2d(conv3, 4 * cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv4')
return tf.contrib.layers.flatten(tf.nn.leaky_relu(conv4))
def local_discriminator(self, x, reuse=None):
cnum = 64
with tf.variable_scope('local_discriminator', reuse=reuse):
conv1 = tf.layers.conv2d(x, cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv1')
conv2 = tf.layers.conv2d(conv1, 2 * cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv2')
conv3 = tf.layers.conv2d(conv2, 4 * cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv3')
conv4 = tf.layers.conv2d(conv3, 8 * cnum, 5, strides=2, padding='same', activation=tf.nn.leaky_relu,
kernel_initializer=tf.keras.initializers.glorot_normal(), name='conv4')
return tf.contrib.layers.flatten(tf.nn.leaky_relu(conv4))
def build_wgan_discriminator(self, global_input, local_input, reuse=None):
with tf.variable_scope('wgan_discriminator', reuse=reuse):
dglobal = self.global_discriminator(global_input, reuse=reuse)
dlocal = self.local_discriminator(local_input, reuse=reuse)
dout_global = tf.layers.dense(dglobal, 1, name='dout_global_fc')
dout_local = tf.layers.dense(dlocal, 1, name='dout_local_fc')
return dout_global, dout_local
def build_graph_with_losses(self, batch_data, cfg, summary=True, reuse=None):
# batch_pos = batch_data / 127.5 - 1
batch_pos = batch_data
bbox = random_bbox(cfg)
mask = bbox2mask(bbox, cfg)
batch_incomplete = batch_pos * (1. - mask)
ones_x = tf.ones_like(batch_incomplete)[:, :, :, 0:1]
coarse_network_input = tf.concat([batch_incomplete, ones_x, ones_x * mask], axis=3)
coarse_output = self.coarse_network(coarse_network_input, reuse)
batch_complete_coarse = coarse_output * mask + batch_incomplete * (1. - mask)
refine_network_input = tf.concat([batch_complete_coarse, ones_x, ones_x * mask], axis=3)
refine_output = self.refine_network(refine_network_input, reuse)
batch_complete_refine = refine_output * mask + batch_incomplete * (1. - mask)
losses = {}
# local patches
local_patch_pos = local_patch(batch_pos, bbox)
local_patch_coarse = local_patch(coarse_output, bbox)
local_patch_refine = local_patch(refine_output, bbox)
local_patch_mask = local_patch(mask, bbox)
l1_alpha = cfg['coarse_l1_alpha']
losses['coarse_l1_loss'] = l1_alpha * tf.reduce_mean(tf.abs(local_patch_pos - local_patch_coarse) *
spatial_discounting_mask(cfg))
losses['coarse_ae_loss'] = l1_alpha * tf.reduce_mean(tf.abs(batch_pos - coarse_output) * (1. - mask))
losses['refine_l1_loss'] = losses['coarse_l1_loss'] + \
tf.reduce_mean(tf.abs(local_patch_pos - local_patch_refine) *
spatial_discounting_mask(cfg))
losses['refine_ae_loss'] = losses['coarse_ae_loss'] + \
tf.reduce_mean(tf.abs(batch_pos - refine_output) * (1. - mask))
losses['coarse_ae_loss'] /= tf.reduce_mean(1. - mask)
losses['refine_ae_loss'] /= tf.reduce_mean(1. - mask)
# wgan
batch_pos_neg = tf.concat([batch_pos, batch_complete_refine], axis=0)
# local discriminator patch
local_patch_pos_neg = tf.concat([local_patch_pos, local_patch_refine], axis=0)
# wgan with gradient penalty
pos_neg_global, pos_neg_local = self.build_wgan_discriminator(batch_pos_neg,
local_patch_pos_neg,
reuse)
pos_global, neg_global = tf.split(pos_neg_global, 2)
pos_local, neg_local = tf.split(pos_neg_local, 2)
# wgan loss
g_loss_global, d_loss_global = gan_wgan_loss(pos_global, neg_global)
g_loss_local, d_loss_local = gan_wgan_loss(pos_local, neg_local)
losses['refine_g_loss'] = cfg['global_wgan_loss_alpha'] * g_loss_global + g_loss_local
losses['refine_d_loss'] = d_loss_global + d_loss_local
# gradient penalty
interpolates_global = random_interpolates(batch_pos, batch_complete_refine)
interpolates_local = random_interpolates(local_patch_pos, local_patch_refine)
dout_global, dout_local = self.build_wgan_discriminator(interpolates_global,
interpolates_local,
reuse=True)
# apply penalty
penalty_global = gradient_penalty(interpolates_global, dout_global, mask=mask, norm=750.)
penalty_local = gradient_penalty(interpolates_local, dout_local, mask=local_patch_mask, norm=750.)
losses['gp_loss'] = cfg['wgan_gp_lambda'] * (penalty_global + penalty_local)
losses['refine_d_loss'] += losses['gp_loss']
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'coarse') +\
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'refine')
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'wgan_discriminator')
if summary:
# stage1
tf.summary.scalar('rec_loss/coarse_rec_loss', losses['coarse_l1_loss'] + losses['coarse_ae_loss'])
# tf.summary.scalar('rec_loss/coarse_l1_loss', losses['coarse_l1_loss'])
# tf.summary.scalar('rec_loss/coarse_ae_loss', losses['coarse_ae_loss'])
tf.summary.scalar('rec_loss/refine_rec_loss', losses['refine_l1_loss'] + losses['refine_ae_loss'])
# tf.summary.scalar('rec_loss/refine_l1_loss', losses['refine_l1_loss'])
# tf.summary.scalar('rec_loss/refine_ae_loss', losses['refine_ae_loss'])
visual_img = [batch_pos, batch_incomplete, batch_complete_coarse, batch_complete_refine]
visual_img = tf.concat(visual_img, axis=2)
images_summary(visual_img, 'raw_incomplete_coarse_refine', 4)
# stage2
gradients_summary(g_loss_global, refine_output, name='g_loss_global')
gradients_summary(g_loss_local, refine_output, name='g_loss_local')
tf.summary.scalar('convergence/refine_d_loss', losses['refine_d_loss'])
# tf.summary.scalar('convergence/refine_g_loss', losses['refine_g_loss'])
tf.summary.scalar('convergence/local_d_loss', d_loss_local)
tf.summary.scalar('convergence/global_d_loss', d_loss_global)
tf.summary.scalar('gradient_penalty/gp_loss', losses['gp_loss'])
tf.summary.scalar('gradient_penalty/gp_penalty_local', penalty_local)
tf.summary.scalar('gradient_penalty/gp_penalty_global', penalty_global)
# summary the magnitude of gradients from different losses w.r.t. predicted image
# gradients_summary(losses['g_loss'], refine_output, name='g_loss')
gradients_summary(losses['coarse_l1_loss'] + losses['coarse_ae_loss'],
coarse_output,
name='rec_loss_grad_to_coarse')
gradients_summary(losses['refine_l1_loss'] + losses['refine_ae_loss'] + losses['refine_g_loss'],
refine_output,
name='rec_loss_grad_to_refine')
gradients_summary(losses['coarse_l1_loss'], coarse_output, name='l1_loss_grad_to_coarse')
gradients_summary(losses['refine_l1_loss'], refine_output, name='l1_loss_grad_to_refine')
gradients_summary(losses['coarse_ae_loss'], coarse_output, name='ae_loss_grad_to_coarse')
gradients_summary(losses['refine_ae_loss'], refine_output, name='ae_loss_grad_to_refine')
return g_vars, d_vars, losses
def build_infer_graph(self, batch_data, cfg, bbox=None, name='val'):
cfg['max_delta_height'] = 0
cfg['max_delta_width'] = 0
if bbox is None:
bbox = random_bbox(cfg)
mask = bbox2mask(bbox, cfg)
batch_pos = batch_data
batch_incomplete = batch_pos * (1. - mask)
ones_x = tf.ones_like(batch_incomplete)[:, :, :, 0:1]
coarse_network_input = tf.concat([batch_incomplete, ones_x, ones_x * mask], axis=3)
# inpaint
coarse_output = self.coarse_network(coarse_network_input, reuse=True)
batch_complete_coarse = coarse_output * mask + batch_incomplete * (1. - mask)
refine_network_input = tf.concat([batch_complete_coarse, ones_x, ones_x * mask], axis=3)
refine_output = self.refine_network(refine_network_input, reuse=True)
# apply mask and reconstruct
# batch_complete = batch_predicted * mask + batch_incomplete * (1. - mask)
batch_complete_coarse = coarse_output * mask + batch_incomplete * (1. - mask)
batch_complete_refine = refine_output * mask + batch_incomplete * (1. - mask)
# global image visualization
visual_img = [batch_pos, batch_incomplete, batch_complete_coarse, batch_complete_refine]
images_summary(tf.concat(visual_img, axis=2), name + '_raw_incomplete_coarse_refine', 10)
return (batch_complete_coarse, batch_complete_refine)
def build_static_infer_graph(self, batch_data, cfg, name):
bbox = (tf.constant(cfg['hole_height'] // 2), tf.constant(cfg['hole_width'] // 2),
tf.constant(cfg['hole_height']), tf.constant(cfg['hole_width']))
return self.build_infer_graph(batch_data, cfg, bbox, name)
if __name__ == '__main__':
import yaml
with open('config.yaml', 'r') as f:
cfg = yaml.load(f)
model = CompletionModel()
x = tf.random_uniform([10, 256, 256, 3])
g_vars, d_vars, losses = model.build_graph_with_losses(x, cfg)
print(len(g_vars), len(d_vars))
|
[
"fengjiran@foxmail.com"
] |
fengjiran@foxmail.com
|
f8c8e2f0535c68e4e418bb51085da631c4d2dc26
|
8149d1030b5bc62cc82d5afedbe7486daedbf8c5
|
/[114][Flatten Binary Tree to Linked List][Medium].py
|
42719b75942c5b3d7739e947657d0281d93a89d4
|
[] |
no_license
|
guofei9987/leetcode_python
|
faef17bb59808197e32ed97e92e2222862e2ba8c
|
23703a6fb5028d982b3febc630e28f9bb65a82a6
|
refs/heads/master
| 2020-03-21T18:24:33.014579
| 2019-10-12T13:29:03
| 2019-10-12T13:29:03
| 138,889,760
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
# https://leetcode.com/problems/flatten-binary-tree-to-linked-list
|
[
"guofei9987@foxmail.com"
] |
guofei9987@foxmail.com
|
8060013fa68577e23d342f0850936dfbffc7c906
|
5bfa6d39ca5999f24d5c054cf26d4112156d6842
|
/Practice/Numpy/Polynomials.py
|
e95bad632063bb8b47132979c10f83ea795d01de
|
[] |
no_license
|
CompetitiveCode/hackerrank-python
|
3ad7f70f3d09149242b2ab6b27d0e4ec2a188837
|
898e6bf791791cbdeca9192c78c623a115b4c97b
|
refs/heads/master
| 2022-02-03T23:14:57.866923
| 2019-05-30T11:34:01
| 2019-05-30T11:34:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
#Answer to Polynomials
import numpy
a=list(map(float,input().split()))
x=int(input())
print(numpy.polyval(a,x))
"""
poly
The poly tool returns the coefficients of a polynomial with the given sequence of roots.
print numpy.poly([-1, 1, 1, 10]) #Output : [ 1 -11 9 11 -10]
roots
The roots tool returns the roots of a polynomial with the given coefficients.
print numpy.roots([1, 0, -1]) #Output : [-1. 1.]
polyint
The polyint tool returns an antiderivative (indefinite integral) of a polynomial.
print numpy.polyint([1, 1, 1]) #Output : [ 0.33333333 0.5 1. 0. ]
polyder
The polyder tool returns the derivative of the specified order of a polynomial.
print numpy.polyder([1, 1, 1, 1]) #Output : [3 2 1]
polyval
The polyval tool evaluates the polynomial at a specific value.
print numpy.polyval([1, -2, 0, 2], 4) #Output : 34
polyfit
The polyfit tool fits a polynomial of a specified order to a set of data using a least-squares approach.
print numpy.polyfit([0,1,-1, 2, -2], [0,1,1, 4, 4], 2)
#Output : [ 1.00000000e+00 0.00000000e+00 -3.97205465e-16]
The functions polyadd, polysub, polymul, and polydiv also handle proper addition, subtraction, multiplication, and division of polynomial coefficients, respectively.
"""
|
[
"admin@remedcu.com"
] |
admin@remedcu.com
|
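The docstring in the Polynomials.py row above describes NumPy's polynomial helpers using Python 2 `print` statements. The short sketch below re-runs equivalent calls in Python 3 syntax; it is an illustrative aside with made-up coefficients, not part of the dataset row.

```python
import numpy as np

coeffs = [1, -2, 0, 2]            # x**3 - 2*x**2 + 2
print(np.polyval(coeffs, 4))      # evaluate at x = 4 -> 34
print(np.polyder(coeffs))         # derivative coefficients -> [ 3 -4  0]
print(np.roots([1, 0, -1]))       # roots of x**2 - 1 -> +1 and -1

x = [0, 1, -1, 2, -2]
y = [0, 1, 1, 4, 4]
print(np.polyfit(x, y, 2))        # least-squares fit, approximately [1, 0, 0]
```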
4abe6e32b5d50900b94925c86cfa31cd58e03efc
|
09e8c92187ff8d7a726727041e2dd80850dcce3d
|
/interview_cake/CAKE_203_find_rotation_point.py
|
3353430fa0d222707af18761f0fd88a2d67630b8
|
[] |
no_license
|
kakru/puzzles
|
6dd72bd0585f526e75d026f3ba2446b0c14f60e0
|
b91bdf0e68605f7e517446f8a00b1e0f1897c24d
|
refs/heads/master
| 2020-04-09T09:47:31.341475
| 2019-05-03T21:24:41
| 2019-05-03T21:24:41
| 160,246,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,210
|
py
|
import unittest
# class Solution: # naive solution with O(n) time
# def findRotationPoint(self, words):
# """
# :type A: List[List[int]]
# :rtype: List[List[int]]
# """
# length = len(words)
# if length == 0:
# return None
# elif length == 1:
# return 0
# index = 1
# prev = now = words[0]
# while index < length:
# prev, now = now, words[index]
# if prev > now:
# return index
# index += 1
class Solution: # with binary search O(logn)
def findRotationPoint(self, words):
"""
:type A: List[List[int]]
:rtype: List[List[int]]
"""
length = len(words)
left, right = 0, length-1
while True:
middle = (left + right)//2
if words[left] < words[middle] > words[right]:
left = middle
elif words[left] > words[middle] < words[right]:
right = middle
else:
break
if right-left == length-1: # middle index never moved
return 0
else:
return middle+1
class BasicTest(unittest.TestCase):
def test_not_rotated(self):
input_ = [
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
'ptolemaic',
'retrograde',
'supplant',
'undulate',
'xenoepist',
]
expected_output = 0
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
def test_1(self):
input_ = [
'ptolemaic',
'retrograde',
'supplant',
'undulate',
'xenoepist',
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
]
expected_output = 5
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
def test_2(self):
input_ = [
'retrograde',
'supplant',
'undulate',
'xenoepist',
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
]
expected_output = 4
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
def test_3(self):
input_ = [
'ptolemaic',
'retrograde',
'supplant',
'undulate',
'xenoepist',
'zzzzzz',
'asymptote', # <-- rotates here!
'babka',
'banoffee',
'engender',
'karpatka',
'othellolagkage',
]
expected_output = 6
output = Solution().findRotationPoint(input_)
self.assertEqual(output, expected_output)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"karol@kruzelecki.com"
] |
karol@kruzelecki.com
|
a334b1604a06a8cca9a7a309ca067b58b98f81c6
|
b637e53b36ad083575b161eaa8371f0cc11981a2
|
/apps/Articulos_Cientificos/models.py
|
a096dbfd20585fd9a513898401accf4ae8f69bc2
|
[] |
no_license
|
cienciometrico2017/cienciometrico2018v2.0
|
d7d014f858296aa262649696a4d3bfceb0b9afec
|
22e8800c921e8c4890c4f52c9826532364a99a68
|
refs/heads/master
| 2020-03-20T22:04:26.710351
| 2018-07-26T04:28:26
| 2018-07-26T04:28:26
| 137,777,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,516
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from apps.Linea_Investigacion.models import linea_investigacion
from apps.Sub_Lin_Investigacion.models import sub_lin_investigacion
from apps.baseDatos.models import baseDatos
from apps.Revista.models import revista
from apps.palabraClave.models import palabraClave
from apps.ciudad.models import ciudad
from apps.pais.models import pais
from django.contrib.auth.models import User
# Create your models here.
class articulos_cientificos(models.Model):
Estado = (
('Receptado', 'Receptado'),
('En revisión', 'En revisión'),
('Aceptado', 'Aceptado'),
('Publicado', 'Publicado'),
)
Tipo = (
('Científico', 'Científico'),
('De revisión', 'De revisión'),
('Ensayo', 'Ensayo'),
('Reflexión', 'Reflexión'),
)
titulo = models.CharField(max_length=300, null=True, blank=True, unique=True)
estado = models.CharField(max_length=30, blank=True, null=True, choices=Estado)
iSSN = models.CharField(max_length=60, blank=True, null=True)
url = models.CharField(max_length=300, blank=True, null=True)
doi = models.CharField(max_length=300, blank=True, null=True)
fechaPublicacion = models.DateField(blank=True, null=True)
pais = models.ForeignKey(pais, blank=True, null=True)
ciudad = models.ForeignKey(ciudad, blank=True, null=True)
baseDatos = models.ManyToManyField(baseDatos, blank=True)
revista = models.ForeignKey(revista, blank=True)
volumen = models.CharField(max_length=150, blank=True, null=True)
numero = models.CharField(max_length=150, blank=True, null=True)
lineaInves = models.ForeignKey(linea_investigacion, max_length=150, blank=True, null=True)
SubLinea = models.ForeignKey(sub_lin_investigacion, max_length=150, blank=True, null=True)
resumen = models.TextField(blank=True, null=True)
palabraClave = models.ManyToManyField(palabraClave, blank=True)
documento = models.FileField(upload_to='articulo/', blank=True, null=True)
tipoArticulo = models.CharField(max_length=150, blank=True, null=True, choices=Tipo)
aprobado = models.CharField(max_length=150, blank=True, null=True)
comiteEditorial = models.CharField(max_length=150, blank=True, null=True)
estPub = models.BooleanField(blank=True)
desEstado = models.TextField(null=True, blank=True)
class Meta:
permissions = (
("ver_articulo", "ver articulo"),
)
|
[
"danilomoya19@gmail.com"
] |
danilomoya19@gmail.com
|
a2febb4a52872577529393b416dd06c38c3770c7
|
bee9379d610c8e7b524646c5f6fe26608bf54fe1
|
/detectron/ops/generate_proposals.py
|
b804c36f0c3664f92a804f9b1d5cfa10abb0d969
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
PacteraOliver/Detectron
|
8e6eeaa5c17506b8fa73a47d3df7c7dbe93a200a
|
b3ad58fe74b64b7594dac7537ce950f5e4f54235
|
refs/heads/master
| 2020-03-19T02:33:24.927162
| 2018-06-12T18:57:02
| 2018-06-12T18:57:02
| 135,638,922
| 3
| 0
|
Apache-2.0
| 2018-06-12T18:14:33
| 2018-05-31T21:54:07
|
Python
|
UTF-8
|
Python
| false
| false
| 7,996
|
py
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
from detectron.core.config import cfg
import detectron.utils.boxes as box_utils
class GenerateProposalsOp(object):
"""Output object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def __init__(self, anchors, spatial_scale, train):
self._anchors = anchors
self._num_anchors = self._anchors.shape[0]
self._feat_stride = 1. / spatial_scale
self._train = train
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposals for inputs/outputs
documentation.
"""
# 1. for each location i in a (H, W) grid:
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas to each of the A anchors at cell i
# 2. clip predicted boxes to image
# 3. remove predicted boxes with either height or width < threshold
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take the top pre_nms_topN proposals before NMS
# 6. apply NMS with a loose threshold (0.7) to the remaining proposals
# 7. take after_nms_topN proposals after NMS
# 8. return the top proposals
# predicted probability of fg object for each RPN anchor
scores = inputs[0].data
# predicted anchor transformations
bbox_deltas = inputs[1].data
# input image (height, width, scale), in which scale is the scale factor
# applied to the original dataset image to get the network input image
im_info = inputs[2].data
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
# Enumerate all shifted positions on the (H, W) grid
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)
# Convert to (K, 4), K=H*W, where the columns are (dx, dy, dx, dy)
# shift pointing to each grid location
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Broadcast anchors over shifts to enumerate all anchors at all positions
# in the (H, W) grid:
# - add A anchors of shape (1, A, 4) to
# - K shifts of shape (K, 1, 4) to get
# - all shifted anchors of shape (K, A, 4)
# - reshape to (K*A, 4) shifted anchors
num_images = inputs[0].shape[0]
A = self._num_anchors
K = shifts.shape[0]
all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
all_anchors = all_anchors.reshape((K * A, 4))
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
for im_i in range(num_images):
im_i_boxes, im_i_probs = self.proposals_for_one_image(
im_info[im_i, :], all_anchors, bbox_deltas[im_i, :, :, :],
scores[im_i, :, :, :]
)
batch_inds = im_i * np.ones(
(im_i_boxes.shape[0], 1), dtype=np.float32
)
im_i_rois = np.hstack((batch_inds, im_i_boxes))
rois = np.append(rois, im_i_rois, axis=0)
roi_probs = np.append(roi_probs, im_i_probs, axis=0)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
if len(outputs) > 1:
outputs[1].reshape(roi_probs.shape)
outputs[1].data[...] = roi_probs
def proposals_for_one_image(
self, im_info, all_anchors, bbox_deltas, scores
):
# Get mode-dependent configuration
cfg_key = 'TRAIN' if self._train else 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
# - bbox deltas will be (4 * A, H, W) format from conv output
# - transpose to (H, W, 4 * A)
# - reshape to (H * W * A, 4) where rows are ordered by (H, W, A)
# in slowest to fastest order to match the enumerated anchors
bbox_deltas = bbox_deltas.transpose((1, 2, 0)).reshape((-1, 4))
# Same story for the scores:
# - scores are (A, H, W) format from conv output
# - transpose to (H, W, A)
# - reshape to (H * W * A, 1) where rows are ordered by (H, W, A)
# to match the order of anchors and bbox_deltas
scores = scores.transpose((1, 2, 0)).reshape((-1, 1))
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores):
order = np.argsort(-scores.squeeze())
else:
# Avoid sorting possibly large arrays; First partition to get top K
# unsorted and then sort just those (~20x faster for 200k scores)
inds = np.argpartition(
-scores.squeeze(), pre_nms_topN
)[:pre_nms_topN]
order = np.argsort(-scores[inds].squeeze())
order = inds[order]
bbox_deltas = bbox_deltas[order, :]
all_anchors = all_anchors[order, :]
scores = scores[order]
# Transform anchors into proposals via bbox transformations
proposals = box_utils.bbox_transform(
all_anchors, bbox_deltas, (1.0, 1.0, 1.0, 1.0))
# 2. clip proposals to image (may result in proposals with zero area
# that will be removed in the next step)
proposals = box_utils.clip_tiled_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < min_size
keep = _filter_boxes(proposals, min_size, im_info)
proposals = proposals[keep, :]
scores = scores[keep]
# 6. apply loose nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if nms_thresh > 0:
keep = box_utils.nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
return proposals, scores
def _filter_boxes(boxes, min_size, im_info):
"""Only keep boxes with both sides >= min_size and center within the image.
"""
# Scale min_size to match image scale
min_size *= im_info[2]
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
x_ctr = boxes[:, 0] + ws / 2.
y_ctr = boxes[:, 1] + hs / 2.
keep = np.where(
(ws >= min_size) & (hs >= min_size) &
(x_ctr < im_info[1]) & (y_ctr < im_info[0]))[0]
return keep
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
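The comments in generate_proposals.py above explain how all anchors are enumerated by broadcasting the (1, A, 4) base anchors against (K, 1, 4) grid shifts. The toy sketch below reproduces just that broadcast with made-up numbers; it is not taken from Detectron.

```python
import numpy as np

# Two base anchors (A = 2) replicated over a 2x2 feature grid with stride 16 (K = 4).
anchors = np.array([[0, 0, 15, 15],
                    [0, 0, 31, 31]], dtype=np.float32)                 # (A, 4)
shift_x, shift_y = np.meshgrid(np.arange(2) * 16, np.arange(2) * 16)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()     # (K, 4)

all_anchors = anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]     # (K, A, 4)
all_anchors = all_anchors.reshape((-1, 4))                             # (K*A, 4)
print(all_anchors.shape)  # (8, 4): every base anchor placed at every grid cell
```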
23f2bea2b14225dad77414ae3db666636d85bc98
|
f89d70fc8bf370ef4e2aa54c7ee0de3b4a053624
|
/examples/EC2InstanceSample.py
|
1de96374251ddaabaa2bd2c493498593a6a8dba3
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yks0000/troposphere
|
a7622bff01c31f10dcb296d2ca353144e1d7f793
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
refs/heads/main
| 2022-04-28T03:51:42.770881
| 2022-04-15T15:15:01
| 2022-04-15T15:15:01
| 482,753,190
| 1
| 0
|
BSD-2-Clause
| 2022-04-18T07:20:42
| 2022-04-18T07:20:42
| null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
# Converted from EC2InstanceSample.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
import troposphere.ec2 as ec2
from troposphere import Base64, FindInMap, GetAtt, Output, Parameter, Ref, Template
template = Template()
keyname_param = template.add_parameter(
Parameter(
"KeyName",
Description="Name of an existing EC2 KeyPair to enable SSH "
"access to the instance",
Type="String",
)
)
template.add_mapping(
"RegionMap",
{
"us-east-1": {"AMI": "ami-7f418316"},
"us-west-1": {"AMI": "ami-951945d0"},
"us-west-2": {"AMI": "ami-16fd7026"},
"eu-west-1": {"AMI": "ami-24506250"},
"sa-east-1": {"AMI": "ami-3e3be423"},
"ap-southeast-1": {"AMI": "ami-74dda626"},
"ap-northeast-1": {"AMI": "ami-dcfa4edd"},
},
)
ec2_instance = template.add_resource(
ec2.Instance(
"Ec2Instance",
ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
InstanceType="t1.micro",
KeyName=Ref(keyname_param),
SecurityGroups=["default"],
UserData=Base64("80"),
)
)
template.add_output(
[
Output(
"InstanceId",
Description="InstanceId of the newly created EC2 instance",
Value=Ref(ec2_instance),
),
Output(
"AZ",
Description="Availability Zone of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "AvailabilityZone"),
),
Output(
"PublicIP",
Description="Public IP address of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PublicIp"),
),
Output(
"PrivateIP",
Description="Private IP address of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PrivateIp"),
),
Output(
"PublicDNS",
Description="Public DNSName of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PublicDnsName"),
),
Output(
"PrivateDNS",
Description="Private DNSName of the newly created EC2 instance",
Value=GetAtt(ec2_instance, "PrivateDnsName"),
),
]
)
print(template.to_json())
|
[
"mark@peek.org"
] |
mark@peek.org
|
33ee4698a0d01d53b045abacba21376f831b52ad
|
f3fb552a1b8b9c7536190bc43b9a77243f5be9b5
|
/tests/opc/test_rels.py
|
85d4d6cf7734f589e5518a29da03f5553031d6a5
|
[
"MIT"
] |
permissive
|
amisa/python-pptx
|
d602ead85ebf9e2b484c83cc901a2c742004236c
|
808f2475639f3d1471879ef2dacd151cfe8b89d3
|
refs/heads/master
| 2021-01-18T11:20:57.664794
| 2014-08-07T13:29:33
| 2014-08-07T13:29:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,280
|
py
|
# encoding: utf-8
"""Test suite for pptx.part module."""
from __future__ import absolute_import
import pytest
from mock import call, Mock, patch, PropertyMock
from pptx.opc.constants import RELATIONSHIP_TYPE as RT
from pptx.opc.oxml import CT_Relationships
from pptx.opc.package import Part, _Relationship, RelationshipCollection
from pptx.opc.packuri import PackURI
from ..unitutil import class_mock, instance_mock, loose_mock
class Describe_Relationship(object):
def it_remembers_construction_values(self):
# test data --------------------
rId = 'rId9'
reltype = 'reltype'
target = Mock(name='target_part')
external = False
# exercise ---------------------
rel = _Relationship(rId, reltype, target, None, external)
# verify -----------------------
assert rel.rId == rId
assert rel.reltype == reltype
assert rel.target_part == target
assert rel.is_external == external
def it_should_raise_on_target_part_access_on_external_rel(self):
rel = _Relationship(None, None, None, None, external=True)
with pytest.raises(ValueError):
rel.target_part
def it_should_have_target_ref_for_external_rel(self):
rel = _Relationship(None, None, 'target', None, external=True)
assert rel.target_ref == 'target'
def it_should_have_relative_ref_for_internal_rel(self):
"""
Internal relationships (TargetMode == 'Internal' in the XML) should
have a relative ref, e.g. '../slideLayouts/slideLayout1.xml', for
the target_ref attribute.
"""
part = Mock(name='part', partname=PackURI('/ppt/media/image1.png'))
baseURI = '/ppt/slides'
rel = _Relationship(None, None, part, baseURI) # external=False
assert rel.target_ref == '../media/image1.png'
class DescribeRelationshipCollection(object):
def it_also_has_dict_style_get_rel_by_rId(self, rels_with_known_rel):
rels, rId, known_rel = rels_with_known_rel
assert rels[rId] == known_rel
def it_should_raise_on_failed_lookup_by_rId(self, rels):
with pytest.raises(KeyError):
rels['rId666']
def it_has_a_len(self, rels):
assert len(rels) == 0
def it_can_add_a_relationship(self, _Relationship_):
baseURI, rId, reltype, target, is_external = (
'baseURI', 'rId9', 'reltype', 'target', False
)
rels = RelationshipCollection(baseURI)
rel = rels.add_relationship(reltype, target, rId, is_external)
_Relationship_.assert_called_once_with(
rId, reltype, target, baseURI, is_external
)
assert rels[rId] == rel
assert rel == _Relationship_.return_value
def it_can_add_a_relationship_if_not_found(
self, rels_with_matching_rel_, rels_with_missing_rel_):
rels, reltype, part, matching_rel = rels_with_matching_rel_
assert rels.get_or_add(reltype, part) == matching_rel
rels, reltype, part, new_rel = rels_with_missing_rel_
assert rels.get_or_add(reltype, part) == new_rel
def it_knows_the_next_available_rId(self, rels_with_rId_gap):
rels, expected_next_rId = rels_with_rId_gap
next_rId = rels._next_rId
assert next_rId == expected_next_rId
def it_can_find_a_related_part_by_reltype(
self, rels_with_target_known_by_reltype):
rels, reltype, known_target_part = rels_with_target_known_by_reltype
part = rels.part_with_reltype(reltype)
assert part is known_target_part
def it_can_find_a_related_part_by_rId(self, rels_with_known_target_part):
rels, rId, known_target_part = rels_with_known_target_part
part = rels.related_parts[rId]
assert part is known_target_part
def it_raises_KeyError_on_part_with_rId_not_found(self, rels):
with pytest.raises(KeyError):
rels.related_parts['rId666']
def it_can_compose_rels_xml(self, rels_with_known_rels, rels_elm):
rels_with_known_rels.xml
rels_elm.assert_has_calls(
[
call.add_rel(
'rId1', 'http://rt-hyperlink', 'http://some/link', True
),
call.add_rel(
'rId2', 'http://rt-image', '../media/image1.png', False
),
call.xml()
],
any_order=True
)
# def it_raises_on_add_rel_with_duplicate_rId(self, rels, rel):
# with pytest.raises(ValueError):
# rels.add_rel(rel)
# fixtures ---------------------------------------------
@pytest.fixture
def _Relationship_(self, request):
return class_mock(request, 'pptx.opc.package._Relationship')
@pytest.fixture
def rel(self, _rId, _reltype, _target_part, _baseURI):
return _Relationship(_rId, _reltype, _target_part, _baseURI)
@pytest.fixture
def rels(self, _baseURI):
return RelationshipCollection(_baseURI)
@pytest.fixture
def rels_elm(self, request):
"""
Return a rels_elm mock that will be returned from
CT_Relationships.new()
"""
# create rels_elm mock with a .xml property
rels_elm = Mock(name='rels_elm')
xml = PropertyMock(name='xml')
type(rels_elm).xml = xml
rels_elm.attach_mock(xml, 'xml')
rels_elm.reset_mock() # to clear attach_mock call
# patch CT_Relationships to return that rels_elm
patch_ = patch.object(CT_Relationships, 'new', return_value=rels_elm)
patch_.start()
request.addfinalizer(patch_.stop)
return rels_elm
@pytest.fixture
def rels_with_known_rel(self, rels, _rId, rel):
rels[_rId] = rel
return rels, _rId, rel
@pytest.fixture
def rels_with_known_rels(self):
"""
Populated RelationshipCollection instance that will exercise the
rels.xml property.
"""
rels = RelationshipCollection('/baseURI')
rels.add_relationship(
reltype='http://rt-hyperlink', target='http://some/link',
rId='rId1', is_external=True
)
part = Mock(name='part')
part.partname.relative_ref.return_value = '../media/image1.png'
rels.add_relationship(reltype='http://rt-image', target=part,
rId='rId2')
return rels
@pytest.fixture
def rels_with_known_target_part(self, rels, _rel_with_known_target_part):
rel, rId, target_part = _rel_with_known_target_part
rels.add_relationship(None, target_part, rId)
return rels, rId, target_part
@pytest.fixture
def rels_with_matching_rel_(self, request, rels):
matching_reltype_ = instance_mock(
request, str, name='matching_reltype_'
)
matching_part_ = instance_mock(
request, Part, name='matching_part_'
)
matching_rel_ = instance_mock(
request, _Relationship, name='matching_rel_',
reltype=matching_reltype_, target_part=matching_part_,
is_external=False
)
rels[1] = matching_rel_
return rels, matching_reltype_, matching_part_, matching_rel_
@pytest.fixture
def rels_with_missing_rel_(self, request, rels, _Relationship_):
missing_reltype_ = instance_mock(
request, str, name='missing_reltype_'
)
missing_part_ = instance_mock(
request, Part, name='missing_part_'
)
new_rel_ = instance_mock(
request, _Relationship, name='new_rel_',
reltype=missing_reltype_, target_part=missing_part_,
is_external=False
)
_Relationship_.return_value = new_rel_
return rels, missing_reltype_, missing_part_, new_rel_
@pytest.fixture
def rels_with_rId_gap(self, request, rels):
rel_with_rId1 = instance_mock(
request, _Relationship, name='rel_with_rId1', rId='rId1'
)
rel_with_rId3 = instance_mock(
request, _Relationship, name='rel_with_rId3', rId='rId3'
)
rels['rId1'] = rel_with_rId1
rels['rId3'] = rel_with_rId3
return rels, 'rId2'
@pytest.fixture
def rels_with_target_known_by_reltype(
self, rels, _rel_with_target_known_by_reltype):
rel, reltype, target_part = _rel_with_target_known_by_reltype
rels[1] = rel
return rels, reltype, target_part
@pytest.fixture
def _baseURI(self):
return '/baseURI'
@pytest.fixture
def _rel_with_known_target_part(
self, _rId, _reltype, _target_part, _baseURI):
rel = _Relationship(_rId, _reltype, _target_part, _baseURI)
return rel, _rId, _target_part
@pytest.fixture
def _rel_with_target_known_by_reltype(
self, _rId, _reltype, _target_part, _baseURI):
rel = _Relationship(_rId, _reltype, _target_part, _baseURI)
return rel, _reltype, _target_part
@pytest.fixture
def _reltype(self):
return RT.SLIDE
@pytest.fixture
def _rId(self):
return 'rId6'
@pytest.fixture
def _target_part(self, request):
return loose_mock(request)
|
[
"scanny@cisco.com"
] |
scanny@cisco.com
|
d5bb51ef5a3cee89328ed10c67a8b8fd3f001861
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/business_word_uml_president_state_father/history.py
|
44a4e3fdcc4a16d61ba54303596d20b908eb412b
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
const request = require('request');
const uuidv4 = require('uuid/v4');
/* Checks to see if the subscription key is available
as an environment variable. If you are setting your subscription key as a
string, then comment these lines out.
If you want to set your subscription key as a string, replace the value for
the Ocp-Apim-Subscription-Key header as a string. */
const subscriptionKey="844380b03ac2e822c304c3ffc5f2bb3d";
if (!subscriptionKey) {
throw new Error('Environment variable for your subscription key is not set.')
};
/* If you encounter any issues with the base_url or path, make sure that you are
using the latest endpoint: https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate */
function translateText(){
let options = {
method: 'POST',
baseUrl: 'https://api.cognitive.microsofttranslator.com/',
url: 'translate',
qs: {
'api-version': '3.0',
'to': ['']
},
headers: {
'f3714fe8d47433890ba7eaa3d9424e4d': subscriptionKey,
'Content-type': 'application/json',
'X-ClientTraceId': uuidv4().toString()
},
body: [{
'text': 'Hello World!'
}],
json: true,
};
request(options, function(err, res, body){
console.log(JSON.stringify(body, null, 4));
});
};
// Call the function to translate text.
translateText();
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
adcdc60dec982562dd7453dba24bea1c337f66f5
|
86990e89998b2ac9e2aa3e79df4bd72c07173903
|
/server.py
|
5ff1c63e1b6522d95a7645d3b6e9bdd21616e387
|
[] |
no_license
|
StevenAWillis/counter.py
|
ce0dac1983de177101821cbf11b40a5dbf5c6b58
|
c60873e53e069a693d11383902b5215109cc3d78
|
refs/heads/master
| 2020-06-20T18:46:19.194569
| 2019-07-16T14:43:04
| 2019-07-16T14:43:04
| 197,212,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
app.secret_key = 'keep it secret, keep it safe'
@app.route("/")
def count():
    if 'count' not in session:
        session['count'] = 0
    else:
        session['count'] += 1
    if 'countv' not in session:
        session['countv'] = 0
    else:
        session['countv'] += 1
    return render_template("index.html", count_visits=session['count'], count_visits2=session['countv'])
@app.route("/by2")
def count_2():
    session['count'] += 1
    return redirect("/")
@app.route("/reset")
def count_reset():
    session['count'] = 0
    return redirect("/")
@app.route("/manual_count", methods=['POST'])
def manual_count():
    # session['manual_counter'] = request.form['number']
    # session['count'] += int(session['manual_counter'])-1
    session['count'] += int(request.form['number']) - 1
    return redirect("/")
app.run(debug=True)
|
[
"you@example.com"
] |
you@example.com
|
37d399628b1fda5d720d6cfa764e556e40257814
|
dea7bc1fe176d090ffa426ffd982f3f8ddb8afa7
|
/Histogram_Equalization.py
|
48c9fb6dd1c7f0db61f7ddb0227333f5d4228d5f
|
[] |
no_license
|
cool229/Ashish-Kumar
|
f03f99a02d6c96ff94931b9026b079b7f6c8ffad
|
72b40c7916b2447c11a6548fbb0d72a25098a6eb
|
refs/heads/master
| 2020-03-29T20:10:41.810904
| 2019-03-10T09:42:47
| 2019-03-10T09:42:47
| 150,299,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
import cv2
import matplotlib.pyplot as plt
def main():
imgpath = "D:\\COURSES\\OpenCV\\Action\\standard_test_images\\lena_color_256.tif"
img = cv2.imread(imgpath, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
R, G, B = cv2.split(img)
output1_R = cv2.equalizeHist(R)
output1_G = cv2.equalizeHist(G)
output1_B = cv2.equalizeHist(B)
output1 = cv2.merge((output1_R, output1_G, output1_B))
# clahe = cv2.createCLAHE()
clahe = cv2.createCLAHE(clipLimit = 2.0, tileGridSize = (8,8))
output2_R = clahe.apply(R)
output2_G = clahe.apply(G)
output2_B = clahe.apply(B)
output2 = cv2.merge((output2_R, output2_G, output2_B))
outputs = [img, output1, output2]
titles = ['Original Image', 'Adjusted Histogram','CLAHE']
# outputs = [img, box, blur, gaussian]
for i in range(3):
plt.subplot(1, 3, i+1)
plt.imshow(outputs[i])
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
cool229.noreply@github.com
|
3072c7e3c97554c5f0f223ad3bc77e3dcdadf097
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/OrderJourneyElement.py
|
1723b24f1ac2ba1951d4dbd946e7c30c59f302c4
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 9,461
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.JourneyLocation import JourneyLocation
from alipay.aop.api.domain.OrderExtInfo import OrderExtInfo
from alipay.aop.api.domain.FunctionalService import FunctionalService
from alipay.aop.api.domain.UserInfomation import UserInfomation
from alipay.aop.api.domain.JourneyServiceChangeInfo import JourneyServiceChangeInfo
from alipay.aop.api.domain.JourneyMerchantInfo import JourneyMerchantInfo
class OrderJourneyElement(object):
def __init__(self):
self._arrival = None
self._departure = None
self._duration = None
self._end_time = None
self._end_time_desc = None
self._ext_info = None
self._functional_services = None
self._passagers = None
self._service_change_info = None
self._service_provider = None
self._start_time = None
self._start_time_desc = None
@property
def arrival(self):
return self._arrival
@arrival.setter
def arrival(self, value):
if isinstance(value, JourneyLocation):
self._arrival = value
else:
self._arrival = JourneyLocation.from_alipay_dict(value)
@property
def departure(self):
return self._departure
@departure.setter
def departure(self, value):
if isinstance(value, JourneyLocation):
self._departure = value
else:
self._departure = JourneyLocation.from_alipay_dict(value)
@property
def duration(self):
return self._duration
@duration.setter
def duration(self, value):
self._duration = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def end_time_desc(self):
return self._end_time_desc
@end_time_desc.setter
def end_time_desc(self, value):
self._end_time_desc = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
if isinstance(value, list):
self._ext_info = list()
for i in value:
if isinstance(i, OrderExtInfo):
self._ext_info.append(i)
else:
self._ext_info.append(OrderExtInfo.from_alipay_dict(i))
@property
def functional_services(self):
return self._functional_services
@functional_services.setter
def functional_services(self, value):
if isinstance(value, list):
self._functional_services = list()
for i in value:
if isinstance(i, FunctionalService):
self._functional_services.append(i)
else:
self._functional_services.append(FunctionalService.from_alipay_dict(i))
@property
def passagers(self):
return self._passagers
@passagers.setter
def passagers(self, value):
if isinstance(value, list):
self._passagers = list()
for i in value:
if isinstance(i, UserInfomation):
self._passagers.append(i)
else:
self._passagers.append(UserInfomation.from_alipay_dict(i))
@property
def service_change_info(self):
return self._service_change_info
@service_change_info.setter
def service_change_info(self, value):
if isinstance(value, JourneyServiceChangeInfo):
self._service_change_info = value
else:
self._service_change_info = JourneyServiceChangeInfo.from_alipay_dict(value)
@property
def service_provider(self):
return self._service_provider
@service_provider.setter
def service_provider(self, value):
if isinstance(value, JourneyMerchantInfo):
self._service_provider = value
else:
self._service_provider = JourneyMerchantInfo.from_alipay_dict(value)
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def start_time_desc(self):
return self._start_time_desc
@start_time_desc.setter
def start_time_desc(self, value):
self._start_time_desc = value
def to_alipay_dict(self):
params = dict()
if self.arrival:
if hasattr(self.arrival, 'to_alipay_dict'):
params['arrival'] = self.arrival.to_alipay_dict()
else:
params['arrival'] = self.arrival
if self.departure:
if hasattr(self.departure, 'to_alipay_dict'):
params['departure'] = self.departure.to_alipay_dict()
else:
params['departure'] = self.departure
if self.duration:
if hasattr(self.duration, 'to_alipay_dict'):
params['duration'] = self.duration.to_alipay_dict()
else:
params['duration'] = self.duration
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.end_time_desc:
if hasattr(self.end_time_desc, 'to_alipay_dict'):
params['end_time_desc'] = self.end_time_desc.to_alipay_dict()
else:
params['end_time_desc'] = self.end_time_desc
if self.ext_info:
if isinstance(self.ext_info, list):
for i in range(0, len(self.ext_info)):
element = self.ext_info[i]
if hasattr(element, 'to_alipay_dict'):
self.ext_info[i] = element.to_alipay_dict()
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.functional_services:
if isinstance(self.functional_services, list):
for i in range(0, len(self.functional_services)):
element = self.functional_services[i]
if hasattr(element, 'to_alipay_dict'):
self.functional_services[i] = element.to_alipay_dict()
if hasattr(self.functional_services, 'to_alipay_dict'):
params['functional_services'] = self.functional_services.to_alipay_dict()
else:
params['functional_services'] = self.functional_services
if self.passagers:
if isinstance(self.passagers, list):
for i in range(0, len(self.passagers)):
element = self.passagers[i]
if hasattr(element, 'to_alipay_dict'):
self.passagers[i] = element.to_alipay_dict()
if hasattr(self.passagers, 'to_alipay_dict'):
params['passagers'] = self.passagers.to_alipay_dict()
else:
params['passagers'] = self.passagers
if self.service_change_info:
if hasattr(self.service_change_info, 'to_alipay_dict'):
params['service_change_info'] = self.service_change_info.to_alipay_dict()
else:
params['service_change_info'] = self.service_change_info
if self.service_provider:
if hasattr(self.service_provider, 'to_alipay_dict'):
params['service_provider'] = self.service_provider.to_alipay_dict()
else:
params['service_provider'] = self.service_provider
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
if self.start_time_desc:
if hasattr(self.start_time_desc, 'to_alipay_dict'):
params['start_time_desc'] = self.start_time_desc.to_alipay_dict()
else:
params['start_time_desc'] = self.start_time_desc
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = OrderJourneyElement()
if 'arrival' in d:
o.arrival = d['arrival']
if 'departure' in d:
o.departure = d['departure']
if 'duration' in d:
o.duration = d['duration']
if 'end_time' in d:
o.end_time = d['end_time']
if 'end_time_desc' in d:
o.end_time_desc = d['end_time_desc']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'functional_services' in d:
o.functional_services = d['functional_services']
if 'passagers' in d:
o.passagers = d['passagers']
if 'service_change_info' in d:
o.service_change_info = d['service_change_info']
if 'service_provider' in d:
o.service_provider = d['service_provider']
if 'start_time' in d:
o.start_time = d['start_time']
if 'start_time_desc' in d:
o.start_time_desc = d['start_time_desc']
return o
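# Round-trip sketch for the plain (non-nested) fields, with illustrative values only:
#     element = OrderJourneyElement.from_alipay_dict({'duration': '120', 'start_time': '2020-01-01 08:00:00'})
#     element.to_alipay_dict()  # -> {'duration': '120', 'start_time': '2020-01-01 08:00:00'}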
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
b9248fdad90ce6ea4bd2265ba41473172071b84c
|
55bf9e277f3e222c3c0d5fc571f59c2454eca491
|
/scratch/delicatessen/main.py
|
688db894b2960acab3987a54a2596a102b923ee2
|
[
"MIT"
] |
permissive
|
megbedell/delicatessen
|
92dce685dcb808ddfcf8efb49a2d0dd9c1200a18
|
2f7217413c93a6ac76875724a8cc56b570065e4c
|
refs/heads/master
| 2022-12-14T01:51:06.414952
| 2020-09-08T21:23:38
| 2020-09-08T21:23:38
| 293,909,165
| 0
| 0
|
MIT
| 2020-09-08T19:35:19
| 2020-09-08T19:35:18
| null |
UTF-8
|
Python
| false
| false
| 3,123
|
py
|
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
ColumnDataSource,
Div,
Select,
MultiSelect,
Slider,
TextInput,
)
from bokeh.plotting import figure
from bokeh.models.tools import TapTool
from bokeh.models.callbacks import CustomJS
import os
PATH = os.path.abspath(os.path.dirname(__file__))
# Load an example dataset
data = np.loadtxt(
os.path.join(PATH, "data", "TESS-Gaia-mini.csv"), delimiter=",", skiprows=1
)
ra, dec, par, sid, _, _, ticid, tmag, dist = data.T
data = dict(ra=ra, dec=dec, dist=dist, ticid=ticid)
# Things the user can plot (label: parameter name)
axis_map = {"Right Ascension": "ra", "Declination": "dec", "Distance": "dist"}
# Input controls
x_axis = Select(
title="X Axis",
options=sorted(axis_map.keys()),
value="Right Ascension",
name="x_axis",
)
y_axis = Select(
title="Y Axis", options=sorted(axis_map.keys()), value="Declination"
)
s_axis = Select(
title="Marker Size", options=sorted(axis_map.keys()), value="Distance"
)
controls = [s_axis, x_axis, y_axis]
# Primary plot
source1 = ColumnDataSource(data=dict(x=[], y=[], size=[]))
plot1 = figure(
plot_height=600,
plot_width=700,
title="",
tooltips=[("TIC ID", "@ticid")],
tools="tap",
sizing_mode="scale_both",
)
plot1.circle(
x="x", y="y", source=source1, size="size", line_color=None,
)
taptool = plot1.select(type=TapTool)
# Secondary plot
source2 = ColumnDataSource(data=dict(x=[], y=[]))
plot2 = figure(
plot_height=300, plot_width=700, title="", sizing_mode="scale_both",
)
plot2.circle(
x="x", y="y", source=source2, line_color=None, color="black", alpha=0.1
)
# Events
def callback1(attr, old, new):
"""
Triggered when the user changes what we're plotting on the main plot.
"""
# Get the parameters to plot (x axis, y axis, and marker size)
x_name = axis_map[x_axis.value]
y_name = axis_map[y_axis.value]
s_name = axis_map[s_axis.value]
# Update the labels
plot1.xaxis.axis_label = x_axis.value
plot1.yaxis.axis_label = y_axis.value
# Update the data source
source1.data = dict(
x=data[x_name],
y=data[y_name],
size=data[s_name] / np.min(data[s_name]),
ticid=data["ticid"],
)
def callback2(attr, old, new):
"""
Triggered when the user selects a point on the main plot.
"""
# Get the TIC ID
ticid = source1.data["ticid"][source1.selected.indices[0]]
print("Fetching data for TIC ID {0}".format(ticid))
# TODO: Actually fetch the data from MAST.
# For now just populate with random numbers
source2.data = dict(x=np.arange(10000), y=np.random.randn(10000))
# Register the callbacks
source1.selected.on_change("indices", callback2)
for control in controls:
control.on_change("value", callback1)
# Display things on the page
inputs = column(*controls, width=320)
inputs.sizing_mode = "fixed"
l = column(row(inputs, plot1), plot2)
# Load and display the data
callback1(None, None, None)
# Go!
curdoc().add_root(l)
curdoc().title = "delicatessen"
|
[
"rodluger@gmail.com"
] |
rodluger@gmail.com
|
97f925580a92f0e296fd2c6ab77eaf73efcb812a
|
19102b3cc7a78b4f09d5e5eef3f7a93e33d8b988
|
/backend/api/serializers/model_year_report_vehicle.py
|
7d0463c011bac8eb464d731e893cdbd75be67554
|
[
"Apache-2.0"
] |
permissive
|
emi-hi/zeva
|
196b766096d2353b8ba57347b4946dce43c1b0a7
|
b395efe620a1b82c2ecee2004cca358d8407397e
|
refs/heads/master
| 2023-04-16T15:20:29.240394
| 2023-03-21T21:44:08
| 2023-03-21T21:44:08
| 234,123,338
| 0
| 0
|
Apache-2.0
| 2020-01-15T16:27:38
| 2020-01-15T16:27:37
| null |
UTF-8
|
Python
| false
| false
| 2,295
|
py
|
from rest_framework.serializers import ModelSerializer, \
SlugRelatedField
from api.models.model_year_report_vehicle import ModelYearReportVehicle
from api.serializers.vehicle import VehicleZevTypeSerializer
from api.serializers.vehicle import ModelYearSerializer
from api.serializers.credit_transaction import CreditClassSerializer
from api.models.vehicle_zev_type import ZevType
from api.models.model_year import ModelYear
from api.models.model_year_report import ModelYearReport
from api.models.credit_class import CreditClass
class ModelYearReportVehicleSerializer(ModelSerializer):
zev_class = SlugRelatedField(
slug_field='credit_class',
queryset=CreditClass.objects.all()
)
model_year = SlugRelatedField(
slug_field='name',
queryset=ModelYear.objects.all()
)
vehicle_zev_type = SlugRelatedField(
slug_field='vehicle_zev_code',
queryset=ZevType.objects.all()
)
class Meta:
model = ModelYearReportVehicle
fields = (
'id', 'pending_sales', 'sales_issued', 'make', 'model_name',
'range', 'zev_class', 'model_year', 'vehicle_zev_type', 'update_timestamp',
)
class ModelYearReportVehicleSaveSerializer(ModelSerializer):
"""
Model Year Report Vehicle save serializer
"""
zev_class = SlugRelatedField(
slug_field='credit_class',
queryset=CreditClass.objects.all()
)
model_year = SlugRelatedField(
slug_field='name',
queryset=ModelYear.objects.all()
)
vehicle_zev_type = SlugRelatedField(
slug_field='vehicle_zev_code',
queryset=ZevType.objects.all()
)
def create(self, validated_data):
request = self.context.get('request')
model_id = request.data.get('model_year_report_id')
model_year_report_vehicle = ModelYearReportVehicle.objects.create(
**validated_data,
model_year_report=ModelYearReport.objects.get(id=model_id)
)
return model_year_report_vehicle
class Meta:
model = ModelYearReportVehicle
fields = (
'pending_sales', 'sales_issued', 'make', 'model_name', 'range',
'zev_class', 'model_year', 'vehicle_zev_type',
'model_year_report_id'
)
|
[
"noreply@github.com"
] |
emi-hi.noreply@github.com
|
d6bf6958bdc2ebf23455a7c7bf25a8584b71bf4d
|
f1ff78b950f40023e82b0d877be49971a42f70c0
|
/Channel.py
|
ff51701105e38e962e458534a4af5b3974f4bd61
|
[] |
no_license
|
buddyli/python
|
72e2c6788e4982a3fa188a757585c1c97a385eaa
|
f764b64ba9b7d723648d1d1a35af15aa0a6f3a6f
|
refs/heads/master
| 2020-04-01T21:00:27.984460
| 2014-08-12T03:16:42
| 2014-08-12T03:16:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,853
|
py
|
#!/usr/bin/python
# _*_ coding: UTF-8 _*_
import time
import urllib
import httplib
import hashlib
import json
from lib.ChannelException import ChannelException
from lib.RequestCore import *
class Channel(object):
    '''The Channel class is the Python SDK for the Baidu Cloud PUSH service.
    Instantiate this class and set your apiKey and secretKey,
    then call the PUSH service interfaces below.
    '''
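    # A minimal usage sketch (placeholder credentials, not real keys):
    #     channel = Channel('your-api-key', 'your-secret-key')
    #     tags = channel.fetchTag()
    # Public methods return a Python dict on success and False on failure.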
    # Timestamp of the request
    TIMESTAMP = 'timestamp'
    # Request expiry time, defaults to 10 minutes
    EXPIRES = 'expires'
    # API version number
    VERSION = 'v'
    # Message channel ID
    CHANNEL_ID = 'channel_id'
    # User ID
    USER_ID = 'user_id'
    # Type of the user ID
    USER_TYPE = 'user_type'
    # Device type
    DEVICE_TYPE = 'device_type'
    # Page offset; batch queries must specify start, defaults to 0
    START = 'start'
    # Number of records per page, defaults to 100
    LIMIT = 'limit'
    # Messages
    MESSAGES = 'messages'
    # Message IDs
    MSG_IDS = 'msg_ids'
    # Message keys; duplicate messages can be dropped by key
    MSG_KEYS = 'msg_keys'
    # Message type: 0 = message (pass-through), 1 = notification; defaults to 0
    MESSAGE_TYPE = 'message_type'
    # Message expiry time, defaults to 86400 seconds
    MESSAGE_EXPIRES = 'message_expires'
    # Message tag name; messages can be grouped by tag
    TAG_NAME = 'tag'
    # Extra information attached to a tag
    TAG_INFO = 'info'
    # Message tag ID
    TAG_ID = 'tid'
    # Application ID, obtained from the Baidu developer center
    APPID = 'appid'
    # Application key, obtained from the Baidu developer center; required to create a Channel
    API_KEY = 'apikey'
    # Obtained from the Baidu developer center; required to create a Channel
    SECRET_KEY = 'secret_key'
    # Channel constants; users do not need to care about these
    SIGN = 'sign'
    METHOD = 'method'
    HOST = 'host'
    PRODUCT = 'channel'
    DEFAULT_HOST = 'channel.api.duapp.com'
    # Certificate-related constants
    NAME = 'name'
    DESCRIPTION = 'description'
    CERT = 'cert'
    RELEASE_CERT = 'release_cert'
    DEV_CERT = 'dev_cert'
    # Push type
    PUSH_TYPE = 'push_type'
    # Available push types
    PUSH_TO_USER = 1
    PUSH_TO_TAG = 2
    PUSH_TO_ALL = 3
    # Channel error constants
    CHANNEL_SDK_SYS = 1
    CHANNEL_SDK_INIT_FAIL = 2
    CHANNEL_SDK_PARAM = 3
    CHANNEL_SDK_HTTP_STATUS_ERROR_AND_RESULT_ERROR = 4
    CHANNEL_SDK_HTTP_STATUS_OK_BUT_RESULT_ERROR = 5
    ###
    # Public interface
    ###
def __init__(self, apiKey, secretKey, arr_curlOpts = None):
self._apiKey = apiKey
self._secretKey = secretKey
self._requestId = 0
if arr_curlOpts is None:
            self._curlOpts = dict(TIMEOUT = 30, CONNECTTIMEOUT = 5)
else:
self._curlOpts = arr_curlOpts
self._arrayErrorMap = {
            '0' : 'python sdk error',
            Channel.CHANNEL_SDK_SYS : 'python sdk error',
            Channel.CHANNEL_SDK_INIT_FAIL : 'python sdk init error',
Channel.CHANNEL_SDK_PARAM : 'lack param',
Channel.CHANNEL_SDK_HTTP_STATUS_ERROR_AND_RESULT_ERROR : 'http status is error, and the body returned is not a json string',
Channel.CHANNEL_SDK_HTTP_STATUS_OK_BUT_RESULT_ERROR :'http status is ok, but the body returned is not a json string'}
self._method_channel_in_body = ['push_msg', 'set_tag', 'fetch_tag', 'delete_tag', 'query_user_tags']
if(not isinstance(self._curlOpts, dict)):
            raise ChannelException('invalid param - arr_curlOpts is not a dict', Channel.CHANNEL_SDK_INIT_FAIL)
def setApiKey(self, apiKey):
self._apiKey = apiKey
def setSecretKey(self, secretKey):
self._secretKey = secretKey
def getRequestId(self):
return self._requestId
    # Server side: query binding information for a userId
    # User-facing: yes
    def queryBindList(self, optional = None):
        """
        Server side: query binding information for a userId
        Parameters:
            str userId: user ID
            dict optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
arrArgs = dict()
arrArgs[Channel.METHOD] = 'query_bindlist'
arrArgs[Channel.API_KEY] = self._apiKey
if(not arrArgs.has_key(Channel.TIMESTAMP)):
arrArgs[Channel.TIMESTAMP] = int(time.time())
if(arrArgs.has_key(Channel.SECRET_KEY)):
del arrArgs[Channel.SECRET_KEY]
resource = 'channel'
if(arrArgs.has_key(Channel.CHANNEL_ID) ):
if(arrArgs[Channel.CHANNEL_ID] is not None and arrArgs[Channel.METHOD] not in self._method_channel_in_body):
resource = arrArgs[Channel.CHANNEL_ID]
del arrArgs[Channel.CHANNEL_ID]
url = 'http://' + Channel.DEFAULT_HOST + '/rest/2.0/' + Channel.PRODUCT + '/' + resource
arrArgs[Channel.SIGN] = self._genSign('POST', url, arrArgs)
print arrArgs
self.send_request1(arrArgs)
self.send_request(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Server side: query binding information for a userId
    # User-facing: yes
    def queryBindList_byId(self, userId, optional = None):
        """
        Server side: query binding information for a userId
        Parameters:
            str userId: user ID
            dict optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
#tmpArgs = [userId, optional]
#arrArgs = self._mergeArgs([Channel.USER_ID], tmpArgs)
arrArgs = dict()
arrArgs[Channel.METHOD] = 'query_bindlist'
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Push a message
    # User-facing: yes
    def pushMessage(self, push_type, messages,
                    message_keys, optional = None):
        """
        Push a message
        Parameters:
            push_type: type of push target
            messages: message contents
            message_keys: message keys
            optional: optional parameters
        Returns: On success: Python dict; on failure: False
        """
try:
tmpArgs = [push_type, messages, message_keys, optional]
arrArgs = self._mergeArgs([Channel.PUSH_TYPE, Channel.MESSAGES,
Channel.MSG_KEYS], tmpArgs)
arrArgs[Channel.METHOD] = 'push_msg'
if(push_type == Channel.PUSH_TO_USER):
if(not arrArgs.has_key(Channel.USER_ID) or
arrArgs[Channel.USER_ID] is None):
raise ChannelException("userId should be specified in optional when pushType is PUSH_TO_USER", self.CHANNEL_SDK_PARAM)
elif(push_type == Channel.PUSH_TO_TAG):
if(not arrArgs.has_key(Channel.TAG_NAME) or
arrArgs[Channel.TAG_NAME] is None):
raise ChannelException("tagName should be specified in optional when pushType is PUSH_TO_TAG", self.CHANNEL_SDK_PARAM)
elif(push_type == Channel.PUSH_TO_ALL):
pass
else:
raise ChannelException("pushType value is invalid", self.CHANNEL_SDK_PARAM)
arrArgs[Channel.PUSH_TYPE] = push_type
if(isinstance(arrArgs[Channel.MESSAGES], list)):
arrArgs[Channel.MESSAGES] = json.dumps(arrArgs[Channel.MESSAGES])
if(isinstance(arrArgs[Channel.MSG_KEYS], list)):
arrArgs[Channel.MSG_KEYS] = json.dumps(arrArgs[Channel.MSG_KEYS])
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
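    # A usage sketch (hypothetical user ID and payload; message format depends on the target platform):
    #     channel.pushMessage(Channel.PUSH_TO_USER,
    #                         ['hello world'],
    #                         ['msg_key_1'],
    #                         {Channel.USER_ID: 'some_user_id'})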
    # Check whether a userId has already been bound
    # User-facing: yes
    def verifyBind(self, userId, optional = None):
        """
        Check whether a userId has already been bound
        Parameters:
            userId: user ID
            optional: optional parameters
        Returns:
            On success: Python array; on failure: False
        """
try:
tmpArgs = [userId, optional]
arrArgs = self._mergeArgs([Channel.USER_ID], tmpArgs)
arrArgs[Channel.METHOD] = 'verify_bind';
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Query messages for a userId
    # User-facing: yes
    def fetchMessage(self, userId, optional = None):
        """
        Query messages for a userId
        Parameters:
            userId: user ID
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [userId, optional]
arrArgs = self._mergeArgs([Channel.USER_ID], tmpArgs)
arrArgs[Channel.METHOD] = 'fetch_msg';
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Query the number of messages for a userId
    # User-facing: yes
    def fetchMessageCount(self, userId, optional = None):
        """
        Query the number of messages for a userId
        Parameters:
            userId: user ID
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [userId, optional]
arrArgs = self._mergeArgs([Channel.USER_ID], tmpArgs)
arrArgs[Channel.METHOD] = 'fetch_msgcount';
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Delete messages by userId and msgIds
    # User-facing: yes
    def deleteMessage(self, userId, msgId, optional = None):
        """
        Delete messages by userId and msgIds
        Parameters:
            userId: user ID
            msgIds: message IDs
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [userId, msgId, optional]
arrArgs = self._mergeArgs([Channel.USER_ID, Channel.MSG_IDS], tmpArgs)
arrArgs[Channel.METHOD] = 'delete_msg';
if(isinstance(arrArgs[Channel.MSG_IDS], list)):
arrArgs[Channel.MSG_IDS] = json.dumps(arrArgs[Channel.MSG_IDS])
return self._commonProcess(arrArgs)
except ChannelException, e:
            # self._channelExceptionHandler(e)
            return False
    # Set a message tag
    # User-facing: yes
    def setTag(self, tagName, optional = None):
        """
        Set a message tag
        Parameters:
            tagName: tag name
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [tagName, optional]
arrArgs = self._mergeArgs([Channel.TAG_NAME], tmpArgs)
arrArgs[Channel.METHOD] = 'set_tag';
return self._commonProcess(arrArgs)
except ChannelException, e:
self._channelExceptionHandler(e)
return False
    # Query message tag information
    # User-facing: yes
    def fetchTag(self, optional = None):
        """
        Query message tag information
        Parameters:
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [optional]
arrArgs = self._mergeArgs([], tmpArgs)
arrArgs[Channel.METHOD] = 'fetch_tag';
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Delete a message tag
    # User-facing: yes
    def deleteTag(self, tagName, optional = None):
        """
        Delete a message tag
        Parameters:
            tagName: tag name
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [tagName, optional]
arrArgs = self._mergeArgs([Channel.TAG_NAME], tmpArgs)
arrArgs[Channel.METHOD] = 'delete_tag';
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Query the tags associated with a user
    # User-facing: yes
    def queryUserTag(self, userId, optional = None):
        """
        Query the tags associated with a user
        Parameters:
            userId: user ID
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [userId, optional]
arrArgs = self._mergeArgs([Channel.USER_ID], tmpArgs)
arrArgs[Channel.METHOD] = 'query_user_tags';
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    # Query the device type for a channelId
    # User-facing: yes
    def queryDeviceType(self, channelId, optional = None):
        """
        Query the device type for a channelId
        Parameters:
            channelId: ID of the user's channel
            optional: optional parameters
        Returns:
            On success: Python dict; on failure: False
        """
try:
tmpArgs = [channelId, optional]
arrArgs = self._mergeArgs([Channel.CHANNEL_ID], tmpArgs)
arrArgs[Channel.METHOD] = 'query_device_type';
return self._commonProcess(arrArgs)
except ChannelException, e:
#self._channelExceptionHandler(e)
return False
    #
    # Internal helpers
    #
def _checkString(self, string, minLen, maxLen):
if( isinstance(string, str) and len(string) >= minLen
and len(string) <= maxLen ):
return True
else:
return False
def _adjustOpt(self, opt):
if(not ((opt is not None) and isinstance(opt, dict))):
raise ChannelException('no params are set', Channel.CHANNEL_SDK_PARAM)
if(not opt.has_key(Channel.TIMESTAMP)):
opt[Channel.TIMESTAMP] = int(time.time())
opt[Channel.HOST] = Channel.DEFAULT_HOST
opt[Channel.API_KEY] = self._apiKey
if(opt.has_key(Channel.SECRET_KEY)):
del opt[Channel.SECRET_KEY]
def _genSign(self, method, url, arrContent):
gather = method + url
keys = arrContent.keys()
keys.sort()
for key in keys:
gather += key + '=' + str(arrContent[key])
gather += self._secretKey
sign = hashlib.md5(urllib.quote_plus(gather))
return sign.hexdigest()
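    # Signing sketch (illustrative values): for a POST to url with params
    # {'apikey': 'AK', 'method': 'fetch_tag'} and secret key 'SK', gather is
    # 'POST' + url + 'apikey=AK' + 'method=fetch_tag' + 'SK',
    # and the signature is md5(quote_plus(gather)).hexdigest().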
def _baseControl(self, opt):
resource = 'channel'
if(opt.has_key(Channel.CHANNEL_ID) ):
if(opt[Channel.CHANNEL_ID] is not None and opt[Channel.METHOD] not in self._method_channel_in_body):
resource = opt[Channel.CHANNEL_ID]
del opt[Channel.CHANNEL_ID]
host = opt[Channel.HOST]
del opt[Channel.HOST]
url = 'http://' + host + '/rest/2.0/' + Channel.PRODUCT + '/'
url += resource
http_method = 'POST'
opt[Channel.SIGN] = self._genSign(http_method, url, opt)
request = RequestCore(url)
headers = dict()
headers['Content-Type'] = 'application/x-www-form-urlencoded'
headers['User-Agent'] = 'Baidu Channel Service Pythonsdk Client'
for (headerKey , headerValue) in headers.items():
headerValue = headerValue.replace('\r', '')
headerValue = headerValue.replace('\n', '')
if (headerValue is not None):
request.add_header(headerKey, headerValue)
request.set_method(http_method)
request.set_body(urllib.urlencode(opt))
if(isinstance(self._curlOpts, dict)):
request.set_curlopts(self._curlOpts)
request.handle_request()
return ResponseCore(request.get_response_header(),request.get_response_body(),request.get_response_code())
def _commonProcess(self, paramOpt):
self._adjustOpt(paramOpt)
ret = self._baseControl(paramOpt)
if( ret is None):
raise ChannelException('base control returned None object',
Channel.CHANNEL_SDK_SYS)
if(ret.isOK()):
result = json.loads(ret.body)
if (result is None):
raise ChannelException(ret.body,
Channel.CHANNEL_SDK_HTTP_STATUS_OK_BUT_RESULT_ERROR)
self._requestId = result['request_id']
return result
result = json.loads(ret.body)
if(result is None):
raise ChannelException('ret body:' + ret.body,
Channel.CHANNEL_SDK_HTTP_STATUS_ERROR_AND_RESULT_ERROR)
self._requestId = result['request_id']
raise ChannelException(result['error_msg'], result['error_code'])
def _mergeArgs(self, arrNeed, tmpArgs):
arrArgs = dict()
if( len(arrNeed) == 0 and len(tmpArgs) == 0):
return arrArgs
if(len(tmpArgs)-1 != len(arrNeed) and len(tmpArgs) != len(arrNeed)):
            keys = '('
            for key in arrNeed:
                keys += key + ','
            # strip the trailing comma added by the loop
            if keys.endswith(','):
                keys = keys[:-1]
            keys += ')'
            raise ChannelException('invalid sdk params, params ' + keys + ' are needed', Channel.CHANNEL_SDK_PARAM)
if(len(tmpArgs)-1 == len(arrNeed) and tmpArgs[-1] is not None
and (not isinstance(tmpArgs[-1], dict)) ):
raise ChannelException ('invalid sdk params, optional param must bean dict', Channel.CHANNEL_SDK_PARAM)
idx = 0
if(isinstance(arrNeed, list)):
for key in arrNeed:
if(tmpArgs[idx] is None):
raise ChannelException ('lack param ' + key, Channel.CHANNEL_SDK_PARAM)
arrArgs[key] = tmpArgs[idx]
idx = idx + 1
if(len(tmpArgs) == idx + 1 and tmpArgs[idx] is not None):
for (key, value) in tmpArgs[idx].items():
if(not arrArgs.has_key(key) and value is not None):
arrArgs[key] = value
return arrArgs
# send Http Request
def send_request(self, arrArgs, echo = False):
conn = httplib.HTTPConnection("channel.api.duapp.com") #hostname, port
#headers = dict()
#headers['Content-Type'] = 'application/x-www-form-urlencoded'
#headers['User-Agent'] = 'Baidu Channel Service Pythonsdk Client'
headers = {"Content-type": "application/x-www-form-urlencoded", "User-Agent": "Baidu Channel Service Pythonsdk Client"}
conn.request("POST", "rest/2.0/channel/channel", urllib.urlencode(arrArgs), headers)
response = conn.getresponse()
if response.status == 200 and response.reason == "OK":
            return response.read() if echo else "@Received Data OK."
else:
raise RuntimeError("Http request exception:", response.status)
def send_request1(self, opt):
request = RequestCore("http://channel.api.duapp.com/rest/2.0/channel/channel")
headers = dict()
headers['Content-Type'] = 'application/x-www-form-urlencoded'
headers['User-Agent'] = 'Baidu Channel Service Pythonsdk Client'
for (headerKey , headerValue) in headers.items():
headerValue = headerValue.replace('\r', '')
headerValue = headerValue.replace('\n', '')
if (headerValue is not None):
request.add_header(headerKey, headerValue)
request.set_method('POST')
request.set_body(urllib.urlencode(opt))
if(isinstance(self._curlOpts, dict)):
request.set_curlopts(self._curlOpts)
request.handle_request()
ret = ResponseCore(request.get_response_header(),request.get_response_body(),request.get_response_code())
if( ret is None):
raise ChannelException('base control returned None object',
Channel.CHANNEL_SDK_SYS)
if(ret.isOK()):
result = ret.body
print result
if (result is None):
raise ChannelException(ret.body,
Channel.CHANNEL_SDK_HTTP_STATUS_OK_BUT_RESULT_ERROR)
#self._requestId = result['request_id']
return result
result = json.loads(ret.body)
if(result is None):
raise ChannelException('ret body:' + ret.body,
Channel.CHANNEL_SDK_HTTP_STATUS_ERROR_AND_RESULT_ERROR)
self._requestId = result['request_id']
raise ChannelException(result['error_msg'], result['error_code'])
def _channelExceptionHandler(self, ex):
print ex.error_msg, self._arrayErrorMap[ex.error_code]
|
[
"="
] |
=
|
2ab6c871fdd07dcd31a44b4c2ab1a93718ab24fc
|
4860fc856e6c75cc980c92399a2f673bf6ee06e2
|
/hello/migrations/0003_sitemessage_event_date.py
|
f7fac7459be5640425d79342fedc65881ff9c27e
|
[] |
no_license
|
RobertPastor/music-rungis
|
bcf994bfd515e3cdc220b12b32dd2cdead9a35c6
|
b69153ded934d1d317b828f2a1aa4dbdc5b2caae
|
refs/heads/master
| 2023-03-17T18:08:44.411749
| 2022-11-19T12:16:06
| 2022-11-19T12:16:06
| 62,034,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-04-13 19:35
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hello', '0002_sitemessage'),
]
operations = [
migrations.AddField(
model_name='sitemessage',
name='event_date',
field=models.DateField(default=datetime.datetime(2016, 4, 13, 21, 35, 9, 134000)),
),
]
|
[
"robert.pastor0691@orange.fr"
] |
robert.pastor0691@orange.fr
|
9992ae849702757f553f0c77125a04659fad1629
|
6e885227c59b5b8a5a7359beb938139fca98a16f
|
/contacts/wsgi.py
|
ec24a26034a81360f3da321164adb11883330151
|
[] |
no_license
|
BukhosiMoyo/contact_list_app
|
880ada2703a50014ca30ac6f1f65dac54a8fe49a
|
122df2f328f0fd375f28587112daf14190d50896
|
refs/heads/master
| 2023-02-23T17:49:20.942877
| 2021-01-14T21:18:39
| 2021-01-14T21:18:39
| 331,508,576
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for contacts project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'contacts.settings')
application = get_wsgi_application()
|
[
"bukhosizimcode@gmail.com"
] |
bukhosizimcode@gmail.com
|
723f727f775cbac7453aa196794d2a1531788594
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/bncdea001/question2.py
|
0a3ac49bba9175ed2a5208dcfbf1e0a3657c7dbc
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# Program to check if time is valid
def main():
    h = int(input("Enter the hours: \n"))
    m = int(input("Enter the minutes: \n"))
    s = int(input("Enter the seconds: \n"))
if 0<=h<=23 and 0<=m<=59 and 0<=s<=59:
print("Your time is valid. ")
else:
print("Your time is invalid. ")
main()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
9dfc3d5217726a4d5b436b42281e13f320ce4a7b
|
e7c6304677326cc40b33d72b7ee079ce3c14af4f
|
/getPredictionsSTS-BAlternatives_c.py
|
6f17588f1041ab7fc8cc466c71703d46b565b595
|
[
"MIT"
] |
permissive
|
m-hahn/fairseq
|
77f86676dd3a0793b616da89e8bc286b3c913da6
|
8508699326640a6a7a83ed4de17ac986e6213bbe
|
refs/heads/master
| 2023-02-24T02:56:20.477873
| 2021-01-29T01:38:59
| 2021-01-29T01:38:59
| 256,415,870
| 0
| 0
|
MIT
| 2020-05-01T02:02:19
| 2020-04-17T06:03:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
from fairseq.models.roberta import RobertaModel
roberta = RobertaModel.from_pretrained(
'checkpoints_STS-B/',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='STS-B-bin'
)
import torch
label_fn = lambda label: roberta.task.label_dictionary.string(
torch.LongTensor([label + roberta.task.label_dictionary.nspecial])
)
ncorrect, nsamples = 0, 0
roberta.cuda()
roberta.eval()
evaluatedSoFar = set()
lineNumbers = 0
with open('/u/scr/mhahn/PRETRAINED/GLUE/glue_data/STS-B/dev_alternatives_c.tsv', "r") as fin:
with open('/u/scr/mhahn/PRETRAINED/GLUE/glue_data/STS-B/dev_alternatives_c_predictions_fairseq.tsv', "w") as outFile:
while True:
lineNumbers += 1
try:
line = next(fin).strip()
except UnicodeDecodeError:
print("UnicodeDecodeError", lineNumbers)
continue
if line == "#####":
originalSentences = next(fin) # the original
separation = int(next(fin).strip()) # position of separation
tokenizedSentences = next(fin)
line = next(fin)
#print(line)
subset, sentences = line.strip().split("\t")
sentences = sentences.strip().split(" ")
# print(sentences, separation)
sentences = [sentences[:separation], sentences[separation:]]
# print(sentences)
assert len(sentences[1]) > 1, (line, separation, sentences)
# quit()
for i in range(2):
sentences[i] = ("".join(sentences[i])).replace("▁", " ").replace("</s>", "").strip()
assert len(sentences[1]) > 1, (line, separation, sentences)
assert sentences[0].endswith("."), (line, separation, sentences)
# print(sentences)
if tuple(sentences) in evaluatedSoFar:
continue
evaluatedSoFar.add(tuple(sentences))
if len(evaluatedSoFar) % 100 == 0:
print(len(evaluatedSoFar), sentences)
tokens = roberta.encode(sentences[0], sentences[1])
# https://github.com/pytorch/fairseq/issues/1009
features = roberta.extract_features(tokens)
prediction = float(5.0 * roberta.model.classification_heads['sentence_classification_head'](features))
print("\t".join([sentences[0], sentences[1], str(prediction)]), file=outFile)
|
[
"mhahn29@gmail.com"
] |
mhahn29@gmail.com
|
c2237e88d3b71f74dceb4aafd4548925815cecce
|
d926e5308b5fe794d56e57d02041ea0c2436af6e
|
/dockerfiles/application/sonarqube/build.py
|
1cd7ed2db98961fe9ca7043b4c9550ff6ad80d4c
|
[] |
no_license
|
Amos-x/Blog-scripts
|
fa1db3805551aeff22fc326b302ec433ac595add
|
95427b45cfdd5c2de6b3f8f5b4b68983fe1c5751
|
refs/heads/master
| 2022-04-19T21:19:12.070218
| 2020-03-15T15:15:27
| 2020-03-15T15:15:27
| 208,993,845
| 0
| 0
| null | 2022-03-29T21:56:43
| 2019-09-17T07:59:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
# -*- coding:utf-8 -*-
# __author__ = Amos
# Email = 379833553@qq.com
# Create_at = 2018/11/8 3:48 PM
# FileName = build
from config import config as CONFIG
from utils.common import exec_shell, container_is_exist
def build_sonarqube():
if not container_is_exist('sonarqube'):
pull = 'docker pull sonarqube:7.1'
exec_shell(pull)
build = 'docker run -d --name sonarqube \
-p 9000:9000 \
-e SONARQUBE_JDBC_USERNAME={mysql_username} \
-e SONARQUBE_JDBC_PASSWORD={mysql_password} \
-e SONARQUBE_JDBC_URL=jdbc:mysql://{mysql_host}:3306/{soanr_db_name}?useUnicode=true\&characterEncoding=utf8\&rewriteBatchedStatements=true\&useConfigs=maxPerformance \
sonarqube:7.1'.format(mysql_host=CONFIG.MYSQL_HOST,mysql_username=CONFIG.MYSQL_USERNAME,
mysql_password=CONFIG.MYSQL_PASSWORD,soanr_db_name=CONFIG.MYSQL_NAME_SONARQUBE)
exec_shell(build)
exec_shell('docker start sonarqube')
else:
        print('sonarqube container already exists, skipping installation')
|
[
"379833553@qq.com"
] |
379833553@qq.com
|
d26571867c2f8a8198ce2da00ebd7c1366a86ecd
|
d8b1effe86a654d1831b56fdd8d6a9248b29fe01
|
/Week_4/Assignment_3/assignment3.py
|
6f35dc7ef13eaee69ac858ab419489061ddaf63d
|
[] |
no_license
|
Nidhiks2000/eyrc-mooc
|
b273e376b5ae31779469c48443dee4f73ade7c82
|
8e49668569a89700954165136ea29524143ff49f
|
refs/heads/master
| 2023-07-18T05:26:22.311527
| 2021-09-08T16:48:23
| 2021-09-08T16:48:23
| 404,424,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,017
|
py
|
# No other modules apart from 'csv' and 'datetime' need to be imported
# as they aren't required to solve the assignment
# Import required module/s
import csv
from datetime import datetime as dt
def dayofweek(d, m, y):
t = [ 0, 3, 2, 5, 0, 3,
5, 1, 4, 6, 2, 4 ]
y -= m < 3
return (( y + int(y / 4) - int(y / 100)
+ int(y / 400) + t[m - 1] + d) % 7)
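# Worked example (Sakamoto's algorithm, 0 = Sunday): dayofweek(13, 4, 2016)
# gives (2016 + 504 - 20 + 5 + 5 + 13) % 7 = 2523 % 7 = 3, i.e. Wednesday.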
def readWorkSheet(file_name):
"""Reads the input CSV file of Work Sheet and creates a mapping of date and office name where he worked.
Parameters
----------
file_name : str
CSV file name of Work Sheet
Returns
-------
dict
Mapping of the date and office name where he worked as { Key : Value } pair
Example
-------
>>> csv_file_name = 'week4_assignment3_sample.csv'
>>> print( readWorkSheet( csv_file_name ) )
{
'2021-03-26': 'A', '2021-04-01': 'B', '2021-04-20': 'B', '2021-04-04': '-', '2021-04-12': 'A', '2021-04-23': 'A',
'2021-04-03': 'B', '2021-03-29': 'A', '2021-03-28': '-', '2021-03-31': 'A', '2021-04-10': 'B', '2021-04-16': 'A',
'2021-04-24': 'B', '2021-04-11': '-', '2021-04-13': 'B'
}
"""
date_office_name_mapping = {}
input_file_obj = open(file_name, 'r')
############## ADD YOUR CODE HERE ##############
reader=csv.DictReader(input_file_obj)
for rows in reader:
now = rows['date']
x = now.split("-")
#print(x)
#print(dt.date(2020,7,24).strftime('%A'))
res = dayofweek(int(x[2]),int(x[1]),int(x[0]))
if(res!=0 and res%2!=0):
date_office_name_mapping[now] = "A"
elif (res!=0 and res%2 == 0):
date_office_name_mapping[now] = "B"
elif (res == 0):
date_office_name_mapping[now] = "-"
##################################################
input_file_obj.close()
return date_office_name_mapping
def calculateOfficeHrs(mapping_dict):
"""Calculate the number of hours worked in office A and B with the given mapping of date and office name.
Parameters
----------
mapping_dict : dict
Mapping of the date and office name where he worked as { Key : Value } pair
Returns
-------
tuple
Number of hours worked in office A and B as pair
Example
-------
>>> date_office_name_mapping = {
'2021-03-26': 'A', '2021-04-01': 'B', '2021-04-20': 'B', '2021-04-04': '-', '2021-04-12': 'A', '2021-04-23': 'A',
'2021-04-03': 'B', '2021-03-29': 'A', '2021-03-28': '-', '2021-03-31': 'A', '2021-04-10': 'B', '2021-04-16': 'A',
'2021-04-24': 'B', '2021-04-11': '-', '2021-04-13': 'B'
}
>>> print( calculateOfficeHrs( date_office_name_mapping ) )
(48, 36)
"""
no_hrs_office_A, no_hrs_office_B = 0, 0
############## ADD YOUR CODE HERE ##############
for key,value in mapping_dict.items():
if (value == "A"):
no_hrs_office_A+=8
elif(value == "B"):
no_hrs_office_B+=6
##################################################
return (no_hrs_office_A, no_hrs_office_B)
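# With the sample mapping in the docstring above: 6 days at office A x 8 h = 48
# and 6 days at office B x 6 h = 36, matching the documented result (48, 36).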
def writeOfficeWorkSheet(mapping_dict, out_file_name):
"""Writes a CSV file with date and office name where the person worked on each day.
Parameters
----------
mapping_dict : dict
Mapping of the date and office name where he worked as { Key : Value } pair
out_file_name : str
File name of CSV file for writing the data to
"""
output_file_obj = open(out_file_name, 'w')
############## ADD YOUR CODE HERE ##############
writer = csv.writer(output_file_obj,delimiter=',')
writer.writerow(['date','office_name'])
for key,value in mapping_dict.items():
writer.writerow([key,value])
##################################################
output_file_obj.close()
if __name__ == "__main__":
"""Main function, code begins here.
"""
csv_file_name = 'week4_assignment3_sample.csv'
date_office_name_mapping = readWorkSheet(csv_file_name)
print(date_office_name_mapping)
total_hrs_office_A_B = calculateOfficeHrs(date_office_name_mapping)
print(total_hrs_office_A_B)
out_csv_file_name = 'output_week4_assignment3_sample.csv'
writeOfficeWorkSheet(date_office_name_mapping, out_csv_file_name)
|
[
"Happysunshine.disroot.org"
] |
Happysunshine.disroot.org
|
145447a9d8ca15126c29c3cdd7b0d1c28972fe54
|
d2a181395347b6b7308cdbd9a411c79775a035c8
|
/tests/loggers/test_csv.py
|
dcdb6421c517f5474a910124471f2b301b058c20
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
rephrase-ai/pytorch-lightning
|
d30d552288d1bf6f65a605e5c8893583ecc58862
|
8bd7b1bdd7d3f723822e78908033cf0a6743713a
|
refs/heads/master
| 2023-06-06T11:32:41.765882
| 2021-06-23T12:09:53
| 2021-06-23T12:09:53
| 291,268,679
| 2
| 0
|
Apache-2.0
| 2020-08-29T12:38:33
| 2020-08-29T12:38:32
| null |
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
import pytest
import torch
from pytorch_lightning.core.saving import load_hparams_from_yaml
from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.loggers.csv_logs import ExperimentWriter
def test_file_logger_automatic_versioning(tmpdir):
"""Verify that automatic versioning works"""
root_dir = tmpdir.mkdir("exp")
root_dir.mkdir("version_0")
root_dir.mkdir("version_1")
logger = CSVLogger(save_dir=tmpdir, name="exp")
assert logger.version == 2
def test_file_logger_manual_versioning(tmpdir):
"""Verify that manual versioning works"""
root_dir = tmpdir.mkdir("exp")
root_dir.mkdir("version_0")
root_dir.mkdir("version_1")
root_dir.mkdir("version_2")
logger = CSVLogger(save_dir=tmpdir, name="exp", version=1)
assert logger.version == 1
def test_file_logger_named_version(tmpdir):
"""Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """
exp_name = "exp"
tmpdir.mkdir(exp_name)
expected_version = "2020-02-05-162402"
logger = CSVLogger(save_dir=tmpdir, name=exp_name, version=expected_version)
logger.log_hyperparams({"a": 1, "b": 2})
logger.save()
assert logger.version == expected_version
assert os.listdir(tmpdir / exp_name) == [expected_version]
assert os.listdir(tmpdir / exp_name / expected_version)
@pytest.mark.parametrize("name", ['', None])
def test_file_logger_no_name(tmpdir, name):
"""Verify that None or empty name works"""
logger = CSVLogger(save_dir=tmpdir, name=name)
logger.save()
assert logger.root_dir == tmpdir
assert os.listdir(tmpdir / 'version_0')
@pytest.mark.parametrize("step_idx", [10, None])
def test_file_logger_log_metrics(tmpdir, step_idx):
logger = CSVLogger(tmpdir)
metrics = {
"float": 0.3,
"int": 1,
"FloatTensor": torch.tensor(0.1),
"IntTensor": torch.tensor(1),
}
logger.log_metrics(metrics, step_idx)
logger.save()
path_csv = os.path.join(logger.log_dir, ExperimentWriter.NAME_METRICS_FILE)
with open(path_csv, 'r') as fp:
lines = fp.readlines()
assert len(lines) == 2
assert all([n in lines[0] for n in metrics])
def test_file_logger_log_hyperparams(tmpdir):
logger = CSVLogger(tmpdir)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
'a': {
'b': 'c'
}
},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar='buzz')),
"layer": torch.nn.BatchNorm1d
}
logger.log_hyperparams(hparams)
logger.save()
path_yaml = os.path.join(logger.log_dir, ExperimentWriter.NAME_HPARAMS_FILE)
params = load_hparams_from_yaml(path_yaml)
assert all([n in params for n in hparams])
|
[
"noreply@github.com"
] |
rephrase-ai.noreply@github.com
|
d15f4682a12eaabed8dec8aae69d51acc2542be6
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2/Omsg/PyRunPancakeLarge.py
|
001bb8508f54e1f99cced37f0cb802254ec1da0b
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,657
|
py
|
import numpy as np
# ---- config ---- #
FileInput="dataPancakesLarge.in"
FileOutput="dataPancakesLarge.out"
# ---------------- #
def start(pancakes):
pancakes=pancakes[::-1]
pan=[]
turns=0
for p in pancakes:
pan.append(p)
i=0
for p in pan:
if p=="-":
pan=turn_pancakes(pan,i)
turns=turns+1
i=i+1
return str(turns)
def build_pancakes(pan):
pancakes=""
for p in pan:
pancakes=pancakes+p
return pancakes
def turn_pancakes(pan,start):
i=0
for p in pan:
if i>=start:
if pan[i]=="-":
pan[i]="+"
else:
pan[i]="-"
i=i+1
return pan
def file_load():
check=[]
with open(FileInput) as f:
for line in f:
check.append(line)
return check
def normal_mode():
result = start("+-+")
print "------------------------------------"
print "Result: "+str(result)
print "------------------------------------"
def array_mode():
f = open(FileOutput, 'w')
check = file_load()
print check
for i in range(np.size(check)):
if i>0:
writeString = "Case #"+str(i)+": "+str(start(str(check[i]).replace("\n","")))
f.write(writeString+"\n")
print writeString
print "------------------------------------"
f.close()
if __name__ == "__main__":
print "------------------------------------"
print "Start program"
print "------------------------------------"
array_mode()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
1504d4dc673f453ce8d6a9116235cb370f0c3b20
|
b74a332412c303921e8e067e83cbae47d64a1c9f
|
/common/libs/food/FoodService.py
|
0337a5785c2dbeb7523e2209c76d2f5e1b7cba13
|
[] |
no_license
|
whisnos/weix
|
aaaf490d8f80b7ea991fa8261a74cabff8c12da1
|
4ff6f5d03e8cc304c8b12c278488b284672210c0
|
refs/heads/master
| 2022-12-11T03:36:26.547935
| 2019-07-13T04:27:22
| 2019-07-13T04:27:22
| 195,082,638
| 0
| 0
| null | 2022-12-08T05:50:35
| 2019-07-03T15:35:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 858
|
py
|
# -*- coding: utf-8 -*-
from application import app,db
from common.models.food.FoodStockChangeLog import FoodStockChangeLog
from common.models.food.Food import Food
from common.libs.Helper import geneTime
class FoodService():
@staticmethod
def setStockChangeLog( food_id = 0,quantity = 0,note = '' ):
if food_id < 1:
return False
food_info = Food.query.filter_by( id = food_id ).first()
if not food_info:
return False
model_stock_change = FoodStockChangeLog()
model_stock_change.food_id = food_id
model_stock_change.unit = quantity
model_stock_change.total_stock = food_info.stock
model_stock_change.note = note
model_stock_change.created_time = geneTime()
db.session.add(model_stock_change)
db.session.commit()
return True
|
[
"whisnos@163.com"
] |
whisnos@163.com
|
e9a06b60aac5a8da4edf76fdebd109d5673af1a3
|
f62fd455e593a7ad203a5c268e23129473d968b6
|
/sahara-6.0.1/sahara/plugins/images.py
|
c30bc6d5e7dc0e8181b81bda91a62c4f11268d9d
|
[
"Apache-2.0"
] |
permissive
|
MinbinGong/OpenStack-Ocata
|
5d17bcd47a46d48ff9e71e2055f667836174242f
|
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
|
refs/heads/master
| 2021-06-23T05:24:37.799927
| 2017-08-14T04:33:05
| 2017-08-14T04:33:05
| 99,709,985
| 0
| 2
| null | 2020-07-22T22:06:22
| 2017-08-08T15:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 39,713
|
py
|
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections
import copy
import functools
import itertools
from os import path
import jsonschema
import six
import yaml
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.plugins import exceptions as p_ex
from sahara.utils import files
def transform_exception(from_type, to_type, transform_func=None):
"""Decorator to transform exception types.
:param from_type: The type of exception to catch and transform.
:param to_type: The type of exception to raise instead.
:param transform_func: A function to transform from_type into
to_type, which must be of the form func(exc, to_type).
Defaults to:
lambda exc, new_type: new_type(exc.message)
"""
if not transform_func:
transform_func = lambda exc, new_type: new_type(exc.message)
def decorator(func):
@functools.wraps(func)
def handler(*args, **kwargs):
try:
func(*args, **kwargs)
except from_type as exc:
raise transform_func(exc, to_type)
return handler
return decorator
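# A usage sketch (hypothetical function name, not part of this module):
#
#     @transform_exception(ValueError, p_ex.ImageValidationError)
#     def check_java_version(version_text):
#         int(version_text)
#
# A ValueError raised inside check_java_version is re-raised as an
# ImageValidationError built from the original exception's message.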
def validate_instance(instance, validators, reconcile=True, **kwargs):
"""Runs all validators against the specified instance.
:param instance: An instance to validate.
:param validators: A sequence of ImageValidators.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:raises ImageValidationError: If validation fails.
"""
with instance.remote() as remote:
for validator in validators:
validator.validate(remote, reconcile=reconcile, **kwargs)
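# Typical call (sketch): run a validator tree against an instance without
# attempting any remediation on the image:
#     validate_instance(instance, image_validator.validators, reconcile=False)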
class ImageArgument(object):
"""An argument used by an image manifest."""
SPEC_SCHEMA = {
"type": "object",
"items": {
"type": "object",
"properties": {
"target_variable": {
"type": "string",
"minLength": 1
},
"description": {
"type": "string",
"minLength": 1
},
"default": {
"type": "string",
"minLength": 1
},
"required": {
"type": "boolean",
"minLength": 1
},
"choices": {
"type": "array",
"minLength": 1,
"items": {
"type": "string"
}
}
}
}
}
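    # An illustrative argument spec accepted by from_spec below (names and values
    # are made up for the example):
    #
    #     {"java-version": {"description": "JDK major version to install",
    #                       "default": "8",
    #                       "required": False,
    #                       "choices": ["7", "8"]}}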
@classmethod
def from_spec(cls, spec):
"""Constructs and returns a set of arguments from a specification.
:param spec: The specification for the argument set.
:return A dict of arguments built to the specification.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
arguments = {name: cls(name,
arg.get('description'),
arg.get('default'),
arg.get('required'),
arg.get('choices'))
for name, arg in six.iteritems(spec)}
reserved_names = ['distro', 'reconcile']
for name, arg in six.iteritems(arguments):
if name in reserved_names:
raise p_ex.ImageValidationSpecificationError(
_("The following argument names are reserved: "
"{names}").format(reserved_names))
if not arg.default and not arg.required:
raise p_ex.ImageValidationSpecificationError(
_("Argument {name} is not required and must specify a "
"default value.").format(name=arg.name))
if arg.choices and arg.default and arg.default not in arg.choices:
raise p_ex.ImageValidationSpecificationError(
_("Argument {name} specifies a default which is not one "
"of its choices.").format(name=arg.name))
return arguments
def __init__(self, name, description=None, default=None, required=False,
choices=None):
self.name = name
self.description = description
self.default = default
self.required = required
self.choices = choices
@six.add_metaclass(abc.ABCMeta)
class ImageValidator(object):
"""Validates the image spawned to an instance via a set of rules."""
@abc.abstractmethod
def validate(self, remote, reconcile=True, **kwargs):
"""Validates the image.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:raises ImageValidationError: If validation fails.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class SaharaImageValidatorBase(ImageValidator):
"""Base class for Sahara's native image validation."""
DISTRO_KEY = 'distro'
RECONCILE_KEY = 'reconcile'
ORDERED_VALIDATORS_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"minProperties": 1,
"maxProperties": 1
}
}
_DISTRO_FAMILES = {
'centos': 'redhat',
'centos7': 'redhat',
'fedora': 'redhat',
'redhatenterpriseserver': 'redhat',
'ubuntu': 'debian'
}
@staticmethod
def get_validator_map(custom_validator_map=None):
"""Gets the map of validator name token to validator class.
:param custom_validator_map: A map of validator names and classes to
add to the ones Sahara provides by default. These will take
precedence over the base validators in case of key overlap.
:return A map of validator names and classes.
"""
default_validator_map = {
'package': SaharaPackageValidator,
'script': SaharaScriptValidator,
'any': SaharaAnyValidator,
'all': SaharaAllValidator,
'os_case': SaharaOSCaseValidator,
'argument_case': SaharaArgumentCaseValidator,
'argument_set': SaharaArgumentSetterValidator,
}
if custom_validator_map:
default_validator_map.update(custom_validator_map)
return default_validator_map
@classmethod
def from_yaml(cls, yaml_path, validator_map=None, resource_roots=None):
"""Constructs and returns a validator from the provided yaml file.
:param yaml_path: The relative path to a yaml file.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return A SaharaImageValidator built to the yaml specification.
"""
validator_map = validator_map or {}
resource_roots = resource_roots or []
file_text = files.get_file_text(yaml_path)
spec = yaml.safe_load(file_text)
validator_map = cls.get_validator_map(validator_map)
return cls.from_spec(spec, validator_map, resource_roots)
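    # Rough shape of the YAML this consumes (illustrative only; each validator's
    # body depends on its type, e.g. package/script/os_case from get_validator_map):
    #
    #     validators:
    #       - package: some-required-package
    #       - script: relative/path/to/script.sh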
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Constructs and returns a validator from a specification object.
:param spec: The specification for the validator.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return A validator built to the specification.
"""
pass
@classmethod
def from_spec_list(cls, specs, validator_map, resource_roots):
"""Constructs a list of validators from a list of specifications.
:param specs: A list of validator specifications, each of which
will be a dict of size 1, where the key represents the validator
type and the value respresents its specification.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A list of validators.
"""
validators = []
for spec in specs:
validator_class, validator_spec = cls.get_class_from_spec(
spec, validator_map)
validators.append(validator_class.from_spec(
validator_spec, validator_map, resource_roots))
return validators
@classmethod
def get_class_from_spec(cls, spec, validator_map):
"""Gets the class and specification from a validator dict.
:param spec: A validator specification including its type: a dict of
size 1, where the key represents the validator type and the value
            represents its configuration.
:param validator_map: A map of validator name to class.
:return: A tuple of validator class and configuration.
"""
key, value = list(six.iteritems(spec))[0]
validator_class = validator_map.get(key, None)
if not validator_class:
raise p_ex.ImageValidationSpecificationError(
_("Validator type %s not found.") % validator_class)
return validator_class, value
    class ValidationAttemptFailed(object):
        """An object representing a failed validation attempt.
        Primarily for use by the SaharaAnyValidator, which must aggregate
        failures for error exposition purposes.
        """
        def __init__(self, exception):
            self.exception = exception
        def __bool__(self):
            return False
        def __nonzero__(self):
            return False
def try_validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate, but returns rather than raising on failure.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:return True if successful, ValidationAttemptFailed object if failed.
"""
try:
self.validate(
remote, reconcile=reconcile,
image_arguments=image_arguments, **kwargs)
return True
except p_ex.ImageValidationError as exc:
return self.ValidationAttemptFailed(exc)
class SaharaImageValidator(SaharaImageValidatorBase):
"""The root of any tree of SaharaImageValidators.
This validator serves as the root of the tree for SaharaImageValidators,
and provides any needed initialization (such as distro retrieval.)
"""
SPEC_SCHEMA = {
"title": "SaharaImageValidator",
"type": "object",
"properties": {
"validators": SaharaImageValidatorBase.ORDERED_VALIDATORS_SCHEMA
},
"required": ["validators"]
}
def get_argument_list(self):
return [argument for name, argument
in six.iteritems(self.arguments)]
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Constructs and returns a validator from a specification object.
:param spec: The specification for the validator: a dict containing
the key "validators", which contains a list of validator
specifications.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return A SaharaImageValidator containing all specified validators.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
arguments_spec = spec.get('arguments', {})
arguments = ImageArgument.from_spec(arguments_spec)
validators_spec = spec['validators']
validator = SaharaAllValidator.from_spec(
validators_spec, validator_map, resource_roots)
return cls(validator, arguments)
def __init__(self, validator, arguments):
"""Constructor method.
:param validator: A SaharaAllValidator containing the specified
validators.
"""
self.validator = validator
self.validators = validator.validators
self.arguments = arguments
@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate the image.
Before deferring to contained validators, performs one-time setup
steps such as distro discovery.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
argument_values = {}
for name, argument in six.iteritems(self.arguments):
if name not in image_arguments:
if argument.required:
raise p_ex.ImageValidationError(
_("Argument {name} is required for image "
"processing.").format(name=name))
else:
argument_values[name] = argument.default
else:
value = image_arguments[name]
choices = argument.choices
if choices and value not in choices:
raise p_ex.ImageValidationError(
_("Value for argument {name} must be one of "
"{choices}.").format(name=name, choices=choices))
else:
argument_values[name] = value
argument_values[self.DISTRO_KEY] = remote.get_os_distrib()
self.validator.validate(remote, reconcile=reconcile,
image_arguments=argument_values)
class SaharaPackageValidator(SaharaImageValidatorBase):
"""A validator that checks package installation state on the instance."""
class Package(object):
def __init__(self, name, version=None):
self.name = name
self.version = version
def __str__(self):
return ("%s-%s" % (self.name, self.version)
if self.version else self.name)
_SINGLE_PACKAGE_SCHEMA = {
"oneOf": [
{
"type": "object",
"minProperties": 1,
"maxProperties": 1,
"additionalProperties": {
"type": "object",
"properties": {
"version": {
"type": "string",
"minLength": 1
},
}
},
},
{
"type": "string",
"minLength": 1
}
]
}
SPEC_SCHEMA = {
"title": "SaharaPackageValidator",
"oneOf": [
_SINGLE_PACKAGE_SCHEMA,
{
"type": "array",
"items": _SINGLE_PACKAGE_SCHEMA,
"minLength": 1
}
]
}
@classmethod
def _package_from_spec(cls, spec):
"""Builds a single package object from a specification.
:param spec: May be a string or single-length dictionary of name to
configuration values.
:return: A package object.
"""
if isinstance(spec, six.string_types):
return cls.Package(spec, None)
else:
package, properties = list(six.iteritems(spec))[0]
version = properties.get('version', None)
return cls.Package(package, version)
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds a package validator from a specification.
:param spec: May be a string, a single-length dictionary of name to
configuration values, or a list containing any number of either or
both of the above. Configuration values may include:
version: The version of the package to check and/or install.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A validator that will check that the specified package or
packages are installed.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
packages = ([cls._package_from_spec(package_spec)
for package_spec in spec]
if isinstance(spec, list)
else [cls._package_from_spec(spec)])
return cls(packages)
def __init__(self, packages):
self.packages = packages
@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate package installation on the image.
Even if reconcile=True, attempts to verify previous package
installation offline before using networked tools to validate or
install new packages.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
env_distro = image_arguments[self.DISTRO_KEY]
        env_family = self._DISTRO_FAMILES.get(env_distro)
        if not env_family:
            raise p_ex.ImageValidationError(
                _("Unknown distro: cannot verify or install packages."))
        check, install = self._DISTRO_TOOLS[env_family]
try:
check(self, remote)
except (ex.SubprocessException, ex.RemoteCommandException,
RuntimeError):
if reconcile:
install(self, remote)
check(self, remote)
else:
raise
def _dpkg_check(self, remote):
check_cmd = ("dpkg -s %s" %
" ".join(str(package) for package in self.packages))
return _sudo(remote, check_cmd)
def _rpm_check(self, remote):
check_cmd = ("rpm -q %s" %
" ".join(str(package) for package in self.packages))
return _sudo(remote, check_cmd)
def _yum_install(self, remote):
install_cmd = (
"yum install -y %s" %
" ".join(str(package) for package in self.packages))
_sudo(remote, install_cmd)
def _apt_install(self, remote):
install_cmd = (
"apt-get -y install %s" %
" ".join(str(package) for package in self.packages))
return _sudo(remote, install_cmd)
_DISTRO_TOOLS = {
"redhat": (_rpm_check, _yum_install),
"debian": (_dpkg_check, _apt_install)
}
class SaharaScriptValidator(SaharaImageValidatorBase):
"""A validator that runs a script on the instance."""
_DEFAULT_ENV_VARS = [SaharaImageValidatorBase.RECONCILE_KEY,
SaharaImageValidatorBase.DISTRO_KEY]
SPEC_SCHEMA = {
"title": "SaharaScriptValidator",
"oneOf": [
{
"type": "object",
"minProperties": 1,
"maxProperties": 1,
"additionalProperties": {
"type": "object",
"properties": {
"env_vars": {
"type": "array",
"items": {
"type": "string"
}
},
"output": {
"type": "string",
"minLength": 1
},
"inline": {
"type": "string",
"minLength": 1
}
},
}
},
{
"type": "string"
}
]
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds a script validator from a specification.
:param spec: May be a string or a single-length dictionary of name to
configuration values. Configuration values include:
env_vars: A list of environment variable names to send to the
script.
output: A key into which to put the stdout of the script in the
image_arguments of the validation run.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A validator that will run a script on the image.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
script_contents = None
if isinstance(spec, six.string_types):
script_path = spec
env_vars, output_var = cls._DEFAULT_ENV_VARS, None
else:
script_path, properties = list(six.iteritems(spec))[0]
env_vars = cls._DEFAULT_ENV_VARS + properties.get('env_vars', [])
output_var = properties.get('output', None)
script_contents = properties.get('inline')
if not script_contents:
for root in resource_roots:
file_path = path.join(root, script_path)
script_contents = files.try_get_file_text(file_path)
if script_contents:
break
if not script_contents:
raise p_ex.ImageValidationSpecificationError(
_("Script %s not found in any resource roots.") % script_path)
return SaharaScriptValidator(script_contents, env_vars, output_var)
def __init__(self, script_contents, env_vars=None, output_var=None):
"""Constructor method.
:param script_contents: A string representation of the script.
:param env_vars: A list of environment variables to send to the
script.
:param output_var: A key into which to put the stdout of the script in
the image_arguments of the validation run.
:return: A SaharaScriptValidator.
"""
self.script_contents = script_contents
self.env_vars = env_vars or []
self.output_var = output_var
@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate by running a script on the image.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
Note that the key SIV_RECONCILE will be set to 1 if the script
should reconcile and 0 otherwise; all scripts should act on this
input if possible. The key SIV_DISTRO will also contain the
distro representation, per `lsb_release -is`.
:raises ImageValidationError: If validation fails.
"""
arguments = copy.deepcopy(image_arguments)
arguments[self.RECONCILE_KEY] = 1 if reconcile else 0
script = "\n".join(["%(env_vars)s",
"bash <<_SIV_",
"%(script)s",
"_SIV_"])
env_vars = "\n".join("export %s=%s" % (key, value) for (key, value)
                             in six.iteritems(arguments)
if key in self.env_vars)
script = script % {"env_vars": env_vars,
"script": self.script_contents}
code, stdout = _sudo(remote, script)
if self.output_var:
image_arguments[self.output_var] = stdout
@six.add_metaclass(abc.ABCMeta)
class SaharaAggregateValidator(SaharaImageValidatorBase):
"""An abstract class representing an ordered list of other validators."""
SPEC_SCHEMA = SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds the aggregate validator from a specification.
:param spec: A list of validator definitions, each of which is a
single-length dictionary of name to configuration values.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: An aggregate validator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
validators = cls.from_spec_list(spec, validator_map, resource_roots)
return cls(validators)
def __init__(self, validators):
self.validators = validators
class SaharaAnyValidator(SaharaAggregateValidator):
"""A list of validators, only one of which must succeed."""
def _try_all(self, remote, reconcile=True,
image_arguments=None, **kwargs):
results = []
for validator in self.validators:
result = validator.try_validate(remote, reconcile=reconcile,
image_arguments=image_arguments,
**kwargs)
results.append(result)
if result:
break
return results
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate any of the contained validators.
Note that if reconcile=True, this validator will first run all
contained validators using reconcile=False, and succeed immediately
should any pass validation. If all fail, it will only then run them
using reconcile=True, and again succeed immediately should any pass.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
results = self._try_all(remote, reconcile=False,
image_arguments=image_arguments)
if reconcile and not any(results):
results = self._try_all(remote, reconcile=True,
image_arguments=image_arguments)
if not any(results):
raise p_ex.AllValidationsFailedError(result.exception for result
in results)
class SaharaAllValidator(SaharaAggregateValidator):
"""A list of validators, all of which must succeed."""
def validate(self, remote, reconcile=True, image_arguments=None, **kwargs):
"""Attempts to validate all of the contained validators.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
for validator in self.validators:
validator.validate(remote, reconcile=reconcile,
image_arguments=image_arguments)
class SaharaOSCaseValidator(SaharaImageValidatorBase):
"""A validator which will take different actions depending on distro."""
_distro_tuple = collections.namedtuple('Distro', ['distro', 'validator'])
SPEC_SCHEMA = {
"type": "array",
"minLength": 1,
"items": {
"type": "object",
"minProperties": 1,
"maxProperties": 1,
"additionalProperties":
SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA,
}
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds an os_case validator from a specification.
:param spec: A list of single-length dictionaries. The key of each is
a distro or family name and the value under each key is a list of
validators (all of which must succeed.)
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A SaharaOSCaseValidator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
distros = itertools.chain(*(six.iteritems(distro_spec)
for distro_spec in spec))
distros = [
cls._distro_tuple(key, SaharaAllValidator.from_spec(
value, validator_map, resource_roots))
for (key, value) in distros]
return cls(distros)
def __init__(self, distros):
"""Constructor method.
:param distros: A list of distro tuples (distro, list of validators).
"""
self.distros = distros
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate depending on distro.
May match the OS by specific distro or by family (centos may match
"centos" or "redhat", for instance.) If multiple keys match the
distro, only the validators under the first matched key will be run.
If no keys match, no validators are run, and validation proceeds.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
env_distro = image_arguments[self.DISTRO_KEY]
family = self._DISTRO_FAMILES.get(env_distro)
matches = {env_distro, family} if family else {env_distro}
for distro, validator in self.distros:
if distro in matches:
validator.validate(
remote, reconcile=reconcile,
image_arguments=image_arguments)
break
class SaharaArgumentCaseValidator(SaharaImageValidatorBase):
"""A validator which will take different actions depending on distro."""
SPEC_SCHEMA = {
"type": "object",
"properties": {
"argument_name": {
"type": "string",
"minLength": 1
},
"cases": {
"type": "object",
"minProperties": 1,
"additionalProperties":
SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA,
},
},
"additionalProperties": False,
"required": ["argument_name", "cases"]
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds an argument_case validator from a specification.
:param spec: A dictionary with two items: "argument_name", containing
a string indicating the argument to be checked, and "cases", a
dictionary. The key of each item in the dictionary is a value
which may or may not match the argument value, and the value is
a list of validators to be run in case it does.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A SaharaArgumentCaseValidator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
argument_name = spec['argument_name']
cases = {key: SaharaAllValidator.from_spec(
value, validator_map, resource_roots)
for key, value in six.iteritems(spec['cases'])}
return cls(argument_name, cases)
def __init__(self, argument_name, cases):
"""Constructor method.
:param argument_name: The name of an argument.
:param cases: A dictionary of possible argument value to a
sub-validator to run in case of a match.
"""
self.argument_name = argument_name
self.cases = cases
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate depending on argument value.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
arg = self.argument_name
if arg not in image_arguments:
raise p_ex.ImageValidationError(
_("Argument {name} not found.").format(name=arg))
value = image_arguments[arg]
if value in self.cases:
self.cases[value].validate(
remote, reconcile=reconcile,
image_arguments=image_arguments)
class SaharaArgumentSetterValidator(SaharaImageValidatorBase):
"""A validator which sets a specific argument to a specific value."""
SPEC_SCHEMA = {
"type": "object",
"properties": {
"argument_name": {
"type": "string",
"minLength": 1
},
"value": {
"type": "string",
"minLength": 1
},
},
"additionalProperties": False,
"required": ["argument_name", "value"]
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds an argument_set validator from a specification.
:param spec: A dictionary with two items: "argument_name", containing
a string indicating the argument to be set, and "value", a value
to which to set that argument.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A SaharaArgumentSetterValidator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
argument_name = spec['argument_name']
value = spec['value']
return cls(argument_name, value)
def __init__(self, argument_name, value):
"""Constructor method.
:param argument_name: The name of an argument.
:param value: A value to which to set that argument.
"""
self.argument_name = argument_name
self.value = value
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate depending on argument value.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
"""
image_arguments[self.argument_name] = self.value
def _sudo(remote, cmd, **kwargs):
return remote.execute_command(cmd, run_as_root=True, **kwargs)
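def _example_validator_from_spec():  # pragma: no cover
    """Hedged usage sketch added for illustration; not part of the original
    module. It assembles a validator tree from an in-memory spec shaped like
    SaharaImageValidator.SPEC_SCHEMA. The package names are made-up
    placeholders, and it assumes the module's ImageArgument.from_spec helper
    (defined earlier in this file) accepts an empty spec.
    """
    spec = {
        'validators': [
            {'package': ['openjdk-8-jdk']},
            {'any': [
                {'package': 'curl'},
                {'package': 'wget'},
            ]},
        ]
    }
    validator_map = SaharaImageValidatorBase.get_validator_map()
    return SaharaImageValidator.from_spec(spec, validator_map,
                                          resource_roots=[])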
|
[
"gongwayne@hotmail.com"
] |
gongwayne@hotmail.com
|
df62c390162d75f6230cac5e9e41da256e08bfd0
|
f33b30743110532ddae286ba1b34993e61669ab7
|
/352. Data Stream as Disjoint Intervals.py
|
7fcf3a2a59b62c44815f299b37bcf78d3bfd76c8
|
[] |
no_license
|
c940606/leetcode
|
fe9dcee7a5daa4d52999d5f53253dd6dd33c348b
|
631df2ce6892a6fbb3e435f57e90d85f8200d125
|
refs/heads/master
| 2021-07-10T14:01:26.164966
| 2020-08-16T10:46:16
| 2020-08-16T10:46:16
| 186,588,449
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
import bisect
from typing import List
class SummaryRanges:
def __init__(self):
"""
Initialize your data structure here.
"""
self.res = []
def addNum(self, val: int) -> None:
loc = bisect.bisect_left(self.res, [val])
if loc < len(self.res):
if self.res[loc][0] == val:
return
if self.res[loc][0] > val:
if loc >= 1:
if self.res[loc - 1][1] >= val :
return
if self.res[loc - 1][1] + 1 == val and self.res[loc][0] - 1 == val:
self.res[loc - 1:loc + 1] = [[self.res[loc - 1][0], self.res[loc][1]]]
elif self.res[loc - 1][1] + 1 == val:
self.res[loc-1:loc] = [[self.res[loc-1][0], val]]
elif self.res[loc][0] - 1 == val:
self.res[loc:loc+1] = [[val, self.res[loc][1]]]
else:
if self.res[loc][0] - 1 == val:
self.res[loc:loc+1] = [[val, self.res[loc][1]]]
else:
self.res.insert(loc, [val, val])
else:
self.res.insert(loc, [val, val])
        else:
            # loc == len(self.res) here; guard the empty-list case before
            # indexing self.res[loc - 1].
            if not self.res:
                self.res.insert(loc, [val, val])
            elif self.res[loc - 1][1] >= val:
                return
            elif self.res[loc - 1][1] + 1 == val:
                self.res[loc - 1:loc] = [[self.res[loc - 1][0], val]]
            else:
                self.res.insert(loc, [val, val])
def getIntervals(self) -> List[List[int]]:
return self.res
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
a = SummaryRanges()
a.addNum(1)
print(a.res)
a.addNum(3)
print(a.res)
a.addNum(7)
print(a.res)
a.addNum(2)
a.addNum(6)
print(a.getIntervals())
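# With the inserts above, getIntervals() should end as [[1, 3], [6, 7]]:
# 1, 2 and 3 merge into one interval, 6 and 7 into another.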
|
[
"762307667@qq.com"
] |
762307667@qq.com
|
79771f0560e4f83582b509058b21e9a77b8696ae
|
8c568d5ba0c4f05b10ac831d4961f34925d3db8e
|
/09_面向对象特性/yc_12_类属性.py
|
bb5951530999d19e6324a3e2a9c8366c183e082d
|
[] |
no_license
|
Yang-yc/Python
|
dbca12bf10f7eb628ab2676e56ea5dc8ebe025af
|
985bafccb45232e3c2e24d14f5a1e0dd1ff67065
|
refs/heads/master
| 2022-12-31T00:47:01.659889
| 2020-09-27T07:11:32
| 2020-09-27T07:11:32
| 285,573,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
class Tool(object):
    # Define a class attribute with an assignment statement to count all tool objects
count = 0
def __init__(self, name):
self.name = name
        # Increment the class attribute by 1
Tool.count += 1
# 1. Create tool objects
tool1 = Tool("斧头")
tool2 = Tool("榔头")
tool3 = Tool("水桶")
# 2. Print the total number of tool objects
print(Tool.count)
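# Expected output: 3, since three Tool instances were created above.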
|
[
"ycc20121404@163.com"
] |
ycc20121404@163.com
|
37c6e185d005cfd7fee82133847c02321d343861
|
c3e0792872e6bc34299b64e532f20187ec92dd0b
|
/uvrnmt/imagesfromsentence.py
|
8f999fa08ebe4422ec2aa039219a1c39d6f59364
|
[] |
no_license
|
zubairabid/hybrid-mt
|
fc1fcb8c7b8e4837d1a8b383b9e0f6766cb32073
|
242e0588d2f6b694e5bc9b50b036e5a1c48c7a20
|
refs/heads/master
| 2022-06-10T14:15:46.959808
| 2020-05-06T02:24:28
| 2020-05-06T02:24:28
| 255,111,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pickle
# In[2]:
def load_stopwords(path_to_stopwords):
stopwords = []
with open(path_to_stopwords, 'rb') as f:
stopwords = pickle.load(f)
return stopwords
# In[3]:
def load_index_from_word(path_to_en2id):
en2id = {}
with open(path_to_en2id, 'rb') as f:
en2id = pickle.load(f)
return en2id
# In[4]:
def load_lookup_table(path_to_lookup_table):
lookup_table = []
with open(path_to_lookup_table, 'rb') as f:
lookup_table = pickle.load(f)
return lookup_table
# In[5]:
def preprocess(sentences):
processed_sentences = []
for sentence in sentences:
processed_sentences.append(sentence.lower())
return processed_sentences
# In[6]:
def topics_from_dataset(sentences):
print("Generating topics and weights for dataset")
vectorizer = CountVectorizer()
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(vectorizer.fit_transform(sentences))
topics = vectorizer.get_feature_names()
weights = tfidf.toarray()
return topics, weights
# In[7]:
def sentence_remove_stopwords(sentence, stopwords):
filtered_words = []
reduced_sentence = ''
wordlist = sentence.strip().split(' ')
for word in wordlist:
if word not in stopwords:
filtered_words.append(word)
reduced_sentence = ' '.join(filtered_words)
return reduced_sentence
# In[8]:
def topics_from_sentence(sentence_id, sentence, weights, topics):
top_topics = []
sentence_topics = []
weight = weights[sentence_id]
location = np.argsort(-weight)
limit = min(10, len(weight))
for i in range(limit):
if weight[location[i]] > 0.0:
top_topics.append(topics[location[i]])
for word in sentence.split():
if word.lower() in top_topics:
sentence_topics.append(word)
return sentence_topics
# In[9]:
def images_from_topics(sentence_topics, stopwords, en2id, lookup_table):
imagelist = []
for topic in sentence_topics:
if topic in en2id.keys() and not topic in stopwords:
if en2id[topic] in lookup_table:
#print('<', topic, '> is in lookup table')
#print(topic, lookup_table[en2id[topic]])
for image in lookup_table[en2id[topic]]:
if image > 0.0 and not image in imagelist:
imagelist.append(image)
else:
pass
#print('>', topic, '< not in lookup table')
else:
if topic not in en2id.keys():
pass
#print('|', topic, '| not in dictionary')
return imagelist
# In[10]:
def get_features(sentences, cap):
path_to_en2id = 'en2id.pkl'
path_to_stopwords = 'stopwords-en.pkl'
path_to_lookup_table = 'cap2image_en2fr.pickle'
sentences = preprocess(sentences)
images_for_sentence = []
en2id = load_index_from_word(path_to_en2id)
stopwords = load_stopwords(path_to_stopwords)
lookup_table = load_lookup_table(path_to_lookup_table)
topics, weights = topics_from_dataset(sentences)
for sentence_id, sentence in enumerate(sentences):
sentence_topics = topics_from_sentence(sentence_id, sentence, weights, topics)
imagelist = images_from_topics(sentence_topics, stopwords, en2id, lookup_table)
if not imagelist:
imagelist=[0]
images_for_sentence.append(imagelist)
feature_index = np.load('./data/train-resnet50-avgpool.npy')
batch_sentence_features = []
for i, dummy in enumerate(sentences):
sentence = sentences[i]
images = images_for_sentence[i]
sentence_features = []
for image in images:
image_feature = feature_index[image-1]
sentence_features.append(image_feature)
if len(sentence_features) > cap:
sentence_features = sentence_features[:cap]
elif len(sentence_features) < cap:
for j in range(cap-len(sentence_features)):
sentence_features.append(np.zeros((2048,), dtype=float ))
batch_sentence_features.append(sentence_features)
pt = np.array(batch_sentence_features)
return pt
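# Hedged usage sketch (added for illustration; not part of the original
# script). Assuming the pickle files and './data/train-resnet50-avgpool.npy'
# referenced above exist, a small batch could be processed like this; the
# sentences are made-up placeholders:
#
#     feats = get_features(["a man rides a horse", "two dogs play outside"], cap=5)
#     print(feats.shape)  # expected (2, 5, 2048), given the padding logic above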
|
[
"zubairabid1999+github@gmail.com"
] |
zubairabid1999+github@gmail.com
|
7302e33689ecf7a8f5a508a4ca323c6c352a2fa7
|
69bcc45028038351a7f891025df1f8e7d4b855f1
|
/supervised_learning/0x04-error_analysis/2-precision.py
|
bbbb8199ffd49f17a12649012c3a6b52dc6fe389
|
[] |
no_license
|
linkjavier/holbertonschool-machine_learning
|
6db799844821d450fed2a33a8819cb8df0fef911
|
c7b6ea4c37b7c5dc41e63cdb8142b3cdfb3e1d23
|
refs/heads/main
| 2023-08-17T21:00:24.182003
| 2021-09-09T05:47:06
| 2021-09-09T05:47:06
| 304,503,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
#!/usr/bin/env python3
""" Precision Module """
import numpy as np
def precision(confusion):
""" Function that calculates the sensitivity for each
class in a confussion matrix
"""
classes, _ = confusion.shape
classPrecision = np.zeros(classes)
for classItem in range(classes):
classPrecision[classItem] = np.divide(
confusion[classItem][classItem], np.sum(confusion[:, classItem]))
return classPrecision
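if __name__ == '__main__':
    # Illustrative check added for this write-up; not part of the original
    # task file. Rows are true labels, columns are predictions.
    conf = np.array([[5, 1],
                     [2, 2]])
    print(precision(conf))  # -> [0.71428571 0.66666667], i.e. [5/7, 2/3]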
|
[
"linkjavier@hotmail.com"
] |
linkjavier@hotmail.com
|
838c922568619e9c765ef08d146d4c44efb7f403
|
297497957c531d81ba286bc91253fbbb78b4d8be
|
/testing/web-platform/tests/tools/third_party/pytest/src/_pytest/_io/terminalwriter.py
|
578b4507e30a12c62aeff72ba90fca207718f8ec
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
marco-c/gecko-dev-comments-removed
|
7a9dd34045b07e6b22f0c636c0a836b9e639f9d3
|
61942784fb157763e65608e5a29b3729b0aa66fa
|
refs/heads/master
| 2023-08-09T18:55:25.895853
| 2023-08-01T00:40:39
| 2023-08-01T00:40:39
| 211,297,481
| 0
| 0
|
NOASSERTION
| 2019-09-29T01:27:49
| 2019-09-27T10:44:24
|
C++
|
UTF-8
|
Python
| false
| false
| 6,906
|
py
|
"""Helper functions for writing to terminals and files."""
import os
import shutil
import sys
from typing import Optional
from typing import Sequence
from typing import TextIO
from .wcwidth import wcswidth
from _pytest.compat import final
def get_terminal_width() -> int:
width, _ = shutil.get_terminal_size(fallback=(80, 24))
if width < 40:
width = 80
return width
def should_do_markup(file: TextIO) -> bool:
if os.environ.get("PY_COLORS") == "1":
return True
if os.environ.get("PY_COLORS") == "0":
return False
if "NO_COLOR" in os.environ:
return False
if "FORCE_COLOR" in os.environ:
return True
return (
hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb"
)
@final
class TerminalWriter:
_esctable = dict(
black=30,
red=31,
green=32,
yellow=33,
blue=34,
purple=35,
cyan=36,
white=37,
Black=40,
Red=41,
Green=42,
Yellow=43,
Blue=44,
Purple=45,
Cyan=46,
White=47,
bold=1,
light=2,
blink=5,
invert=7,
)
def __init__(self, file: Optional[TextIO] = None) -> None:
if file is None:
file = sys.stdout
if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32":
try:
import colorama
except ImportError:
pass
else:
file = colorama.AnsiToWin32(file).stream
assert file is not None
self._file = file
self.hasmarkup = should_do_markup(file)
self._current_line = ""
self._terminal_width: Optional[int] = None
self.code_highlight = True
@property
def fullwidth(self) -> int:
if self._terminal_width is not None:
return self._terminal_width
return get_terminal_width()
@fullwidth.setter
def fullwidth(self, value: int) -> None:
self._terminal_width = value
@property
def width_of_current_line(self) -> int:
"""Return an estimate of the width so far in the current line."""
return wcswidth(self._current_line)
def markup(self, text: str, **markup: bool) -> str:
for name in markup:
if name not in self._esctable:
raise ValueError(f"unknown markup: {name!r}")
if self.hasmarkup:
esc = [self._esctable[name] for name, on in markup.items() if on]
if esc:
text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m"
return text
def sep(
self,
sepchar: str,
title: Optional[str] = None,
fullwidth: Optional[int] = None,
**markup: bool,
) -> None:
if fullwidth is None:
fullwidth = self.fullwidth
if sys.platform == "win32":
fullwidth -= 1
if title is not None:
N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1)
fill = sepchar * N
line = f"{fill} {title} {fill}"
else:
line = sepchar * (fullwidth // len(sepchar))
if len(line) + len(sepchar.rstrip()) <= fullwidth:
line += sepchar.rstrip()
self.line(line, **markup)
def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None:
if msg:
current_line = msg.rsplit("\n", 1)[-1]
if "\n" in msg:
self._current_line = current_line
else:
self._current_line += current_line
msg = self.markup(msg, **markup)
try:
self._file.write(msg)
except UnicodeEncodeError:
msg = msg.encode("unicode-escape").decode("ascii")
self._file.write(msg)
if flush:
self.flush()
def line(self, s: str = "", **markup: bool) -> None:
self.write(s, **markup)
self.write("\n")
def flush(self) -> None:
self._file.flush()
def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None:
"""Write lines of source code possibly highlighted.
Keeping this private for now because the API is clunky. We should discuss how
to evolve the terminal writer so we can have more precise color support, for example
being able to write part of a line in one color and the rest in another, and so on.
"""
if indents and len(indents) != len(lines):
raise ValueError(
"indents size ({}) should have same size as lines ({})".format(
len(indents), len(lines)
)
)
if not indents:
indents = [""] * len(lines)
source = "\n".join(lines)
new_lines = self._highlight(source).splitlines()
for indent, new_line in zip(indents, new_lines):
self.line(indent + new_line)
def _highlight(self, source: str) -> str:
"""Highlight the given source code if we have markup support."""
from _pytest.config.exceptions import UsageError
if not self.hasmarkup or not self.code_highlight:
return source
try:
from pygments.formatters.terminal import TerminalFormatter
from pygments.lexers.python import PythonLexer
from pygments import highlight
import pygments.util
except ImportError:
return source
else:
try:
highlighted: str = highlight(
source,
PythonLexer(),
TerminalFormatter(
bg=os.getenv("PYTEST_THEME_MODE", "dark"),
style=os.getenv("PYTEST_THEME"),
),
)
return highlighted
except pygments.util.ClassNotFound:
raise UsageError(
"PYTEST_THEME environment variable had an invalid value: '{}'. "
"Only valid pygment styles are allowed.".format(
os.getenv("PYTEST_THEME")
)
)
except pygments.util.OptionError:
raise UsageError(
"PYTEST_THEME_MODE environment variable had an invalid value: '{}'. "
"The only allowed values are 'dark' and 'light'.".format(
os.getenv("PYTEST_THEME_MODE")
)
)
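if __name__ == "__main__":
    # Illustrative sketch added here; not part of the pytest source. It writes
    # a separator and a couple of marked-up lines to stdout (colors appear
    # only when markup is enabled for the terminal).
    tw = TerminalWriter()
    tw.sep("=", "example session", bold=True)
    tw.line("passed", green=True)
    tw.line("plain text")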
|
[
"mcastelluccio@mozilla.com"
] |
mcastelluccio@mozilla.com
|
a7d0db784cf881f05dba47cc0b12b2e1fbbdb62d
|
d8913c1512146bb42756f61ba0872d73179884eb
|
/listinghouse/serializers.py
|
80ebe2255ddd5aae19eed9132ea1d96f83bea056
|
[
"MIT"
] |
permissive
|
sahin88/Django_Rest_Framework_Redux_React_Estate_App_FullStack
|
2ed305c399edfab05ce3653e8bcaf36f09ae9015
|
10e31c4071bcebc0e4401f42084211d170b2ea56
|
refs/heads/main
| 2023-03-22T17:00:37.102265
| 2021-03-16T17:26:53
| 2021-03-16T17:26:53
| 319,297,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
from rest_framework import serializers
from .models import Listings
class ListingsSerializers(serializers.ModelSerializer):
class Meta:
model = Listings
fields = ('title', 'adress', 'city', 'state', 'price', 'house_type', 'sqft', 'open_house',
                  'sale_type', 'photo_main', 'bathrooms', 'bedrooms', 'slug')
class ListingsDetailSerializers(serializers.ModelSerializer):
class Meta:
model = Listings
fields = '__all__'
lookup_field = 'slug'
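# Hedged usage sketch (added; not part of the original app code). In a DRF
# view one might serialize a queryset roughly like this; the queryset below
# is a made-up placeholder:
# serializer = ListingsSerializers(Listings.objects.all(), many=True)
# serializer.data  # list of dicts limited to the fields declared above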
|
[
"sahinmuratogur@gmail.com"
] |
sahinmuratogur@gmail.com
|
13fc739aab16ab38d7a30ba8b63752d9ac9fbcd2
|
ef9d0d7d305ed829ff3ef1c66869d80517eebfc0
|
/tfx/orchestration/portable/python_executor_operator_test.py
|
388a145199578452084c6774e4b14c14df31c77a
|
[
"Apache-2.0"
] |
permissive
|
Saiprasad16/tfx
|
22ee62ccef1ec4b6fbb4dfa1ece5d7f701918c94
|
c1e0704b2a83232469f55598efcdb7808b6c909f
|
refs/heads/master
| 2023-04-28T09:58:04.522405
| 2021-05-10T09:01:22
| 2021-05-10T09:02:37
| 366,007,194
| 1
| 0
|
Apache-2.0
| 2021-05-10T10:41:53
| 2021-05-10T10:40:57
| null |
UTF-8
|
Python
| false
| false
| 6,801
|
py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.python_executor_operator."""
import os
from typing import Any, Dict, List, Text
import tensorflow as tf
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import python_executor_operator
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import standard_artifacts
from tfx.utils import test_case_utils
from google.protobuf import text_format
class InprocessExecutor(base_executor.BaseExecutor):
"""A Fake in-process executor what returns execution result."""
def Do(
self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> execution_result_pb2.ExecutorOutput:
executor_output = execution_result_pb2.ExecutorOutput()
python_executor_operator._populate_output_artifact(executor_output,
output_dict)
return executor_output
class NotInprocessExecutor(base_executor.BaseExecutor):
"""A Fake not-in-process executor what writes execution result to executor_output_uri."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
executor_output = execution_result_pb2.ExecutorOutput()
python_executor_operator._populate_output_artifact(executor_output,
output_dict)
with fileio.open(self._context.executor_output_uri, 'wb') as f:
f.write(executor_output.SerializeToString())
class InplaceUpdateExecutor(base_executor.BaseExecutor):
"""A Fake noop executor."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
model = output_dict['output_key'][0]
model.name = 'my_model'
class PythonExecutorOperatorTest(test_case_utils.TfxTest):
def testRunExecutor_with_InprocessExecutor(self):
    executor_spec = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.InprocessExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
    operator = python_executor_operator.PythonExecutorOperator(executor_spec)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {'key': 'value'}
stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')
executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')
executor_output = operator.run_executor(
data_types.ExecutionInfo(
execution_id=1,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
stateful_working_dir=stateful_working_dir,
execution_output_uri=executor_output_uri))
self.assertProtoPartiallyEquals(
"""
output_artifacts {
key: "output_key"
value {
artifacts {
}
}
}""", executor_output)
def testRunExecutor_with_NotInprocessExecutor(self):
    executor_spec = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.NotInprocessExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
    operator = python_executor_operator.PythonExecutorOperator(executor_spec)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {'key': 'value'}
stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')
executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')
executor_output = operator.run_executor(
data_types.ExecutionInfo(
execution_id=1,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
stateful_working_dir=stateful_working_dir,
execution_output_uri=executor_output_uri))
self.assertProtoPartiallyEquals(
"""
output_artifacts {
key: "output_key"
value {
artifacts {
}
}
}""", executor_output)
def testRunExecutor_with_InplaceUpdateExecutor(self):
    executor_spec = text_format.Parse(
"""
class_path: "tfx.orchestration.portable.python_executor_operator_test.InplaceUpdateExecutor"
""", executable_spec_pb2.PythonClassExecutableSpec())
    operator = python_executor_operator.PythonExecutorOperator(executor_spec)
input_dict = {'input_key': [standard_artifacts.Examples()]}
output_dict = {'output_key': [standard_artifacts.Model()]}
exec_properties = {
'string': 'value',
'int': 1,
'float': 0.0,
# This should not happen on production and will be
# dropped.
'proto': execution_result_pb2.ExecutorOutput()
}
stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')
executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')
executor_output = operator.run_executor(
data_types.ExecutionInfo(
execution_id=1,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
stateful_working_dir=stateful_working_dir,
execution_output_uri=executor_output_uri))
self.assertProtoPartiallyEquals(
"""
output_artifacts {
key: "output_key"
value {
artifacts {
custom_properties {
key: "name"
value {
string_value: "my_model"
}
}
}
}
}""", executor_output)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
1897c30ae963a3b1a0dcb49603f6a9cdfbbf5117
|
2741c717fe58ac6bb78fdac2deb0c16f566590e5
|
/python-rest/myenv/bin/django-admin.py
|
0908c66abdc9f0422f98d550667abcd3fc6d2cad
|
[] |
no_license
|
guarav00009/Auth
|
bf72eed3cbec0675356a34aa0ae0cec021d0ab29
|
7ad60c5d8a74464413d89f74beefd2811acf983d
|
refs/heads/master
| 2020-12-05T12:11:29.193902
| 2020-01-06T14:04:09
| 2020-01-06T14:04:09
| 232,106,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
#!/var/www/html/python-rest/myenv/bin/python3.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"gauravp@clavax.com"
] |
gauravp@clavax.com
|
6c4eddee5209f157275e0c1c486d4a9000dd913a
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/nextGreater_20200626113110.py
|
7ce3a916d9cbe406ef38804518f0fc5d2f6c8ae5
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
# [4,5,2,25]
def nextGreater(arr):
    # For each element, scan to the right for the first strictly greater value.
    for i in range(len(arr)):
        nxt = -1
        for j in range(i+1, len(arr)):
            if arr[j] > arr[i]:
                nxt = arr[j]
                break
        print(arr[i], '--->', nxt)
nextGreater([4,5,2,25])
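# Expected pairs from the call above: 4 ---> 5, 5 ---> 25, 2 ---> 25, 25 ---> -1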
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
488d37a9557bbaa9ff0e1776cd2ed117da779dae
|
0ddcfcbfc3faa81c79e320c34c35a972dab86498
|
/puzzles/paint_house_iii.py
|
051c5ec114bf5ae06f30f68479b4daa2fe5f9c4f
|
[] |
no_license
|
IvanWoo/coding-interview-questions
|
3311da45895ac4f3c394b22530079c79a9215a1c
|
1312305b199b65a11804a000432ebe28d1fba87e
|
refs/heads/master
| 2023-08-09T19:46:28.278111
| 2023-06-21T01:47:07
| 2023-06-21T01:47:07
| 135,307,912
| 0
| 0
| null | 2023-07-20T12:14:38
| 2018-05-29T14:24:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,763
|
py
|
# https://leetcode.com/problems/paint-house-iii/
"""
There is a row of m houses in a small city, each house must be painted with one of the n colors (labeled from 1 to n), some houses that have been painted last summer should not be painted again.
A neighborhood is a maximal group of continuous houses that are painted with the same color.
For example: houses = [1,2,2,3,3,2,1,1] contains 5 neighborhoods [{1}, {2,2}, {3,3}, {2}, {1,1}].
Given an array houses, an m x n matrix cost and an integer target where:
houses[i]: is the color of the house i, and 0 if the house is not painted yet.
cost[i][j]: is the cost of paint the house i with the color j + 1.
Return the minimum cost of painting all the remaining houses in such a way that there are exactly target neighborhoods. If it is not possible, return -1.
Example 1:
Input: houses = [0,0,0,0,0], cost = [[1,10],[10,1],[10,1],[1,10],[5,1]], m = 5, n = 2, target = 3
Output: 9
Explanation: Paint houses of this way [1,2,2,1,1]
This array contains target = 3 neighborhoods, [{1}, {2,2}, {1,1}].
Cost of paint all houses (1 + 1 + 1 + 1 + 5) = 9.
Example 2:
Input: houses = [0,2,1,2,0], cost = [[1,10],[10,1],[10,1],[1,10],[5,1]], m = 5, n = 2, target = 3
Output: 11
Explanation: Some houses are already painted, Paint the houses of this way [2,2,1,2,2]
This array contains target = 3 neighborhoods, [{2,2}, {1}, {2,2}].
Cost of paint the first and last house (10 + 1) = 11.
Example 3:
Input: houses = [3,1,2,3], cost = [[1,1,1],[1,1,1],[1,1,1],[1,1,1]], m = 4, n = 3, target = 3
Output: -1
Explanation: Houses are already painted with a total of 4 neighborhoods [{3},{1},{2},{3}] different of target = 3.
Constraints:
m == houses.length == cost.length
n == cost[i].length
1 <= m <= 100
1 <= n <= 20
1 <= target <= m
0 <= houses[i] <= n
1 <= cost[i][j] <= 104
"""
from functools import cache
from math import inf
# bottom up
def min_cost(
houses: list[int], cost: list[list[int]], m: int, n: int, target: int
) -> int:
# dp[k][i][c] := min cost to form k groups with first i houses and last house paint with c
dp = [
[[inf for _ in range(n + 1)] for _ in range(m + 1)] for _ in range(target + 1)
]
# init values: 0 groups with first 0 houses is dummy
for c in range(n + 1):
dp[0][0][c] = 0
for k in range(1, target + 1):
for i in range(k, m + 1):
hi = houses[i - 1]
hj = houses[i - 2] if i >= 2 else 0
si, ei = (hi, hi) if hi else (1, n)
sj, ej = (hj, hj) if hj else (1, n)
for ci in range(si, ei + 1):
v = 0 if ci == hi else cost[i - 1][ci - 1]
for cj in range(sj, ej + 1):
# when ci == cj: same group
# when ci != cj: form new group
dp[k][i][ci] = min(
dp[k][i][ci], dp[k - int(ci != cj)][i - 1][cj] + v
)
ans = min(dp[target][m])
return -1 if ans == inf else ans
# top down
def min_cost(
houses: list[int], cost: list[list[int]], m: int, n: int, target: int
) -> int:
@cache
def dp(i: int, p: int, h: int) -> int:
"""
Args:
i (int): index
p (int): previous color
h (int): neighborhoods
Returns:
int: cost
"""
if (h > target) or (i == m and h != target):
return inf
if i == m:
return 0
if houses[i] != 0:
return dp(i + 1, houses[i], h + int(p != houses[i]))
best = inf
for nxt_c, cst in enumerate(cost[i], start=1):
best = min(best, dp(i + 1, nxt_c, h + int(p != nxt_c)) + cst)
return best
res = dp(0, 0, 0)
return res if res != inf else -1
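if __name__ == "__main__":
    # Quick check added for illustration; not part of the original file.
    # Uses Example 1 from the problem statement above; expected output: 9.
    print(min_cost([0, 0, 0, 0, 0],
                   [[1, 10], [10, 1], [10, 1], [1, 10], [5, 1]],
                   m=5, n=2, target=3))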
|
[
"tyivanwu@gmail.com"
] |
tyivanwu@gmail.com
|
492e82e716c69c6f83f8eabf883c64187f99db7a
|
118546c7bf7fe3063ed68e1c6270b33ed500c3c9
|
/python8/ex08.py
|
f223d0f2db2d8d7c1744afd39fd84c04f31a3fa4
|
[] |
no_license
|
yoonah95/Python_practice
|
83b1070f1c95d57a9ea81d2ec3898521f98544f4
|
1e8fbded66e789ba77b3af5499520b8e8e01a6a1
|
refs/heads/master
| 2022-06-12T20:55:38.490142
| 2020-05-08T02:20:20
| 2020-05-08T02:20:20
| 256,125,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
import sys
from io import StringIO  # Python 3: StringIO lives in the io module
stdout = sys.stdout
sys.stdout = f = StringIO()
print('Sample output')
print('good')
print('Good')
sys.stdout = stdout
s = f.getvalue()
print('Done-------------')
print(s)
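# Note (added): 's' now holds the three captured lines ("Sample output",
# "good", "Good"), so the final print(s) echoes them after the 'Done' marker.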
|
[
"yoon.a1@hanmail.net"
] |
yoon.a1@hanmail.net
|
056777178631514733ce8bcafa9806a3c76a691e
|
f352f9915c0b9d6f7ea010169f5dafd3a9fb8638
|
/lib/nltk/cluster/kmeans.py
|
7b6e14d16c9aef7425f6a48074a9d70471b5e92d
|
[] |
no_license
|
nltk/nltk.github.com
|
fa235e76788e6e8e7349e7195e61799c1402e61d
|
cf0d2aa508a1de9147ccf30bd070660651d55adb
|
refs/heads/master
| 2023-07-31T13:34:20.864897
| 2023-01-02T15:33:19
| 2023-01-02T15:33:19
| 2,686,706
| 34
| 41
| null | 2022-10-06T17:06:49
| 2011-11-01T09:59:49
|
HTML
|
UTF-8
|
Python
| false
| false
| 8,592
|
py
|
# Natural Language Toolkit: K-Means Clusterer
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
import copy
import random
import sys
try:
import numpy
except ImportError:
pass
from nltk.cluster.util import VectorSpaceClusterer
class KMeansClusterer(VectorSpaceClusterer):
"""
    The K-means clusterer starts with k arbitrarily chosen means, then allocates
each vector to the cluster with the closest mean. It then recalculates the
means of each cluster as the centroid of the vectors in the cluster. This
process repeats until the cluster memberships stabilise. This is a
hill-climbing algorithm which may converge to a local maximum. Hence the
clustering is often repeated with random initial means and the most
commonly occurring output means are chosen.
"""
def __init__(
self,
num_means,
distance,
repeats=1,
conv_test=1e-6,
initial_means=None,
normalise=False,
svd_dimensions=None,
rng=None,
avoid_empty_clusters=False,
):
"""
:param num_means: the number of means to use (may use fewer)
:type num_means: int
:param distance: measure of distance between two vectors
:type distance: function taking two vectors and returning a float
:param repeats: number of randomised clustering trials to use
:type repeats: int
:param conv_test: maximum variation in mean differences before
deemed convergent
:type conv_test: number
:param initial_means: set of k initial means
:type initial_means: sequence of vectors
:param normalise: should vectors be normalised to length 1
:type normalise: boolean
:param svd_dimensions: number of dimensions to use in reducing vector
            dimensionality with SVD
:type svd_dimensions: int
:param rng: random number generator (or None)
:type rng: Random
:param avoid_empty_clusters: include current centroid in computation
of next one; avoids undefined behavior
when clusters become empty
:type avoid_empty_clusters: boolean
"""
VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
self._num_means = num_means
self._distance = distance
self._max_difference = conv_test
assert not initial_means or len(initial_means) == num_means
self._means = initial_means
assert repeats >= 1
assert not (initial_means and repeats > 1)
self._repeats = repeats
self._rng = rng if rng else random.Random()
self._avoid_empty_clusters = avoid_empty_clusters
def cluster_vectorspace(self, vectors, trace=False):
if self._means and self._repeats > 1:
print("Warning: means will be discarded for subsequent trials")
meanss = []
for trial in range(self._repeats):
if trace:
print("k-means trial", trial)
if not self._means or trial > 1:
self._means = self._rng.sample(list(vectors), self._num_means)
self._cluster_vectorspace(vectors, trace)
meanss.append(self._means)
if len(meanss) > 1:
# sort the means first (so that different cluster numbering won't
            # affect the distance comparison)
for means in meanss:
means.sort(key=sum)
# find the set of means that's minimally different from the others
min_difference = min_means = None
for i in range(len(meanss)):
d = 0
for j in range(len(meanss)):
if i != j:
d += self._sum_distances(meanss[i], meanss[j])
if min_difference is None or d < min_difference:
min_difference, min_means = d, meanss[i]
# use the best means
self._means = min_means
def _cluster_vectorspace(self, vectors, trace=False):
if self._num_means < len(vectors):
# perform k-means clustering
converged = False
while not converged:
# assign the tokens to clusters based on minimum distance to
# the cluster means
clusters = [[] for m in range(self._num_means)]
for vector in vectors:
index = self.classify_vectorspace(vector)
clusters[index].append(vector)
if trace:
print("iteration")
# for i in range(self._num_means):
# print ' mean', i, 'allocated', len(clusters[i]), 'vectors'
# recalculate cluster means by computing the centroid of each cluster
new_means = list(map(self._centroid, clusters, self._means))
# measure the degree of change from the previous step for convergence
difference = self._sum_distances(self._means, new_means)
if difference < self._max_difference:
converged = True
# remember the new means
self._means = new_means
def classify_vectorspace(self, vector):
# finds the closest cluster centroid
# returns that cluster's index
best_distance = best_index = None
for index in range(len(self._means)):
mean = self._means[index]
dist = self._distance(vector, mean)
if best_distance is None or dist < best_distance:
best_index, best_distance = index, dist
return best_index
def num_clusters(self):
if self._means:
return len(self._means)
else:
return self._num_means
def means(self):
"""
The means used for clustering.
"""
return self._means
def _sum_distances(self, vectors1, vectors2):
difference = 0.0
for u, v in zip(vectors1, vectors2):
difference += self._distance(u, v)
return difference
def _centroid(self, cluster, mean):
if self._avoid_empty_clusters:
centroid = copy.copy(mean)
for vector in cluster:
centroid += vector
return centroid / (1 + len(cluster))
else:
if not len(cluster):
sys.stderr.write("Error: no centroid defined for empty cluster.\n")
sys.stderr.write(
"Try setting argument 'avoid_empty_clusters' to True\n"
)
assert False
centroid = copy.copy(cluster[0])
for vector in cluster[1:]:
centroid += vector
return centroid / len(cluster)
def __repr__(self):
return "<KMeansClusterer means=%s repeats=%d>" % (self._means, self._repeats)
#################################################################################
def demo():
# example from figure 14.9, page 517, Manning and Schutze
from nltk.cluster import KMeansClusterer, euclidean_distance
vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
means = [[4, 3], [5, 5]]
clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means)
clusters = clusterer.cluster(vectors, True, trace=True)
print("Clustered:", vectors)
print("As:", clusters)
print("Means:", clusterer.means())
print()
vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
# test k-means using the euclidean distance metric, 2 means and repeat
# clustering 10 times with random seeds
clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
clusters = clusterer.cluster(vectors, True)
print("Clustered:", vectors)
print("As:", clusters)
print("Means:", clusterer.means())
print()
# classify a new vector
vector = numpy.array([3, 3])
print("classify(%s):" % vector, end=" ")
print(clusterer.classify(vector))
print()
if __name__ == "__main__":
demo()
|
[
"stevenbird1@gmail.com"
] |
stevenbird1@gmail.com
|
9dc8de4d2758350b6b958b69e457c1d86f34e7aa
|
0eb599c3bbfa6e5b31516913b88cc9db3a1311ce
|
/AGC/agc041b.py
|
047f318a49e38b13fc421cfd7491200922169bae
|
[] |
no_license
|
Linus-MK/AtCoder
|
5b84dc88c2d2773d0f97ed18265d303290da7879
|
a587e89a9e0c2ab4d36b09176bcc95e901e14326
|
refs/heads/master
| 2022-11-25T05:37:12.148722
| 2022-11-17T16:04:10
| 2022-11-17T16:04:10
| 169,840,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
# Hmm, hard to see at first.
# Think from the point of view of someone who wants one particular problem to be chosen: it does not have to come out on top.
# The top P problems are used, so it is enough to slide into a tie for P-th place.
# Clearly, sorting the values changes nothing.
# It feels easier to reason about lowering counts than about raising them by voting.
# (Instead of raising P problems by 1, think of lowering the other N-P problems by 1; the effect is the same.)
# Sort in descending order.
# Suppose the "particular problem" is the k-th one.
# The top (P-1) problems are fixed. It is fine if problems P through k-1 can be lowered to the same count as problem k.
# The conditions for this lowering to be possible are:
#  * (count of problem P) - (count of problem k) <= number of voters M
#  * total number of votes needed for the lowering <= number of voters M * number of votes each voter can withhold (N-V)
# These are clearly necessary, but are they sufficient...? If both hold, can the lowering always be carried out...?
# It probably can.
# In practice, redoing this check from scratch for every k is too slow.
# Only look at the difference from the previous k and update the running total (prefix-sum style).
n, m, v, p = list(map(int, input().split()))
vote = list(map(int, input().split()))
vote.sort(reverse=True)
target_score = vote[p-1] # score of the P-th problem
ans = p # the first p problems clearly satisfy the condition
vote_num_to_match = 0
# print(vote)
for i in range(p, n):
vote_num_to_match += (vote[i-1] - vote[i]) * (i-(p-1))
# print(vote_num_to_match)
if target_score - vote[i] <= m and vote_num_to_match <= m * (n-v):
ans += 1
else:
break
print(ans)
|
[
"13600386+Linus-MK@users.noreply.github.com"
] |
13600386+Linus-MK@users.noreply.github.com
|
192d9b5bc4efbf95468b5095c4c8a2857a41666c
|
0910e259a9bd252300f19b2ff22049d640f19b1a
|
/ml/m16_pipeline_RS4_boston.py
|
fddc26054c201ab70e936aa04c527f0903fdb23a
|
[] |
no_license
|
kimtaeuk-AI/Study
|
c7259a0ed1770f249b78f096ad853be7424a1c8e
|
bad5a0ea72a0117035b5e45652819a3f7206c66f
|
refs/heads/master
| 2023-05-05T12:34:52.471831
| 2021-05-22T16:16:12
| 2021-05-22T16:16:12
| 368,745,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,361
|
py
|
# Combine a single preprocessing step with a model
import numpy as np
import tensorflow as tf
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, make_pipeline
import timeit
start_time = timeit.default_timer()
import warnings
warnings.filterwarnings('ignore')
dataset = load_boston()
x = dataset.data
y = dataset.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=44)
# Pipeline handles preprocessing + model together, so the separate MinMaxScaler block below can be omitted
# from sklearn.preprocessing import MinMaxScaler
# scale = MinMaxScaler()
# scale.fit(x_train)
# x_train = scale.transform(x_train)
# x_test = scale.transform(x_test)
parameters = [
    {"svc__C" :[1,10,100,1000], "svc__kernel":["linear"]}, # C=1 with linear, C=10 with linear, ... 4 combinations
    {"svc__C" :[1,10,100], "svc__kernel":["rbf"], "svc__gamma":[0.001, 0.0001]}, # 3x2 = 6 combinations
    {"svc__C" :[1,10,100,1000], "svc__kernel":["sigmoid"],"svc__gamma":[0.001, 0.0001]}] # 4x2 = 8 combinations
parameters = [
    {"mal__C" :[1,10,100,1000], "mal__kernel":["linear"]}, # C=1 with linear, C=10 with linear, ... 4 combinations
    {"mal__C" :[1,10,100], "mal__kernel":["rbf"], "mal__gamma":[0.001, 0.0001]}, # 3x2 = 6 combinations
    {"mal__C" :[1,10,100,1000], "mal__kernel":["sigmoid"],"mal__gamma":[0.001, 0.0001]}] # 4x2 = 8 combinations
# A double underscore (__) must separate the pipeline step name from the parameter name
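# Illustration (not part of the original script): grid keys follow the pattern
# "<step name>__<parameter name>", so the step registered as 'mal' below exposes SVC's
# C, kernel and gamma as 'mal__C', 'mal__kernel' and 'mal__gamma'. The valid keys can be
# listed with get_params(), e.g.
# >>> sorted(Pipeline([('scale', MinMaxScaler()), ('mal', SVC())]).get_params())
# [..., 'mal__C', 'mal__gamma', 'mal__kernel', ..., 'scale__feature_range', ...]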
# 2. Model
Pipe = Pipeline([('scale', MinMaxScaler()), ('mal', SVC())]) # combine the SVC model with MinMaxScaler; mind the parentheses
# pipe = make_pipeline(StandardScaler(), SVC()) # the second of the two ways to build this
# Why use a Pipeline: the scaler is fit on the training data only, rescaling is applied per CV fold, it helps prevent leakage/overfitting, and the model sees properly scaled data so performance tends to improve .....
model = GridSearchCV(Pipe, parameters, cv=5)
model.fit(x_train, y_train)
results = model.score(x_test, y_test)
print('results : ', results)
# MinMaxScaler
# results : 0.9666666666666667
# StandardScaler
# results : 0.9666666666666667
|
[
"ki3123.93123@gmail.com"
] |
ki3123.93123@gmail.com
|
942af07212df99cf419268d0a99a758b26bcbd9b
|
262195faec1b59ff67067f2dc7e3eb7db8dba946
|
/src/follow.py
|
9a204856a7170ebd404ce32e362b91c1f6278f99
|
[
"MIT"
] |
permissive
|
sudeep0901/python
|
3a090ae2cd8a61e8e375cebb4722c051d2d766aa
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
refs/heads/master
| 2022-04-21T14:15:25.606241
| 2020-04-13T02:35:56
| 2020-04-13T02:35:56
| 155,167,294
| 0
| 0
|
MIT
| 2020-03-07T06:59:36
| 2018-10-29T07:08:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
'''
Created on Dec 7, 2018
@author: _patels13
'''
import time
def follow(thefile):
    # Generator that yields lines appended to the file, like `tail -f`.
    thefile.seek(0, 2)  # move to the end of the file
    while True:
        line = thefile.readline()
        if not line:
            # no new data yet; wait briefly and poll again
            time.sleep(0.1)
            continue
        yield line
# Example use
if __name__ == '__main__':
logfile = open("access-log.log")
print(logfile)
for line in follow(logfile):
print(line)
|
[
"sudeep.tech.patel@gmail.com"
] |
sudeep.tech.patel@gmail.com
|
635df689e111bfcf60068fb814cf2a224fc3dc42
|
893656022f3d70c0fc0cab0e864c2600885125bb
|
/setup.py
|
a8886101c13999b4ad5286f9573fc2235eeb0662
|
[
"MIT"
] |
permissive
|
iaxyzHpi/froide
|
85fdd421db71afcf3ca83e2b0760e8328cd1d4b1
|
fa159d352e77960f5ee696a1271509ced31785d1
|
refs/heads/master
| 2020-04-19T19:24:43.468542
| 2019-01-30T16:32:10
| 2019-01-30T16:32:10
| 168,387,397
| 0
| 0
| null | 2019-01-30T17:46:42
| 2019-01-30T17:46:42
| null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
#!/usr/bin/env python
import codecs
import re
import os
from setuptools import setup, find_packages
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name="froide",
version=find_version("froide", "__init__.py"),
url='https://github.com/okfde/froide',
license='MIT',
description="German Freedom of Information Portal",
long_description=read('README.md'),
author='Stefan Wehrmeyer',
author_email='mail@stefanwehrmeyer.com',
packages=find_packages(),
scripts=['manage.py'],
install_requires=[
'Django',
'Markdown',
'celery',
'geoip2',
'django-elasticsearch-dsl',
'django-taggit',
'pytz',
'requests',
'python-magic',
'djangorestframework',
'djangorestframework-csv',
'djangorestframework-jsonp',
'python-mimeparse',
'django-configurations',
'django-storages',
'dj-database-url',
'django-cache-url',
'django-filter',
'phonenumbers',
'django-filingcabinet',
'icalendar',
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP'
]
)
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
62cbd89d4141d9d2c4b797326954958585d031c9
|
4c03def55433d8fa736c59a6a00f8e3b0ab4bbe4
|
/scripts/aws/availability.py
|
54e89598682f7c97efb203d12e1d1599799b7d17
|
[
"MIT"
] |
permissive
|
stanford-futuredata/training_on_a_dime
|
58c9884e9621db8c56c4a2d189b8079d9bf6bc65
|
85f659572ff9da2701e5f309fbad7e828e6be46b
|
refs/heads/master
| 2022-11-22T21:14:09.685491
| 2020-02-21T00:49:45
| 2020-07-27T19:51:18
| 242,011,847
| 5
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,444
|
py
|
import argparse
from datetime import datetime
import signal
import json
import subprocess
import sys
import time
instances = {}
instance_types = {
("v100", 1): "p3.2xlarge",
("v100", 4): "p3.8xlarge",
("v100", 8): "p3.16xlarge",
("k80", 1): "p2.xlarge",
("k80", 8): "p2.8xlarge",
("k80", 16): "p2.16xlarge",
}
def signal_handler(sig, frame):
global instances
# Clean up all instances when program is interrupted.
for (zone, gpu_type, num_gpus) in instances:
[instance_id, _] = instances[(zone, gpu_type, num_gpus)]
if instance_id is not None:
delete_spot_instance(zone, instance_id)
sys.exit(0)
def launch_spot_instance(zone, gpu_type, num_gpus, instance_id):
instance_type = instance_types[(gpu_type, num_gpus)]
with open("specification.json.template", 'r') as f1, open("specification.json", 'w') as f2:
template = f1.read()
specification_file = template % (instance_type, zone)
f2.write(specification_file)
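    # For reference, a hypothetical sketch of what specification.json.template might
    # contain (the real template ships alongside this script); its two %s placeholders
    # are filled with the instance type and the availability zone, in that order:
    #   {"ImageId": "ami-xxxxxxxx", "KeyName": "my-key",
    #    "InstanceType": "%s", "Placement": {"AvailabilityZone": "%s"}}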
command = """aws ec2 request-spot-instances --instance-count 1 --type one-time --launch-specification file://specification.json"""
try:
spot_instance_request_id = None
print("[%s] Trying to create instance with %d GPU(s) of type %s in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
num_gpus, gpu_type, zone), file=sys.stderr)
output = subprocess.check_output(command, shell=True).decode()
return_obj = json.loads(output)
spot_instance_request_id = return_obj["SpotInstanceRequests"][0]["SpotInstanceRequestId"]
command = """aws ec2 describe-spot-instance-requests --spot-instance-request-id %s""" % (
spot_instance_request_id)
time.sleep(30)
output = subprocess.check_output(command, shell=True).decode()
return_obj = json.loads(output)
instance_id = return_obj["SpotInstanceRequests"][0]["InstanceId"]
print("[%s] Created instance %s with %d GPU(s) of type %s in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
instance_id, num_gpus, gpu_type, zone))
return [instance_id, True]
except Exception as e:
pass
if spot_instance_request_id is not None:
command = """aws ec2 cancel-spot-instance-requests --spot-instance-request-ids %s""" % (
spot_instance_request_id)
subprocess.check_output(command, shell=True)
print("[%s] Instance with %d GPU(s) of type %s creation failed" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'), num_gpus, gpu_type))
return [None, False]
def monitor_spot_instance(zone, instance_id):
command = """aws ec2 describe-instances --instance-id %(instance_id)s""" % {
"instance_id": instance_id,
}
try:
output = subprocess.check_output(command, shell=True).decode()
if "running" in output:
print("[%s] Instance %s running in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
instance_id, zone))
return True
except Exception as e:
pass
print("[%s] Instance %s not running in zone %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'), instance_id, zone))
# Delete spot instance in case it exists.
delete_spot_instance(zone, instance_id)
return False
def delete_spot_instance(zone, instance_id):
command = """aws ec2 terminate-instances --instance-ids %(instance_id)s""" % {
"instance_id": instance_id,
}
try:
output = subprocess.check_output(command, shell=True)
print("[%s] Successfully deleted instance %s" % (
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'), instance_id))
except:
return
def main(args):
global instances
for zone in args.zones:
for gpu_type in args.gpu_types:
for num_gpus in args.all_num_gpus:
instances[(zone, gpu_type, num_gpus)] = [None, False]
while True:
# Spin in a loop; try to launch spot instances of particular type if
# not running already. Check on status of instances, and update to
# "not running" as needed.
for (zone, gpu_type, num_gpus) in instances:
[instance_id, running] = instances[(zone, gpu_type, num_gpus)]
if instance_id is not None:
running = \
monitor_spot_instance(zone, instance_id)
if not running:
[instance_id, running] = \
launch_spot_instance(zone, gpu_type, num_gpus, instance_id)
instances[(zone, gpu_type, num_gpus)] = [instance_id, running]
time.sleep(600)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Get AWS spot instance availability')
parser.add_argument('--zones', type=str, nargs='+',
default=["us-east-1b", "us-east-1c"],
help='AWS availability zones')
parser.add_argument('--gpu_types', type=str, nargs='+',
default=["v100", "k80"],
help='GPU types')
parser.add_argument('--all_num_gpus', type=int, nargs='+',
default=[1, 8],
help='Number of GPUs per instance')
args = parser.parse_args()
signal.signal(signal.SIGINT, signal_handler)
main(args)
|
[
"deepakn94@gmail.com"
] |
deepakn94@gmail.com
|
8f08bea2b00b6368d534a49cc1fb79cce05d5036
|
4bc696d97f9fec7e5ce136593556007a8b889d5f
|
/server/apps/reportAdmin/serializers.py
|
64305a4b89bca1e9f3e6cba439f38354d22ba3bd
|
[] |
no_license
|
davidhorst/FirstDjangular
|
37224a72ebd1e487b4b07755b06432a99f572eaf
|
5d18577f8d52e7e276c2c850d33f929de8e77ee6
|
refs/heads/master
| 2021-06-12T09:34:21.103774
| 2016-12-13T14:53:24
| 2016-12-13T14:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from rest_framework.serializers import ModelSerializer
from .models import Report
class ReportSerializer(ModelSerializer):
class Meta:
model = Report
|
[
"="
] |
=
|
a6aea563186b15e750ba2fdd61cbf03c3df667ad
|
14e7058adf766352a0b90b66b7dcf887105a481c
|
/portal/disciplines/forms.py
|
39c5085371b163ba74eb94f11e5700a74e0c2746
|
[
"BSD-2-Clause"
] |
permissive
|
brunogamacatao/portalsaladeaula
|
2b7f07f07c2518dd359f043483fbb27417f62aaf
|
9429e485aa37ffea3208339a807032e9230a3c84
|
refs/heads/master
| 2020-12-29T01:42:18.594281
| 2012-06-22T12:24:44
| 2012-06-22T12:24:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
__author__ = 'brunocatao'
from django import forms
from portal.models import Discipline
from django.utils.translation import ugettext as _
class DisciplineForm(forms.ModelForm):
class Meta:
model = Discipline
fields = ('name', 'acronym', 'description', 'feed_url', 'twitter_id',
'registration_type', 'access_type', )
|
[
"brunogamacatao@gmail.com"
] |
brunogamacatao@gmail.com
|
95bc7ec13f8bee30543025c5b1d8cd5d1232d287
|
1f63dde39fcc5f8be29f2acb947c41f1b6f1683e
|
/Boss2D/addon/opencv-3.1.0_for_boss/modules/python/test/tickets.py
|
de51e7aa16571d34a9c9ac96bbdc616da0f6650b
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
koobonil/Boss2D
|
09ca948823e0df5a5a53b64a10033c4f3665483a
|
e5eb355b57228a701495f2660f137bd05628c202
|
refs/heads/master
| 2022-10-20T09:02:51.341143
| 2019-07-18T02:13:44
| 2019-07-18T02:13:44
| 105,999,368
| 7
| 2
|
MIT
| 2022-10-04T23:31:12
| 2017-10-06T11:57:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,236
|
py
|
#!/usr/bin/env python
import unittest
import random
import time
import math
import sys
import array
import os
import cv2.cv as cv
def find_sample(s):
for d in ["../samples/c/", "../doc/pics/"]:
path = os.path.join(d, s)
if os.access(path, os.R_OK):
return path
return s
class TestTickets(unittest.TestCase):
def test_2542670(self):
xys = [(94, 121), (94, 122), (93, 123), (92, 123), (91, 124), (91, 125), (91, 126), (92, 127), (92, 128), (92, 129), (92, 130), (92, 131), (91, 132), (90, 131), (90, 130), (90, 131), (91, 132), (92, 133), (92, 134), (93, 135), (94, 136), (94, 137), (94, 138), (95, 139), (96, 140), (96, 141), (96, 142), (96, 143), (97, 144), (97, 145), (98, 146), (99, 146), (100, 146), (101, 146), (102, 146), (103, 146), (104, 146), (105, 146), (106, 146), (107, 146), (108, 146), (109, 146), (110, 146), (111, 146), (112, 146), (113, 146), (114, 146), (115, 146), (116, 146), (117, 146), (118, 146), (119, 146), (120, 146), (121, 146), (122, 146), (123, 146), (124, 146), (125, 146), (126, 146), (126, 145), (126, 144), (126, 143), (126, 142), (126, 141), (126, 140), (127, 139), (127, 138), (127, 137), (127, 136), (127, 135), (127, 134), (127, 133), (128, 132), (129, 132), (130, 131), (131, 130), (131, 129), (131, 128), (132, 127), (133, 126), (134, 125), (134, 124), (135, 123), (136, 122), (136, 121), (135, 121), (134, 121), (133, 121), (132, 121), (131, 121), (130, 121), (129, 121), (128, 121), (127, 121), (126, 121), (125, 121), (124, 121), (123, 121), (122, 121), (121, 121), (120, 121), (119, 121), (118, 121), (117, 121), (116, 121), (115, 121), (114, 121), (113, 121), (112, 121), (111, 121), (110, 121), (109, 121), (108, 121), (107, 121), (106, 121), (105, 121), (104, 121), (103, 121), (102, 121), (101, 121), (100, 121), (99, 121), (98, 121), (97, 121), (96, 121), (95, 121)]
#xys = xys[:12] + xys[16:]
pts = cv.CreateMat(len(xys), 1, cv.CV_32SC2)
for i,(x,y) in enumerate(xys):
pts[i,0] = (x, y)
storage = cv.CreateMemStorage()
hull = cv.ConvexHull2(pts, storage)
hullp = cv.ConvexHull2(pts, storage, return_points = 1)
defects = cv.ConvexityDefects(pts, hull, storage)
vis = cv.CreateImage((1000,1000), 8, 3)
x0 = min([x for (x,y) in xys]) - 10
x1 = max([x for (x,y) in xys]) + 10
        y0 = min([y for (x,y) in xys]) - 10
        y1 = max([y for (x,y) in xys]) + 10
def xform(pt):
x,y = pt
return (1000 * (x - x0) / (x1 - x0),
1000 * (y - y0) / (y1 - y0))
for d in defects[:2]:
cv.Zero(vis)
# First draw the defect as a red triangle
cv.FillConvexPoly(vis, [xform(p) for p in d[:3]], cv.RGB(255,0,0))
# Draw the convex hull as a thick green line
for a,b in zip(hullp, hullp[1:]):
cv.Line(vis, xform(a), xform(b), cv.RGB(0,128,0), 3)
# Draw the original contour as a white line
for a,b in zip(xys, xys[1:]):
cv.Line(vis, xform(a), xform(b), (255,255,255))
self.snap(vis)
def test_2686307(self):
lena = cv.LoadImage(find_sample("lena.jpg"), 1)
dst = cv.CreateImage((512,512), 8, 3)
cv.Set(dst, (128,192,255))
mask = cv.CreateImage((512,512), 8, 1)
cv.Zero(mask)
cv.Rectangle(mask, (10,10), (300,100), 255, -1)
cv.Copy(lena, dst, mask)
self.snapL([lena, dst, mask])
m = cv.CreateMat(480, 640, cv.CV_8UC1)
print "ji", m
print m.rows, m.cols, m.type, m.step
def snap(self, img):
self.snapL([img])
def snapL(self, L):
for i,img in enumerate(L):
cv.NamedWindow("snap-%d" % i, 1)
cv.ShowImage("snap-%d" % i, img)
cv.WaitKey()
cv.DestroyAllWindows()
if __name__ == '__main__':
random.seed(0)
if len(sys.argv) == 1:
suite = unittest.TestLoader().loadTestsFromTestCase(TestTickets)
unittest.TextTestRunner(verbosity=2).run(suite)
else:
suite = unittest.TestSuite()
suite.addTest(TestTickets(sys.argv[1]))
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"slacealic@gmail.com"
] |
slacealic@gmail.com
|
f8ce9dd28dcb500d0b0228e76c9a387e5584278c
|
cee11bed1fd868fc87ef113f6062440cd190a40c
|
/detect/model/backbone/MobilenetV2.py
|
95ec1e952dae01010670f264bbf36bfb9f651726
|
[] |
no_license
|
Peiiii/plate_detect_recongnize_tf_py3
|
f1d41270c7e6ed1718cb9d0d46784d8c83701439
|
39a04ef6475cdbaf8b4ff6e6f729e5b28b24daf1
|
refs/heads/master
| 2020-07-10T12:30:17.863818
| 2019-08-25T07:52:44
| 2019-08-25T07:52:44
| 204,263,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,977
|
py
|
# coding: utf-8
import tensorflow as tf
from ...model.layers import *
def MobilenetV2(input_data, training):
with tf.variable_scope('MobilenetV2'):
conv = convolutional(name='Conv', input_data=input_data, filters_shape=(3, 3, 3, 32),
training=training, downsample=True, activate=True, bn=True)
conv = inverted_residual(name='expanded_conv', input_data=conv, input_c=32, output_c=16,
training=training, t=1)
conv = inverted_residual(name='expanded_conv_1', input_data=conv, input_c=16, output_c=24, downsample=True,
training=training)
conv = inverted_residual(name='expanded_conv_2', input_data=conv, input_c=24, output_c=24, training=training)
conv = inverted_residual(name='expanded_conv_3', input_data=conv, input_c=24, output_c=32, downsample=True,
training=training)
conv = inverted_residual(name='expanded_conv_4', input_data=conv, input_c=32, output_c=32, training=training)
feature_map_s = inverted_residual(name='expanded_conv_5', input_data=conv, input_c=32, output_c=32,
training=training)
conv = inverted_residual(name='expanded_conv_6', input_data=feature_map_s, input_c=32, output_c=64,
downsample=True, training=training)
conv = inverted_residual(name='expanded_conv_7', input_data=conv, input_c=64, output_c=64, training=training)
conv = inverted_residual(name='expanded_conv_8', input_data=conv, input_c=64, output_c=64, training=training)
conv = inverted_residual(name='expanded_conv_9', input_data=conv, input_c=64, output_c=64, training=training)
conv = inverted_residual(name='expanded_conv_10', input_data=conv, input_c=64, output_c=96, training=training)
conv = inverted_residual(name='expanded_conv_11', input_data=conv, input_c=96, output_c=96, training=training)
feature_map_m = inverted_residual(name='expanded_conv_12', input_data=conv, input_c=96, output_c=96,
training=training)
conv = inverted_residual(name='expanded_conv_13', input_data=feature_map_m, input_c=96, output_c=160,
downsample=True, training=training)
conv = inverted_residual(name='expanded_conv_14', input_data=conv, input_c=160, output_c=160, training=training)
conv = inverted_residual(name='expanded_conv_15', input_data=conv, input_c=160, output_c=160, training=training)
conv = inverted_residual(name='expanded_conv_16', input_data=conv, input_c=160, output_c=320, training=training)
feature_map_l = convolutional(name='Conv_1', input_data=conv, filters_shape=(1, 1, 320, 1280),
training=training, downsample=False, activate=True, bn=True)
return feature_map_s, feature_map_m, feature_map_l
|
[
"1535376447@qq.com"
] |
1535376447@qq.com
|
491a906ed44c2bb3341d88b41d6bb070781fff0d
|
77717d0024c8597fec83600259ea5547abbc183a
|
/mmdet/apis/inference.py
|
2d4a987e2606fa6e324e8245d6eabe1877171244
|
[
"Apache-2.0"
] |
permissive
|
fengyouliang/wheat_detection
|
0a090ef5eda7f2c5463996f4795f9ce06dd04050
|
d056123426a1260c29b486cbb8e44a88a0a3c5bc
|
refs/heads/master
| 2022-11-17T15:09:29.113493
| 2020-07-18T13:47:34
| 2020-07-18T13:47:34
| 276,532,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
py
|
import warnings
import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
from mmdet.ops import RoIAlign, RoIPool
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
def __call__(self, results):
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# Use torchvision ops for CPU mode instead
for m in model.modules():
if isinstance(m, (RoIPool, RoIAlign)):
if not m.aligned:
# aligned=False is not implemented on CPU
# set use_torchvision on-the-fly
m.use_torchvision = True
warnings.warn('We set use_torchvision=True in CPU mode.')
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'][0].data
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
async def async_inference_detector(model, img):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
Awaitable detection results.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
result = await model.aforward_test(rescale=True, **data)
return result
def show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
"""
if hasattr(model, 'module'):
model = model.module
img = model.show_result(img, result, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.show()
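# A minimal end-to-end usage sketch (not part of the original module). The config,
# checkpoint and image paths below are placeholders and must point to real files.
if __name__ == '__main__':
    config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'  # hypothetical path
    checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco.pth'     # hypothetical path
    detector = init_detector(config_file, checkpoint_file, device='cuda:0')
    detections = inference_detector(detector, 'demo.jpg')               # hypothetical image
    show_result_pyplot(detector, 'demo.jpg', detections, score_thr=0.3)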
|
[
"1654388696@qq.com"
] |
1654388696@qq.com
|
bfd6a2a39dc4018db6176fae203a4a0bded8c670
|
1a04e02811c844ecf53cc041b104667e5c987a09
|
/vgrabber/datalayer/serializer/test.py
|
4fe5c4679330e91f2240bb6902bee4213cce9703
|
[] |
no_license
|
janjanech/vzdelavanieGui
|
dff17add6e6946063597d4c1eba5d6d76b6f5374
|
b2015f41f7cb1be1ecccf1c4778a91f43f8fba12
|
refs/heads/master
| 2021-10-24T16:21:24.911817
| 2019-01-15T17:03:49
| 2019-01-15T17:03:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
from lxml.etree import Element
from vgrabber.model import Test
class TestSerializer:
__test: Test
def __init__(self, test):
self.__test = test
def serialize(self):
test_element = Element(
'test',
id=str(self.__test.id),
name=self.__test.name,
moodleid=str(self.__test.moodle_id)
)
return test_element
|
[
"janik@janik.ws"
] |
janik@janik.ws
|
0be06179167ae3177f62d6f0f00b960ebd3eacda
|
1a6919459bd4619bfef7527bc9c49ced3901e483
|
/tests/test_permissions_sql.py
|
a146e330aa370d7398c2e2786d0be3d5641e7cb2
|
[
"Apache-2.0"
] |
permissive
|
simonw/datasette-permissions-sql
|
870b1129b13377b812353183ba64e0bb69fa7339
|
e0103ea1c13389391a3e40241485df45739aa638
|
refs/heads/master
| 2022-10-09T19:47:28.383910
| 2020-06-12T07:03:35
| 2020-06-12T07:03:35
| 271,408,895
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,570
|
py
|
from datasette.app import Datasette
import httpx
import sqlite_utils
import pytest
def create_tables(conn):
db = sqlite_utils.Database(conn)
db["table_access"].insert_all(
[
{"user_id": 1, "database": "test", "table": "dogs"},
{"user_id": 2, "database": "test", "table": "dogs"},
{"user_id": 1, "database": "test", "table": "cats"},
]
)
db["cats"].insert({"name": "Casper"})
db["dogs"].insert({"name": "Cleo"})
db["other"].insert({"name": "Other"})
# user_id = 3 is banned from 'sqlite_master'
db["banned"].insert({"table": "other", "user_id": 3})
@pytest.fixture
async def ds(tmpdir):
filepath = tmpdir / "test.db"
ds = Datasette(
[filepath],
metadata={
"plugins": {
"datasette-permissions-sql": [
{
"action": "view-query",
"fallback": True,
"resource": ["test", "sqlite_master"],
"sql": """
SELECT
-1
FROM
banned
WHERE
user_id = :actor_id
""",
},
{
"action": "view-table",
"sql": """
SELECT
*
FROM
table_access
WHERE
user_id = :actor_id
AND "database" = :resource_1
AND "table" = :resource_2
""",
},
]
},
"databases": {
"test": {
"allow_sql": {},
"queries": {"sqlite_master": "select * from sqlite_master"},
}
},
},
)
await ds.get_database().execute_write_fn(create_tables, block=True)
return ds
@pytest.mark.asyncio
async def test_ds_fixture(ds):
assert {"table_access", "cats", "dogs", "banned", "other"} == set(
await ds.get_database().table_names()
)
@pytest.mark.parametrize(
"actor,table,expected_status",
[
(None, "dogs", 403),
(None, "cats", 403),
({"id": 1}, "dogs", 200),
({"id": 2}, "dogs", 200),
({"id": 1}, "cats", 200),
({"id": 2}, "cats", 403),
],
)
@pytest.mark.asyncio
async def test_permissions_sql(ds, actor, table, expected_status):
async with httpx.AsyncClient(app=ds.app()) as client:
cookies = {}
if actor:
cookies = {"ds_actor": ds.sign({"a": actor}, "actor")}
response = await client.get(
"http://localhost/test/{}".format(table), cookies=cookies
)
assert expected_status == response.status_code
@pytest.mark.parametrize(
"actor,expected_status", [(None, 200), ({"id": 1}, 200), ({"id": 3}, 403),],
)
@pytest.mark.asyncio
async def test_fallback(ds, actor, expected_status):
async with httpx.AsyncClient(app=ds.app()) as client:
cookies = {}
if actor:
cookies = {"ds_actor": ds.sign({"a": actor}, "actor")}
response = await client.get(
"http://localhost/test/sqlite_master", cookies=cookies
)
assert expected_status == response.status_code
|
[
"swillison@gmail.com"
] |
swillison@gmail.com
|
c9be03f3e886d8a684bee9b8789e37ca03bdd523
|
4720b2f296b21b60836510d1fe997d58026ff573
|
/remo/remozilla/admin.py
|
dc3cf6f8678f1beed6df00e8c775b45a04ceca17
|
[] |
no_license
|
seocam/remo
|
9bc9b9e52bfdbef87a5c333e4f4f2be14630ccba
|
879cbbb0132f12dff64dfbd4ed118d0f5169615f
|
refs/heads/master
| 2021-01-15T13:06:39.844096
| 2014-05-13T10:23:06
| 2014-05-13T10:23:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from django.contrib import admin
from remo.remozilla.models import Bug, Status
class BugAdmin(admin.ModelAdmin):
"""Bug Admin."""
list_display = ('__unicode__', 'summary', 'status', 'resolution')
list_filter = ('status', 'resolution', 'council_vote_requested')
search_fields = ('bug_id', )
admin.site.register(Bug, BugAdmin)
admin.site.register(Status)
|
[
"giorgos@mozilla.com"
] |
giorgos@mozilla.com
|
ce2abc1434b7dcd4474ff498d805bd178c9cf4cc
|
72ea8dbdbd68813156b76c077edb5a3806bf42ab
|
/synapse/lib/scrape.py
|
61310192069525156af418567b158186df1f3042
|
[
"Apache-2.0"
] |
permissive
|
williballenthin/synapse
|
5c6f197f5a3cb3566c48dc444770592e89d4152a
|
799854da814b79d6631e5cc2796c347bf4a80ce7
|
refs/heads/master
| 2020-12-24T14:19:12.530026
| 2017-03-16T20:30:38
| 2017-03-16T20:30:38
| 41,521,212
| 2
| 0
| null | 2015-08-28T02:01:50
| 2015-08-28T02:01:50
| null |
UTF-8
|
Python
| false
| false
| 2,356
|
py
|
import re
import synapse.data as s_data
import synapse.cortex as s_cortex
import synapse.lib.datfile as s_datfile
from synapse.common import *
tldlist = list(s_data.get('iana.tlds'))
tldlist.sort(key=lambda x: len(x))
tldlist.reverse()
tldcat = '|'.join(tldlist)
fqdn_re = r'((?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))' % tldcat
scrape_types = [
('hash:md5', r'(?=(?:[^A-Za-z0-9]|^)([A-Fa-f0-9]{32})(?:[^A-Za-z0-9]|$))',{}),
('hash:sha1', r'(?=(?:[^A-Za-z0-9]|^)([A-Fa-f0-9]{40})(?:[^A-Za-z0-9]|$))',{}),
('hash:sha256', r'(?=(?:[^A-Za-z0-9]|^)([A-Fa-f0-9]{64})(?:[^A-Za-z0-9]|$))',{}),
('inet:url', r'\w+://[^ \'"\t\n\r\f\v]+',{}),
('inet:ipv4', r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)',{}),
('inet:tcp4', r'((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):[0-9]{1,5})',{}),
('inet:fqdn', r'(?:[^a-z0-9_.-]|^)((?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))(?:[^a-z0-9_.-]|$)' % tldcat, {}),
('inet:email', r'(?:[^a-z0-9_.+-]|^)([a-z0-9_\.\-+]{1,256}@(?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))(?:[^a-z0-9_.-]|$)' % tldcat, {} ),
]
regexes = { name:re.compile(rule,re.IGNORECASE) for (name,rule,opts) in scrape_types }
def scrape(text, data=None):
    '''
    Scrape known node types from a blob of text and yield (type, value) tuples.
    '''
    if data is None:
        data = {}
for ptype,rule,info in scrape_types:
regx = regexes.get(ptype)
for valu in regx.findall(text):
yield (ptype,valu)
def getsync(text, tags=()):
ret = []
core = s_cortex.openurl('ram://')
with s_cortex.openurl('ram://'):
core.setConfOpt('enforce',1)
core.on('core:sync', ret.append)
for form,valu in scrape(text):
tufo = core.formTufoByFrob(form,valu)
for tag in tags:
core.addTufoTag(tufo,tag)
return ret
if __name__ == '__main__':
import sys
data = {}
for path in sys.argv[1:]:
byts = reqbytes(path)
text = byts.decode('utf8')
data = scrape(text,data=data)
    #FIXME options for tagging all / tagging forms / form props
print( json.dumps( {'format':'syn','data':data} ) )
#
#print( repr( data ) )
#def scanForEmailAddresses(txt):
#return [ m[0] for m in email_regex.findall(txt) ]
|
[
"invisigoth.kenshoto@gmail.com"
] |
invisigoth.kenshoto@gmail.com
|
cd852578580f51734828c8130405eaf66f147395
|
697af415566ba649502bd18751a6521ac526892c
|
/get_er2_mvis.py
|
27bdde56be929ded3f2d22aebef00f4cf2ef64c6
|
[] |
no_license
|
srbrodzik/impacts-scripts
|
df44c8f34746499b8397b5b1a4ad09859b4cc8d4
|
263c7545bbb912bbcea563a21d0619e5112b1788
|
refs/heads/master
| 2023-05-31T05:01:09.558641
| 2023-05-22T23:24:52
| 2023-05-22T23:24:52
| 215,638,568
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,561
|
py
|
#!/usr/bin/python3
# Inconsistent naming of daily subdirectories after unzip. Sometimes HH, other times HHMM
import os
import sys
import shutil
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from ftplib import FTP
from zipfile import ZipFile
def listFD(url, ext=''):
page = requests.get(url).text
#print page
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
def getImageHHMM(path):
flist = os.listdir(path)
hhmmList = []
for file in flist:
(base,ext) = os.path.splitext(file)
# assumes base is YYYYMMDDhhmmss
hhmm = base[8:12]
if hhmm not in hhmmList:
hhmmList.append(hhmm)
return hhmmList
if len(sys.argv) != 2:
    print('Usage: {} [YYYY-MM-DD]'.format(sys.argv[0]))
sys.exit()
else:
date = sys.argv[1]
# User inputs
debug = 1
file_ext = 'zip'
#url = 'https://asp-archive.arc.nasa.gov/IMPACTS/N809NA/video_2022/'+date+'/MVIS'
url = 'https://asp-archive.arc.nasa.gov/IMPACTS/N809NA/still-images_2022/'+date+'/MVIS'
tempDir = "/tmp"
targetDirBase = "/home/disk/bob/impacts/images/MVIS"
catPrefix = 'aircraft.NASA_ER2'
catSuffix = 'MVIS'
ftpCatalogServer = 'catalog.eol.ucar.edu'
ftpCatalogUser = 'anonymous'
catalogDestDir = '/pub/incoming/catalog/impacts'
# Create image directory, if needed
targetDir = targetDirBase+'/'+date.replace('-','')
if not os.path.exists(targetDir):
os.makedirs(targetDir)
# Get filelist from url
urlFlist = listFD(url, file_ext)
# Save first file every minute
os.chdir(targetDir)
for file in urlFlist:
command = 'wget '+file
os.system(command)
# naming convention is:
# IMPACTS-MVIS_ER2_2022010815_R0_still-images-jpeg.zip
fname = os.path.basename(file)
(proj,plane,dateHour,junk,suffix) = fname.split('_')
# ONE OR THE OTHER - DUE TO INCONSISTENT DIRECTORY NAMING CONVENTIONS
#time = dateHour[-2:]+'00'
time = dateHour[-2:]
try:
with ZipFile(fname, 'r') as zip:
zip.extractall()
os.remove(fname)
if os.path.exists('__MACOSX'):
shutil.rmtree('__MACOSX')
os.chdir(targetDir+'/'+time)
for imgFile in os.listdir():
print(imgFile)
if '_' in imgFile or os.path.getsize(imgFile) == 0:
print(' {} removed'.format(imgFile))
os.remove(targetDir+'/'+time+'/'+imgFile)
else:
(base,ext) = os.path.splitext(imgFile)
hhmm = base[8:12]
if hhmm not in getImageHHMM(targetDir):
shutil.move(targetDir+'/'+time+'/'+imgFile,
targetDir+'/'+imgFile)
else:
os.remove(targetDir+'/'+time+'/'+imgFile)
os.chdir(targetDir)
os.rmdir(time)
except:
print('Unable to unzip {}'.format(fname))
"""
# Open ftp connection
catalogFTP = FTP(ftpCatalogServer,ftpCatalogUser)
catalogFTP.cwd(catalogDestDir)
# Rename jpg files & upload to catalog
for file in os.listdir(targetDir):
print(file)
(imageTime,ext) = os.path.splitext(file)
imageTime = imageTime[:-2]
catName = catPrefix+'.'+imageTime+'.'+catSuffix+ext
shutil.copy(targetDir+'/'+file,
tempDir+'/'+catName)
ftpFile = open(tempDir+'/'+catName,'rb')
catalogFTP.storbinary('STOR '+catName,ftpFile)
ftpFile.close()
os.remove(tempDir+'/'+catName)
# Close ftp connection
catalogFTP.quit()
"""
|
[
"brodzik@uw.edu"
] |
brodzik@uw.edu
|
20591bf17f76442d97811693898a12de521db530
|
2cbf3aaad62f4922d827af658fb5dbb7ac651bef
|
/teledusite/teledu/admin/gameSystem.py
|
6ec61995a947768d4240c49b802f454dea506c89
|
[] |
no_license
|
tctimmeh/teledu
|
0266240aa864cd2eed75857e66eaeb8270f44c1a
|
04135ffb04f397f29152ca48f868a957b18d504a
|
refs/heads/master
| 2021-01-23T08:52:32.817693
| 2013-10-29T01:34:41
| 2013-10-29T01:34:41
| 2,566,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
from django.contrib import admin
from teledu.models import GameSystem
admin.site.register(GameSystem)
|
[
"tctimmeh@gmail.com"
] |
tctimmeh@gmail.com
|
e6371302f8591fb2a405866ff5192a52cc735e72
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_quadricepses.py
|
05b399424fc9c4dc2fe68ec222b1600fea37c16f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from xai.brain.wordbase.nouns._quadriceps import _QUADRICEPS
# class header
class _QUADRICEPSES(_QUADRICEPS, ):
def __init__(self,):
_QUADRICEPS.__init__(self)
self.name = "QUADRICEPSES"
self.specie = 'nouns'
self.basic = "quadriceps"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
4dd9fda777a418611e522466a3fcad13b7b456bf
|
080a6b7be74dc2d2fac61e0bb60a5402533294de
|
/week7/bc-ints-avg-float.py
|
3104214222f46aab04b7ba40ece6c87de394fb3c
|
[] |
no_license
|
rosmoke/DCU-Projects
|
cfec4c59ba00beb68d174cf869952b7a88e5c1dc
|
1478f476e1d81756d00a206b8f5bfcd0a1094649
|
refs/heads/master
| 2021-01-20T17:03:59.642966
| 2016-06-23T15:06:46
| 2016-06-23T15:06:46
| 61,814,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
i = 0
integer = raw_input()
total = 0.0
while integer != "end":
total = total + int(integer)
integer = raw_input()
i = i + 1
if i > 1:
print total / i
else:
print total
|
[
"danielasofiei@yahoo.ie"
] |
danielasofiei@yahoo.ie
|
6f7318386169ac167772e18f4034f3b8da28d5a7
|
f93fde3ad0c7f96710f8f8f8495adfa14484763b
|
/ld12/gene.py
|
d2678af23a96a69bc93a9d4e8569b51c75c8e227
|
[
"MIT"
] |
permissive
|
xapple/ld12
|
0f80b0b4fc353327779e3189d7152f110cc0cf78
|
e2dfc98beaec8d6dcecaec86fb7854ea5bb6f333
|
refs/heads/master
| 2021-01-10T01:52:50.282298
| 2016-04-04T19:19:34
| 2016-04-04T19:19:34
| 53,348,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# Built-in modules #
import re
# First party modules #
# Third party modules #
###############################################################################
class Gene(object):
"""A DNA sequence with an ID associated and belonging to a genome."""
def __repr__(self): return '<%s object %s>' % (self.__class__.__name__, self.name)
def __str__(self): return str(self.seq.seq)
def __len__(self): return len(self.seq)
def __init__(self, seq, genome):
self.seq = seq
self.name = seq.id
self.genome = genome
self.annotation = None # Filled in by the __init__.py
self.raw_hits = [] # Filled in by the duplications.py
self.best_tax = None # Filled in by the duplications.py
@property
def long_name(self):
"""A more descriptive name"""
return self.name + " (from " + self.genome.long_name + ")"
@property
def ribo_group(self):
"""If it is a ribosomal protein, what group is it part of ?"""
results = re.findall("ribosomal protein ([LS][1-9]+)", self.annotation)
if not results: return False
else: return results[0]
|
[
"lucas.sinclair@me.com"
] |
lucas.sinclair@me.com
|
06e0ddc21cdd990cd36cfa9d2d2fcbe3eddc2d2e
|
10d89b6e07a7c72c385eb1d1c60a3e0ed9f9fc3c
|
/boss/report/views/phone_fee.py
|
ead5501cffbbe306fc0cb441b004269ec0037dac
|
[] |
no_license
|
cash2one/pt
|
2a4998a6627cf1604fb64ea8ac62ff1c227f0296
|
8a8c12375610182747099e5e60e15f1a9bb3f953
|
refs/heads/master
| 2021-01-20T00:36:43.779028
| 2016-11-07T03:27:18
| 2016-11-07T03:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
#coding: utf-8
"""
服务质量追踪-充话费
"""
from report_pub import *
@login_required
@permission_required(u'man.%s' % ReportConst.PHONE_FEE, raise_exception=True)
@add_common_var
def phone_fee(request, template_name):
app = request.GET.get("app")
report_check_app(request, app)
vers = get_app_versions(app)
channels = get_app_channels(app)
operators = get_report_filters("gy_fee_prod_isptype")
provinces = get_report_filters("gy_fee_prod_province")
faces = get_report_filters("gy_fee_prod_content")
faces.sort(key=lambda a: int(a))
product_type = get_product_type(ReportConst.PHONE_FEE)
cps = get_cp_info(product_type)
return report_render(request, template_name,{
"currentdate": get_datestr(1, "%Y-%m-%d"),
"operators": operators,
"provinces": provinces,
"faces": faces,
"cps": cps,
"vers": vers,
"channels": channels
})
@login_required
@permission_required(u'man.%s' % ReportConst.PHONE_FEE, raise_exception=True)
def phone_fee_ajax(request):
start_date = request.POST.get("start_date")
end_date = request.POST.get("end_date")
app = request.POST.get("app")
report_check_app(request, app)
ver = request.POST.get("ver")
channel = request.POST.get("channel")
operator = request.POST.get("operator")
province = request.POST.get("province")
face = request.POST.get("face")
cp = request.POST.get("cp")
result = get_service_quality_data(start_date, end_date, app, ver, channel, operator, province, face, cp, ReportConst.PHONE_FEE)
return HttpResponse(json.dumps(result))
@login_required
@permission_required(u'man.%s' % ReportConst.PHONE_FEE, raise_exception=True)
def phone_fee_csv(request):
start_date = request.GET.get("start_date")
end_date = request.GET.get("end_date")
app = request.GET.get("app")
report_check_app(request, app)
ver = request.GET.get("ver")
channel = request.GET.get("channel")
operator = request.GET.get("operator")
province = request.GET.get("province")
face = request.GET.get("face")
cp = request.GET.get("cp")
filename = '%s-质量追踪(%s-%s-%s).csv' % (ReportConst.PHONE_FEE, str(get_app_name(app)), str(start_date), str(end_date))
csv_data = [["日期",
"总单数",
"成功数",
"失败数",
"失败率",
"1分钟到账数",
"1分钟到账率",
"3分钟到账数",
"3分钟到账率",
"10分钟到账数",
"10分钟到账率",
"30分钟到账数",
"30分钟到账率",
"30分钟以上到账数",
"30分钟以上到账率"]]
csv_data.extend(get_service_quality_data(start_date, end_date, app, ver, channel, operator, province, face, cp, ReportConst.PHONE_FEE))
return get_csv_response(filename, csv_data)
|
[
"xl@putao.cn"
] |
xl@putao.cn
|
2032fcdbc5f7bfd3980087825cefef8a1b0f3e7e
|
9b9a02657812ea0cb47db0ae411196f0e81c5152
|
/repoData/arneb-django-export/allPythonContent.py
|
c1f0cbe7c45766156c8d3fdd4513c94e9d1ed073
|
[] |
no_license
|
aCoffeeYin/pyreco
|
cb42db94a3a5fc134356c9a2a738a063d0898572
|
0ac6653219c2701c13c508c5c4fc9bc3437eea06
|
refs/heads/master
| 2020-12-14T14:10:05.763693
| 2016-06-27T05:15:15
| 2016-06-27T05:15:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,667
|
py
|
__FILENAME__ = models
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import *
urlpatterns = patterns('export.views',
url(r'^database/$', 'export_database', {}, name="export_database"),
url(r'^database_s3/$', 'export_to_s3', {}, name="export_database_s3"),
url(r'^media/$', 'export_media', {}, name="export_mediaroot"),
url(r'^list_s3/$', 'list_s3', {}, name="export_list_s3"),
url(r'^$', 'export_index', {}, name="export_index"),
)
########NEW FILE########
__FILENAME__ = views
import os, time
from datetime import date
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.views.generic.simple import direct_to_template
from django.contrib.admin.views.decorators import staff_member_required
try:
import S3
except ImportError:
S3 = None
# default dump commands, you can overwrite these in your settings.
MYSQLDUMP_CMD = getattr(settings, 'MYSQLDUMP_CMD', '/usr/bin/mysqldump -h %s --opt --compact --skip-add-locks -u %s -p%s %s | bzip2 -c')
SQLITE3DUMP_CMD = getattr(settings, 'SQLITE3DUMP_CMD', 'echo ".dump" | /usr/bin/sqlite3 %s | bzip2 -c')
DISABLE_STREAMING = getattr(settings, 'DISABLE_STREAMING', False)
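# For example (an illustrative sketch, not part of this app), a project could override
# the defaults above in its settings.py; the binary paths and values are placeholders:
#
#   MYSQLDUMP_CMD = '/usr/local/bin/mysqldump -h %s --opt -u %s -p%s %s | bzip2 -c'
#   SQLITE3DUMP_CMD = 'echo ".dump" | /usr/local/bin/sqlite3 %s | bzip2 -c'
#   DISABLE_STREAMING = True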
@staff_member_required
def export_database(request):
"""
Dump the database directly to the browser
"""
if request.method == 'POST':
if settings.DATABASE_ENGINE == 'mysql':
cmd = MYSQLDUMP_CMD % (settings.DATABASE_HOST, settings.DATABASE_USER, settings.DATABASE_PASSWORD, settings.DATABASE_NAME)
elif settings.DATABASE_ENGINE == 'sqlite3':
cmd = SQLITE3DUMP_CMD % settings.DATABASE_NAME
else:
raise ImproperlyConfigured, "Sorry, django-export only supports mysql and sqlite3 database backends."
stdin, stdout = os.popen2(cmd)
stdin.close()
if DISABLE_STREAMING:
stdout = stdout.read()
response = HttpResponse(stdout, mimetype="application/octet-stream")
response['Content-Disposition'] = 'attachment; filename=%s' % date.today().__str__()+'_db.sql.bz2'
return response
return direct_to_template(request, 'export/export.html', {'what': _(u'Export Database')})
@staff_member_required
def export_media(request):
"""
Tar the MEDIA_ROOT and send it directly to the browser
"""
if request.method == 'POST':
stdin, stdout = os.popen2('tar -cf - %s' % settings.MEDIA_ROOT)
stdin.close()
if DISABLE_STREAMING:
stdout = stdout.read()
response = HttpResponse(stdout, mimetype="application/octet-stream")
response['Content-Disposition'] = 'attachment; filename=%s' % date.today().__str__()+'_media.tar'
return response
return direct_to_template(request, 'export/export.html', {'what': _(u'Export Media Root')})
@staff_member_required
def export_to_s3(request):
"""
Dump the database and upload the dump to Amazon S3
"""
if request.method == 'POST':
if settings.DATABASE_ENGINE == 'mysql':
cmd = MYSQLDUMP_CMD % (settings.DATABASE_HOST, settings.DATABASE_USER, settings.DATABASE_PASSWORD, settings.DATABASE_NAME)
elif settings.DATABASE_ENGINE == 'sqlite3':
cmd = SQLITE3DUMP_CMD % settings.DATABASE_NAME
else:
raise ImproperlyConfigured, "Sorry, django-export only supports mysql and sqlite3 database backends."
stdin, stdout = os.popen2(cmd)
stdin.close()
file_name = 'dump_%s.sql.bz2' % time.strftime('%Y%m%d-%H%M')
conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
res = conn.put(settings.AWS_BUCKET_NAME, file_name, S3.S3Object(stdout.read()), {'Content-Type': 'application/x-bzip2',})
if res.http_response.status == 200:
request.user.message_set.create(message="%s" % _(u"%(filename)s saved on Amazon S3") % {'filename': file_name})
else:
request.user.message_set.create(message="%s" % _(u"Upload failed with %(status)s") % {'status': res.http_response.status})
stdout.close()
return HttpResponseRedirect('/admin/')
return direct_to_template(request, 'export/export.html', {'what': _(u'Export Database to S3'), 's3support': (S3 is not None), 's3': True})
@staff_member_required
def list_s3(request):
"""
List Amazon S3 bucket contents
"""
if S3 is not None:
conn = S3.AWSAuthConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
generator = S3.QueryStringAuthGenerator(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, calling_format=S3.CallingFormat.VANITY)
generator.set_expires_in(300)
bucket_entries = conn.list_bucket(settings.AWS_BUCKET_NAME).entries
entries = []
for entry in bucket_entries:
entry.s3url = generator.get(settings.AWS_BUCKET_NAME, entry.key)
entries.append(entry)
return direct_to_template(request, 'export/list_s3.html', {'object_list': entries, 's3support': True})
else:
return direct_to_template(request, 'export/list_s3.html', {'object_list': [], 's3support': False})
@staff_member_required
def export_index(request):
"""
List all available export views.
"""
return direct_to_template(request, 'export/index.html', {'s3support': (S3 is not None),})
########NEW FILE########
|
[
"dyangUCI@github.com"
] |
dyangUCI@github.com
|
c6fa6a2edb99f5bcef6bebbe9f0f17b78178e9aa
|
dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0
|
/src/main/scala/mock/25092020/ShortestPathInGridWithObstaclesElimination.py
|
d006e85a23d9c40ffbbddf03061da34dabd8a5b3
|
[] |
no_license
|
joestalker1/leetcode
|
8a5cdda17abd33c3eef859732f75d7bec77a9d0e
|
ae392ddbc7eb56cb814b9e9715043c98a89a6314
|
refs/heads/master
| 2023-04-13T22:09:54.407864
| 2023-04-09T19:22:54
| 2023-04-09T19:22:54
| 131,803,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
from collections import deque
class Solution:
    def shortestPath(self, grid, k):
        if not grid or not grid[0]:
            return -1
        n = len(grid)
        m = len(grid[0])
        q = deque([[0, 0, 0, 0]])  # path length, row, col, obstacles eliminated so far
        seen = {(0, 0, 0)}         # visited states: (row, col, eliminations used)
        while q:
            d, r, c, elim = q.popleft()
            if r == n - 1 and c == m - 1:
                return d
            for dr, dc in [[0, 1], [0, -1], [1, 0], [-1, 0]]:
                nr = r + dr
                nc = c + dc
                if 0 <= nr < n and 0 <= nc < m:
                    if grid[nr][nc] == 0 or (grid[nr][nc] == 1 and elim < k):
                        nelim = elim + 1 if grid[nr][nc] == 1 else elim
                        if (nr, nc, nelim) in seen:
                            continue
                        seen.add((nr, nc, nelim))
                        q.append([d + 1, nr, nc, nelim])
        return -1
sol = Solution()
# print(sol.shortestPath([[0, 0, 0],
# [1, 1, 0],
# [0, 0, 0],
# [0, 1, 1],
# [0, 0, 0]], 1))
print(sol.shortestPath([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]],
1))
|
[
"denys@dasera.com"
] |
denys@dasera.com
|
47f4c767c2a7b9c7065929c3f93e659bcb8be2b3
|
fd6525073b8bd0f9387ccd14b801fdb6bfecd374
|
/nur/path.py
|
260f0d7957dea351b7c4bc2b1ca3abc02dcda391
|
[
"MIT"
] |
permissive
|
demyanrogozhin/NUR
|
5a92757c52d28ff0bbe8684b4bf25fc8998bfc43
|
a7746bf35b2fda77e2cb7a3a1f22db3e4d21f399
|
refs/heads/master
| 2020-12-12T17:40:47.783164
| 2020-01-26T21:10:26
| 2020-01-26T21:38:16
| 234,187,682
| 1
| 0
|
MIT
| 2020-01-15T22:40:27
| 2020-01-15T22:40:27
| null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
import os
import subprocess
from pathlib import Path
from .error import NurError
def _is_repo(path: Path) -> bool:
return path.joinpath("lib/evalRepo.nix").exists()
def _find_root() -> Path:
source_root = Path(__file__).parent.parent.resolve()
if _is_repo(source_root):
        # if it was not built with release.nix
return source_root
else:
root = Path(os.getcwd()).resolve()
        while True:
            if _is_repo(root):
                return root
            new_root = root.parent.resolve()
            if new_root == root:
                if _is_repo(new_root):
                    return new_root
                else:
                    raise NurError("NUR repository not found in current directory")
            root = new_root
ROOT = _find_root()
LOCK_PATH = ROOT.joinpath("repos.json.lock")
MANIFEST_PATH = ROOT.joinpath("repos.json")
EVALREPO_PATH = ROOT.joinpath("lib/evalRepo.nix")
_NIXPKGS_PATH = None
def nixpkgs_path() -> str:
global _NIXPKGS_PATH
if _NIXPKGS_PATH is not None:
return _NIXPKGS_PATH
cmd = ["nix-instantiate", "--find-file", "nixpkgs"]
path = subprocess.check_output(cmd).decode("utf-8").strip()
_NIXPKGS_PATH = str(Path(path).resolve())
return _NIXPKGS_PATH
|
[
"joerg@thalheim.io"
] |
joerg@thalheim.io
|
3b293693b6b6ec55a2f05ceb0a7ce642c441bdba
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/security/azure-mgmt-security/azure/mgmt/security/v2020_01_01/operations/_server_vulnerability_assessment_operations.py
|
a9f17636c842d926979789c6b493a74d6cec9412
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 27,727
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union, cast
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_extended_resource_request(
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url(
"subscription_id", subscription_id, "str", pattern=r"^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$"
),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
),
"resourceNamespace": _SERIALIZER.url("resource_namespace", resource_namespace, "str"),
"resourceType": _SERIALIZER.url("resource_type", resource_type, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
server_vulnerability_assessment = kwargs.pop("server_vulnerability_assessment", "default") # type: str
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url(
"subscription_id", subscription_id, "str", pattern=r"^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$"
),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
),
"resourceNamespace": _SERIALIZER.url("resource_namespace", resource_namespace, "str"),
"resourceType": _SERIALIZER.url("resource_type", resource_type, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
"serverVulnerabilityAssessment": _SERIALIZER.url(
"server_vulnerability_assessment", server_vulnerability_assessment, "str"
),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
server_vulnerability_assessment = kwargs.pop("server_vulnerability_assessment", "default") # type: str
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url(
"subscription_id", subscription_id, "str", pattern=r"^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$"
),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
),
"resourceNamespace": _SERIALIZER.url("resource_namespace", resource_namespace, "str"),
"resourceType": _SERIALIZER.url("resource_type", resource_type, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
"serverVulnerabilityAssessment": _SERIALIZER.url(
"server_vulnerability_assessment", server_vulnerability_assessment, "str"
),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
server_vulnerability_assessment = kwargs.pop("server_vulnerability_assessment", "default") # type: str
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url(
"subscription_id", subscription_id, "str", pattern=r"^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$"
),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
),
"resourceNamespace": _SERIALIZER.url("resource_namespace", resource_namespace, "str"),
"resourceType": _SERIALIZER.url("resource_type", resource_type, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
"serverVulnerabilityAssessment": _SERIALIZER.url(
"server_vulnerability_assessment", server_vulnerability_assessment, "str"
),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class ServerVulnerabilityAssessmentOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.security.v2020_01_01.SecurityCenter`'s
:attr:`server_vulnerability_assessment` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_extended_resource(
self, resource_group_name: str, resource_namespace: str, resource_type: str, resource_name: str, **kwargs: Any
) -> _models.ServerVulnerabilityAssessmentsList:
"""Gets a list of server vulnerability assessment onboarding statuses on a given resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param resource_namespace: The Namespace of the resource. Required.
:type resource_namespace: str
:param resource_type: The type of the resource. Required.
:type resource_type: str
:param resource_name: Name of the resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerVulnerabilityAssessmentsList or the result of cls(response)
:rtype: ~azure.mgmt.security.v2020_01_01.models.ServerVulnerabilityAssessmentsList
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.ServerVulnerabilityAssessmentsList]
request = build_list_by_extended_resource_request(
resource_group_name=resource_group_name,
resource_namespace=resource_namespace,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_extended_resource.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ServerVulnerabilityAssessmentsList", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_extended_resource.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments"} # type: ignore
@distributed_trace
def get(
self, resource_group_name: str, resource_namespace: str, resource_type: str, resource_name: str, **kwargs: Any
) -> _models.ServerVulnerabilityAssessment:
"""Gets a server vulnerability assessment onboarding statuses on a given resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param resource_namespace: The Namespace of the resource. Required.
:type resource_namespace: str
:param resource_type: The type of the resource. Required.
:type resource_type: str
:param resource_name: Name of the resource. Required.
:type resource_name: str
:keyword server_vulnerability_assessment: ServerVulnerabilityAssessment status. only a
'default' value is supported. Default value is "default". Note that overriding this default
value may result in unsupported behavior.
:paramtype server_vulnerability_assessment: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerVulnerabilityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.security.v2020_01_01.models.ServerVulnerabilityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
server_vulnerability_assessment = kwargs.pop("server_vulnerability_assessment", "default") # type: str
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.ServerVulnerabilityAssessment]
request = build_get_request(
resource_group_name=resource_group_name,
resource_namespace=resource_namespace,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
server_vulnerability_assessment=server_vulnerability_assessment,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ServerVulnerabilityAssessment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}"} # type: ignore
@distributed_trace
def create_or_update(
self, resource_group_name: str, resource_namespace: str, resource_type: str, resource_name: str, **kwargs: Any
) -> _models.ServerVulnerabilityAssessment:
"""Creating a server vulnerability assessment on a resource, which will onboard a resource for
having a vulnerability assessment on it.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param resource_namespace: The Namespace of the resource. Required.
:type resource_namespace: str
:param resource_type: The type of the resource. Required.
:type resource_type: str
:param resource_name: Name of the resource. Required.
:type resource_name: str
:keyword server_vulnerability_assessment: ServerVulnerabilityAssessment status. only a
'default' value is supported. Default value is "default". Note that overriding this default
value may result in unsupported behavior.
:paramtype server_vulnerability_assessment: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerVulnerabilityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.security.v2020_01_01.models.ServerVulnerabilityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
server_vulnerability_assessment = kwargs.pop("server_vulnerability_assessment", "default") # type: str
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.ServerVulnerabilityAssessment]
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_namespace=resource_namespace,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
server_vulnerability_assessment=server_vulnerability_assessment,
api_version=api_version,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ServerVulnerabilityAssessment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_namespace: str, resource_type: str, resource_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
server_vulnerability_assessment = kwargs.pop("server_vulnerability_assessment", "default") # type: str
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
resource_namespace=resource_namespace,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
server_vulnerability_assessment=server_vulnerability_assessment,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}"} # type: ignore
@distributed_trace
def begin_delete(
self, resource_group_name: str, resource_namespace: str, resource_type: str, resource_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Removing server vulnerability assessment from a resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param resource_namespace: The Namespace of the resource. Required.
:type resource_namespace: str
:param resource_type: The type of the resource. Required.
:type resource_type: str
:param resource_name: Name of the resource. Required.
:type resource_name: str
:keyword server_vulnerability_assessment: ServerVulnerabilityAssessment status. only a
'default' value is supported. Default value is "default". Note that overriding this default
value may result in unsupported behavior.
:paramtype server_vulnerability_assessment: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
server_vulnerability_assessment = kwargs.pop("server_vulnerability_assessment", "default") # type: str
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
resource_namespace=resource_namespace,
resource_type=resource_type,
resource_name=resource_name,
server_vulnerability_assessment=server_vulnerability_assessment,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}"} # type: ignore
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
1c50b1cb0dad2f0bc854e38e79cd4a34774cb970
|
28dbe47aba287ed94ef7bba734203736bcc06249
|
/.history/dmac_20200715002741.py
|
56378030f386fb56196f8f2ccc62c0c32ea76ac5
|
[] |
no_license
|
ntung88/Trading_Algorithms
|
242fd816b19df95e02e9fcd8c5c91c862d2ede40
|
d96488b1754e3751f739d9c3f094a8f8dc54a0a9
|
refs/heads/master
| 2022-11-19T16:04:07.800344
| 2020-07-17T21:14:10
| 2020-07-17T21:14:10
| 276,239,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,471
|
py
|
import yfinance as yf
import numpy as np
import pandas as pd
from pandasgui import show
from scipy.optimize import minimize
import matplotlib.pyplot as plt
'''
A library for running a Dual Moving Average Crossover (DMAC) trading strategy, with
backtesting, period optimization, and visualization tools.
'''
# Period of time (in years) to look back when optimizing periods in the return calculation
HINDSIGHT = 2
def clean_data(data):
'''
Removes rows (days) with missing data from a dataframe or series
'''
incomplete_idxs = False
if isinstance(data, pd.DataFrame):
for col in data.columns:
incomplete_idxs |= np.isnan(data[col])
else:
incomplete_idxs |= np.isnan(data)
return data[~incomplete_idxs]
def calc_crossovers(sma, lma):
'''
Returns a dataframe containing only the rows where a crossover of the sma and lma
is detected. 1 indicates a buy point (sma moving above lma), -1 a sell point
'''
num_points = len(clean_data(lma))
high = (sma > lma)[-num_points:]
crossovers = high.astype(int).diff()[1:]
trimmed = crossovers[crossovers != 0]
return trimmed
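# Hypothetical usage sketch (added for illustration; not part of the original module).
# On a toy pair of moving averages, calc_crossovers flags the bar where the short
# average first rises above the long one as a buy (1) and the bar where it falls back
# as a sell (-1).
def _demo_calc_crossovers():
    sma = pd.Series([1.0, 2.0, 3.0, 2.0, 1.0])
    lma = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0])
    # Expected result: 1 (buy) at index 2 and -1 (sell) at index 3.
    return calc_crossovers(sma, lma)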
def profit(data, crossovers):
'''
Calculates profit assuming data covers a continuous time period with the given crossovers
'''
if len(crossovers) == 0:
return 0
total = 0
# If first crossover is a sell point assume implicit buy point at very start of data
print(crossovers.iloc[0])
if crossovers.iloc[0] == -1:
total += data.loc[crossovers.index[0]] - data.iloc[0]
# Add the difference between value at sell points and value at buy points to our profit
for i in range(1,len(crossovers)):
left_bound = crossovers.index[i-1]
if crossovers.loc[left_bound] == 1:
right_bound = crossovers.index[i]
total += data.loc[right_bound] - data.loc[left_bound]
# If last crossover is a buy point assume implicit sell point at end of data (include
# profit we have made on current holding)
if crossovers.iloc[-1] == 1:
total += data.iloc[-1] - data.loc[crossovers.index[-1]]
return total
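# Worked example (added for illustration): with data = [10, 12, 11, 15, 14] and
# crossovers holding a buy (1) at index 1 and a sell (-1) at index 3, the loop adds
# data[3] - data[1] = 15 - 12 = 3; there is no implicit leading buy or trailing sell,
# so profit(data, crossovers) returns 3.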
def optimize(data):
'''
Uses scipy's minimize routine (COBYLA) to find the optimal short and long moving-average
periods. Because profit is not a convex function of the periods, a wide grid of seeds is
used as initial guesses, in the hope of finding every local minimum and keeping the best
one as an estimate of the global minimum.
'''
cons = ({'type': 'ineq', 'fun': lambda x: x[1] - x[0]},
{'type': 'ineq', 'fun': lambda x: x[0] - 5})
# Ranges of initial guesses for short and long periods
# Smaller step sizes (e.g. 30 and 40) improve coverage; the larger steps used here run faster
short_seeds = range(5, 300, 50)
long_seeds = range(20, 800, 70)
# short_seeds = [100]
# long_seeds = [750]
minimum = float('inf')
best_short = 0
best_long = 0
for short_seed in short_seeds:
for long_seed in long_seeds:
# Use all combinations of ranges where long_seed > short_seed as initial guesses
if long_seed > short_seed:
res = minimize(run_analysis, [short_seed, long_seed], args=(data,), method='COBYLA', constraints=cons, options={'rhobeg': 10.0, 'catol': 0.0})
if res.fun < minimum:
best_short = res.x[0]
best_long = res.x[1]
minimum = res.fun
return (int(round(best_short)), int(round(best_long)), minimum)
def run_analysis(periods, data):
'''
Objective function for minimization, runs profit calculation with given periods and data
Returns negative profit for minimization (maximization of profit)
'''
short_period = int(round(periods[0]))
long_period = int(round(periods[1]))
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
print('sma')
print(sma)
print('lma')
print(lma)
crossovers = calc_crossovers(sma, lma)
return -1 * profit(data, crossovers)
def visualize(data, short_period, long_period):
'''
Useful for visualizing the algorithm's decisions. Plots the stock price with colored
vertical bars at buy and sell points
'''
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
crossovers = calc_crossovers(sma, lma)
buys = pd.DataFrame(crossovers[crossovers == 1.0])
sells = pd.DataFrame(crossovers[crossovers == -1.0])
data.plot(color='black')
for buy in buys.index:
plt.axvline(buy, color="green")
for sell in sells.index:
plt.axvline(sell, color="red")
plt.show()
def split_year(data):
'''
Split dataframe into a list of dataframes, each corresponding to the data for each year
'''
years = np.unique(data.index.year)
split = []
for year in years:
split.append(data[data.index.year == year])
return split
def calc_returns(split_data):
'''
Calculate annual returns for periods optimized over slices (of size HINDSIGHT) of past data. Gives an idea of what kind of results to realistically expect
'''
annual_returns = []
max_return = float('-inf')
min_return = float('inf')
for i in range(2, len(split_data)):
test_year = split_data[i]
optimize_period = pd.DataFrame(np.concatenate(split_data[i-HINDSIGHT:i]))
print('optimize period:')
print(optimize_period)
periods = optimize(optimize_period)
print('periods:')
print(periods)
# run_analysis returns the negated profit (it is a minimization objective), so flip the sign
annual_profit = -run_analysis(periods, test_year)
annual_returns.append(annual_profit)
if annual_profit > max_return: max_return = annual_profit
if annual_profit < min_return: min_return = annual_profit
return annual_returns, max_return, min_return
def main():
'''
Main's current functionality: find optimal windows for the chosen ticker (currently MRNA) and print them, along with the profit over its full price history
'''
ticker = yf.Ticker('MRNA')
# data = yf.download(tickers, period='max', group_by='ticker')
data = ticker.history(period="max")[:-4]
dirty = pd.DataFrame(data)
#Currently using only closing prices
frame = clean_data(dirty)['Close']
periods = optimize(frame)
# periods = calc_returns(split_year(frame))
print(periods)
# visualize(frame, periods[0], periods[1])
if __name__ == "__main__":
main()
'''
how to quantify number of shares you want to buy (steepness of trend, volatility, top 20 stocks?)
'''
|
[
"nathantung@Nathans-MacBook-Pro.local"
] |
nathantung@Nathans-MacBook-Pro.local
|
e56fe457c611b069400b8d96e73af45fe2389bdb
|
4474fb478f27f9caa5e4c9c465369230daf2c3ac
|
/project 2/task2.py
|
856b3e4454d95c47243c65920022d9319d6751e3
|
[] |
no_license
|
hariprasath95/computer_vision_image_processing
|
0a6a8f107028c498ba47de23d0e744eb9f9b34a4
|
3dbc2a82911afec1238206495507447997a63a23
|
refs/heads/master
| 2020-03-30T05:09:23.556754
| 2018-12-03T09:56:32
| 2018-12-03T09:56:32
| 150,783,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,233
|
py
|
UBIT = 'hparthas'
import cv2
import numpy as np
from matplotlib import pyplot as plt
import random
np.random.seed(sum([ord(c) for c in UBIT]))
# read image 1 and convert to BW
m1_clr = cv2.imread('data/tsucuba_left.png')
image1_bw= cv2.cvtColor(m1_clr,cv2.COLOR_BGR2GRAY)
# read image 2 and convert to BW
m2_clr = cv2.imread('data/tsucuba_right.png')
image2_bw = cv2.cvtColor(m2_clr,cv2.COLOR_BGR2GRAY)
# Extract Sift features and compute Descriptors for image 1 and image 2
sift = cv2.xfeatures2d.SIFT_create()
keypoints_mountain1 ,m1_des= sift.detectAndCompute(image1_bw,None)
image1_withkp = cv2.drawKeypoints(m1_clr,keypoints_mountain1,None)
cv2.imwrite('output/task2/task2_sift1.jpg',image1_withkp)
keypoints_mountain2,m2_des = sift.detectAndCompute(image2_bw,None)
image2_withkp = cv2.drawKeypoints(m2_clr,keypoints_mountain2,None)
cv2.imwrite('output/task2/task2_sift2.jpg',image2_withkp)
def drawlines(img1,img2,lines,pts1,pts2,color):
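'''
Draw each epipolar line (coefficients [a, b, c] of a*x + b*y + c = 0) across the full
width of img1, taking endpoints at x = 0 and x = c (the image width), and mark the
matched points: pt1 on img1 and pt2 on img2, using the same colour per pair.
Returns img1 with the lines and points drawn.
'''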
r,c = (cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)).shape
i = 0
for r,pt1,pt2 in zip(lines,pts1,pts2):
x0,y0 = map(int, [0, -r[2]/r[1] ])
x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
img1 = cv2.line(img1, (x0,y0), (x1,y1), color[i],1)
img1 = cv2.circle(img1,tuple(pt1),5,color[i],-1)
img2 = cv2.circle(img2,tuple(pt2),5,color[i],-1)
i = i+1
return img1
pts1 = []
pts2 = []
bf = cv2.BFMatcher()
matches = bf.knnMatch(m1_des,m2_des, k=2)
for i,(m,n) in enumerate(matches):
pts2.append(keypoints_mountain2[m.trainIdx].pt)
pts1.append(keypoints_mountain1[m.queryIdx].pt)
fundamentalmat, mask = cv2.findFundamentalMat(np.array(pts1),np.array(pts2),cv2.FM_RANSAC)
print(fundamentalmat)
pts1 = np.array(pts1)[mask.ravel() == 1]
pts2 = np.array(pts2)[mask.ravel() == 1]
random_points = np.random.randint(0, len(pts1), 10)
selected_point1,selected_point2 = list(), list()
for i, (p1, p2) in enumerate(zip(pts1, pts2)):
if i in random_points:
selected_point1.append(p1)
selected_point2.append(p2)
selected_point1 = np.float32(selected_point1)
selected_point2 = np.float32(selected_point2)
colors = []
for i in range(0,10):
colors.append(tuple(np.random.randint(0,255,3).tolist()))
img1_lines = cv2.computeCorrespondEpilines(selected_point1.reshape(-1, 1, 2), 2, fundamentalmat)
img1_lines = img1_lines.reshape(-1, 3)
img1_lines1 = drawlines(m1_clr,m2_clr,img1_lines,selected_point1,selected_point2,colors)
img2_lines = cv2.computeCorrespondEpilines(selected_point2.reshape(-1, 1, 2), 2, fundamentalmat)
img2_lines = img2_lines.reshape(-1, 3)
img2_lines1 = drawlines(m2_clr,m1_clr,img2_lines,selected_point2,selected_point1,colors)
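# The block matcher below is configured with numDisparities=96 and minDisparity=16.
# StereoBM.compute() returns fixed-point disparities scaled by 16, hence the division
# by 16.0; (d - minDisparity) / numDisparities then maps the values to roughly [0, 1]
# before the disparity map is saved as a grayscale image.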
stereo = cv2.StereoBM_create(96, blockSize=17)
stereo.setMinDisparity(16)
stereo.setDisp12MaxDiff(0)
stereo.setUniquenessRatio(10)
stereo.setSpeckleRange(32)
stereo.setSpeckleWindowSize(100)
disparity_map = stereo.compute(image1_bw, image2_bw).astype(np.float32) / 16.0
disp_map = (disparity_map - 16)/96
# printing out all the output
plt.imsave('output/task2/task2_disparity.jpg', disp_map, cmap=plt.cm.gray)
cv2.imwrite('output/task2/task2_epi_right.jpg', img2_lines1)
cv2.imwrite('output/task2/task2_epi_left.jpg', img1_lines1)
cv2.imwrite("output/task2/merged.jpg", np.hstack([img2_lines1, img1_lines1]))
|
[
"-"
] |
-
|
569d9b2f6f80fca2a538781490709f78f5bb87c9
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/not/sample_good810.py
|
2d825235e439c825c7db724e4c12e9a012a6fdd9
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674
| 2020-05-05T08:37:16
| 2020-05-05T08:37:16
| 138,386,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
import difflib
import textwrap
import random
import readline
import datetime
nterms = 422
n1, n2 = 0, 1
if nterms <= 0:
print("Please provide a positive integer.")
elif nterms == 1:
print("Fibonacci sequence upto", nterms, ":")
print(n1)
else:
print("Fibonacci sequence:")
count = 0
while count < nterms:
print(n1)
nth = n1 + n2
n1 = n2
n2 = nth
count += 1
|
[
"barnsa@uni.coventry.ac.uk"
] |
barnsa@uni.coventry.ac.uk
|
a02f9b72024d40ecfced70d6044ca509b2e7e823
|
effce116340b7d937bd285e43b49e1ef83d56156
|
/data_files/673 Number of Longest Increasing Subsequence.py
|
324b988bc2e0f1fbbc1e5247543d6f56b9c932ca
|
[] |
no_license
|
DL2021Spring/CourseProject
|
a7c7ef57d69bc1b21e3303e737abb27bee3bd585
|
108cdd906e705e9d4d05640af32d34bfc8b124da
|
refs/heads/master
| 2023-04-11T18:52:30.562103
| 2021-05-18T09:59:59
| 2021-05-18T09:59:59
| 365,733,976
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
from typing import List
class LenCnt:
def __init__(self, l, c):
self.l = l
self.c = c
def __repr__(self):
return repr((self.l, self.c))
class Solution:
def findNumberOfLIS(self, A: List[int]) -> int:
if not A:
return 0
n = len(A)
F = [LenCnt(l=1, c=1) for _ in A]
mx = LenCnt(l=1, c=1)
for i in range(1, n):
for j in range(i):
if A[i] > A[j]:
if F[i].l < F[j].l + 1:
F[i].l = F[j].l + 1
F[i].c = F[j].c
elif F[i].l == F[j].l + 1:
F[i].c += F[j].c
if F[i].l > mx.l:
mx.l = F[i].l
mx.c = F[i].c
elif F[i].l == mx.l:
mx.c += F[i].c
return mx.c
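# F[i].l holds the length of the longest increasing subsequence ending at A[i] and
# F[i].c counts how many such subsequences exist; mx tracks the best (length, count)
# pair overall. Worked example: for [1, 3, 5, 4, 7] the longest length is 4, reached
# by [1, 3, 5, 7] and [1, 3, 4, 7], so the answer is 2 (second assertion below).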
if __name__ == "__main__":
assert Solution().findNumberOfLIS([1,1,1,2,2,2,3,3,3]) == 27
assert Solution().findNumberOfLIS([1, 3, 5, 4, 7]) == 2
assert Solution().findNumberOfLIS([2, 2, 2, 2, 2]) == 5
|
[
"1042448815@qq.com"
] |
1042448815@qq.com
|
c71847414fb17baa6000c236edad0ccc41ceef33
|
3fdad7e4cf4725e90354a674eddea4ec34f2344c
|
/myia/operations/macro_dtype.py
|
3a8c6fd0f5874ab38ee713d174cf5ba6a72c6c5f
|
[
"MIT"
] |
permissive
|
zangmunger/myia
|
1f2e9045af62da5a5d832eed0436de7c5813cd99
|
0aa38aa3c43648ee408dc031352ba442f6bed59f
|
refs/heads/master
| 2020-12-13T04:10:28.154027
| 2020-01-15T20:33:05
| 2020-01-15T20:33:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
"""Implementation of the 'dtype' macro."""
from ..lib import AbstractArray, Constant, macro
@macro
async def dtype(info, arr: AbstractArray):
"""Macro implementation for 'dtype'."""
return Constant((await arr.get()).element)
__operation_defaults__ = {
'name': 'dtype',
'registered_name': 'dtype',
'mapping': dtype,
'python_implementation': None,
}
|
[
"abergeron@gmail.com"
] |
abergeron@gmail.com
|
5bf4d8bfacc9a85401f649b40ed3654d51bcc16f
|
b231c58c841dfc3e90677ce15cb6fe246b56bac5
|
/backend/oloffers_24608/urls.py
|
c4b31a37d38eccfb3cdf67e3a3ddc7782d33c132
|
[] |
no_license
|
crowdbotics-apps/oloffers-24608
|
025688272bd3322ce8748160fbe89ebd0a8691d7
|
0900f3d64303e932aca702335024bfe398e68cd9
|
refs/heads/master
| 2023-03-19T04:33:32.594244
| 2021-02-18T22:08:13
| 2021-02-18T22:08:13
| 340,189,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
"""oloffers_24608 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "OLOffers"
admin.site.site_title = "OLOffers Admin Portal"
admin.site.index_title = "OLOffers Admin"
# swagger
api_info = openapi.Info(
title="OLOffers API",
default_version="v1",
description="API documentation for OLOffers App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
6674b616ab97613bb411dc42120f9e008a75d530
|
23ed45d816476bc2c73a0e0b0d6bf96c713c979d
|
/bearsong.py
|
45b321d8c2af89b38a3356ca648e9b1100f6e379
|
[] |
no_license
|
jason12360/zixue
|
e5fd0c6cd0ba8d4c72420360697ad23a5479615d
|
acc696732bb770f6fc0f0a8d4d076305ae39a9a6
|
refs/heads/master
| 2020-03-17T20:22:02.306897
| 2018-06-01T09:46:13
| 2018-06-01T09:46:13
| 133,906,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
word = 'bottles'
for beer_num in range(99,0,-1):
print(beer_num,word,'of beer on the wall.')
print(beer_num,word,'of beer.')
print('Take it down.')
print('Pass it around.')
if beer_num == 1:
print('No more bottle of beer on the wall')
else:
new_num = beer_num - 1
if new_num == 1:
word = 'bottle'
print(new_num,word,'of beer on the wall.')
|
[
"370828117@qq.com"
] |
370828117@qq.com
|
0c1bef08943f239f67d9037534080ced61668cfd
|
f6e83bc298b24bfec278683341b2629388b22e6c
|
/scripts/check_db_integrity.py
|
3a994897b46425ba6bb41e00d88a664abd1712c1
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
noaOrMlnx/sonic-utilities
|
8d8ee86a9c258b4a5f37af69359ce100c29ad99c
|
9881f3edaa136233456408190367a09e53386376
|
refs/heads/master
| 2022-08-17T23:15:57.577454
| 2022-05-18T21:49:32
| 2022-05-18T21:49:32
| 225,886,772
| 1
| 0
|
NOASSERTION
| 2022-07-19T08:49:40
| 2019-12-04T14:31:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
#!/usr/bin/env python3
"""
This is to verify if Database has critical tables present before warmboot can proceed.
If warmboot is allowed with missing critical tables, it can lead to issues in going
down path or during the recovery path. This test detects such issues before proceeding.
The verification procedure here uses JSON schemas to verify the DB entities.
In future, to verify new tables or their content, just the schema modification is needed.
No modification may be needed to the integrity check logic.
"""
import os, sys
import json, jsonschema
import syslog
import subprocess
import traceback
DB_SCHEMA = {
"COUNTERS_DB":
{
"$schema": "http://json-schema.org/draft-06/schema",
"type": "object",
"title": "Schema for COUNTERS DB's entities",
"required": ["COUNTERS_PORT_NAME_MAP"],
"properties": {
"COUNTERS_PORT_NAME_MAP": {"$id": "#/properties/COUNTERS_PORT_NAME_MAP", "type": "object"}
}
}
}
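# Illustration (added, with made-up dump contents): the schema only requires that the
# COUNTERS_PORT_NAME_MAP table is present and is an object, so
#   jsonschema.validate({"COUNTERS_PORT_NAME_MAP": {"Ethernet0": "oid:0x1"}},
#                       DB_SCHEMA["COUNTERS_DB"])
# passes, while validating an empty dump {} raises jsonschema.exceptions.ValidationError
# because the required table is missing.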
def main():
if not DB_SCHEMA:
return 0
for db_name, schema in DB_SCHEMA.items():
db_dump_file = "/tmp/{}.json".format(db_name)
dump_db_cmd = "sonic-db-dump -n 'COUNTERS_DB' -y > {}".format(db_dump_file)
p = subprocess.Popen(dump_db_cmd, shell=True, text=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(_, err) = p.communicate()
rc = p.wait()
if rc != 0:
print("Failed to dump db {}. Return code: {} with err: {}".format(db_name, rc, err))
try:
with open(db_dump_file) as fp:
db_dump_data = json.load(fp)
except ValueError as err:
syslog.syslog(syslog.LOG_DEBUG, "DB json file is not a valid json file. " +\
"Error: {}".format(str(err)))
return 1
# What: Validate if critical tables and entries are present in DB.
# Why: This is needed to avoid warmbooting with a bad DB; which can
# potentially trigger failures in the reboot recovery path.
# How: Validate DB against a schema which defines required tables.
try:
jsonschema.validate(instance=db_dump_data, schema=schema)
except jsonschema.exceptions.ValidationError as err:
syslog.syslog(syslog.LOG_ERR, "Database is missing tables/entries needed for reboot procedure. " +\
"DB integrity check failed with:\n{}".format(str(err.message)))
return 1
syslog.syslog(syslog.LOG_DEBUG, "Database integrity checks passed.")
return 0
if __name__ == '__main__':
res = 0
try:
res = main()
except KeyboardInterrupt:
syslog.syslog(syslog.LOG_NOTICE, "SIGINT received. Quitting")
res = 1
except Exception as e:
syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc()))
res = 2
finally:
syslog.closelog()
try:
sys.exit(res)
except SystemExit:
os._exit(res)
|
[
"noreply@github.com"
] |
noaOrMlnx.noreply@github.com
|
bc67cc314ee03bbe982d2d0f9e6123cef9b45598
|
8550f21689b0549d899b738411d2f94e1e5cc7e2
|
/catkin_ws/devel/lib/python2.7/dist-packages/mavros_msgs/srv/_CommandTriggerControl.py
|
2ecd8ccdd401bf391ec50284ebbee533562d2a0e
|
[] |
no_license
|
pratyusv/px4_data_collection
|
fb21c0e53b9b9c5b7d3d4f1db25589ca852561b9
|
cb45a180ff2c2b3cf7d9693c54997123dbfd46b5
|
refs/heads/master
| 2020-03-20T01:27:21.756306
| 2018-06-12T17:34:01
| 2018-06-12T17:34:01
| 137,076,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
/home/pratyush/git/px4_data_collection/catkin_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/srv/_CommandTriggerControl.py
|
[
"pratyushvarshney91@gmail.com"
] |
pratyushvarshney91@gmail.com
|
33b9a78047691b1cba997ab1118c3c51209a9d79
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/sentry/2016/8/mail.py
|
04a7cd3cd597f4687513a9b5f50d11221517a4a9
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 17,616
|
py
|
from __future__ import absolute_import, print_function
import itertools
import logging
import time
import traceback
import uuid
from datetime import datetime, timedelta
from random import Random
import six
from django.contrib.webdesign.lorem_ipsum import WORDS
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.views.generic import View
from sentry.constants import LOG_LEVELS
from sentry.digests import Record
from sentry.digests.notifications import Notification, build_digest
from sentry.digests.utilities import get_digest_metadata
from sentry.http import get_server_hostname
from sentry.models import (
Activity, Event, Group, GroupStatus, Organization, OrganizationMember,
Project, Release, Rule, Team
)
from sentry.plugins.sentry_mail.activity import emails
from sentry.utils.dates import to_datetime, to_timestamp
from sentry.utils.email import inline_css
from sentry.utils.http import absolute_uri
from sentry.utils.samples import load_data
from sentry.web.decorators import login_required
from sentry.web.helpers import render_to_response, render_to_string
logger = logging.getLogger(__name__)
def get_random(request):
seed = request.GET.get('seed', six.text_type(time.time()))
return Random(seed)
def make_message(random, length=None):
if length is None:
length = int(random.weibullvariate(8, 3))
return ' '.join(random.choice(WORDS) for _ in range(length))
def make_culprit(random):
def make_module_path_components(min, max):
for _ in range(random.randint(min, max)):
yield ''.join(random.sample(WORDS, random.randint(1, int(random.paretovariate(2.2)))))
return '{module} in {function}'.format(
module='.'.join(make_module_path_components(1, 4)),
function=random.choice(WORDS)
)
def make_group_metadata(random, group):
return {
'type': 'error',
'metadata': {
'type': '{}Error'.format(
''.join(word.title() for word in random.sample(WORDS, random.randint(1, 3))),
),
'value': make_message(random),
}
}
def make_group_generator(random, project):
epoch = to_timestamp(datetime(2016, 6, 1, 0, 0, 0, tzinfo=timezone.utc))
for id in itertools.count(1):
first_seen = epoch + random.randint(0, 60 * 60 * 24 * 30)
last_seen = random.randint(first_seen, first_seen + (60 * 60 * 24 * 30))
group = Group(
id=id,
project=project,
culprit=make_culprit(random),
level=random.choice(LOG_LEVELS.keys()),
message=make_message(random),
first_seen=to_datetime(first_seen),
last_seen=to_datetime(last_seen),
status=random.choice((
GroupStatus.UNRESOLVED,
GroupStatus.RESOLVED,
)),
)
if random.random() < 0.8:
group.data = make_group_metadata(random, group)
yield group
# TODO(dcramer): use https://github.com/disqus/django-mailviews
class MailPreview(object):
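"""Render a text/HTML email template pair with the given context so the resulting
message can be previewed in the browser via the debug views below."""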
def __init__(self, html_template, text_template, context=None):
self.html_template = html_template
self.text_template = text_template
self.context = context if context is not None else {}
def text_body(self):
return render_to_string(self.text_template, self.context)
def html_body(self):
try:
return inline_css(render_to_string(self.html_template, self.context))
except Exception:
traceback.print_exc()
raise
def render(self, request):
return render_to_response('sentry/debug/mail/preview.html', {
'preview': self,
'format': request.GET.get('format'),
})
class ActivityMailPreview(object):
def __init__(self, activity):
self.email = emails.get(activity.type)(activity)
def get_context(self):
context = self.email.get_base_context()
context.update(self.email.get_context())
return context
def text_body(self):
return render_to_string(self.email.get_template(), self.get_context())
def html_body(self):
try:
return inline_css(render_to_string(
self.email.get_html_template(), self.get_context()))
except Exception:
import traceback
traceback.print_exc()
raise
class ActivityMailDebugView(View):
def get(self, request):
org = Organization(
id=1,
slug='organization',
name='My Company',
)
team = Team(
id=1,
slug='team',
name='My Team',
organization=org,
)
project = Project(
id=1,
organization=org,
team=team,
slug='project',
name='My Project',
)
group = next(
make_group_generator(
get_random(request),
project,
),
)
event = Event(
id=1,
project=project,
group=group,
message=group.message,
data=load_data('python'),
datetime=datetime(2016, 6, 13, 3, 8, 24, tzinfo=timezone.utc),
)
activity = Activity(
group=event.group, project=event.project,
**self.get_activity(request, event)
)
return render_to_response('sentry/debug/mail/preview.html', {
'preview': ActivityMailPreview(activity),
'format': request.GET.get('format'),
})
@login_required
def new_event(request):
platform = request.GET.get('platform', 'python')
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
project = Project(
id=1,
slug='example',
name='Example',
team=team,
organization=org,
)
random = get_random(request)
group = next(
make_group_generator(random, project),
)
event = Event(
id=1,
project=project,
group=group,
message=group.message,
data=load_data(platform),
datetime=to_datetime(
random.randint(
to_timestamp(group.first_seen),
to_timestamp(group.last_seen),
),
),
)
rule = Rule(label="An example rule")
interface_list = []
for interface in six.itervalues(event.interfaces):
body = interface.to_email_html(event)
if not body:
continue
interface_list.append((interface.get_title(), mark_safe(body)))
return MailPreview(
html_template='sentry/emails/error.html',
text_template='sentry/emails/error.txt',
context={
'rule': rule,
'group': group,
'event': event,
'link': 'http://example.com/link',
'interfaces': interface_list,
'tags': event.get_tags(),
'project_label': project.name,
'tags': [
('logger', 'javascript'),
('environment', 'prod'),
('level', 'error'),
('device', 'Other')
]
},
).render(request)
@login_required
def digest(request):
random = get_random(request)
# TODO: Refactor all of these into something more manageable.
org = Organization(
id=1,
slug='example',
name='Example Organization',
)
team = Team(
id=1,
slug='example',
name='Example Team',
organization=org,
)
project = Project(
id=1,
slug='example',
name='Example Project',
team=team,
organization=org,
)
rules = {i: Rule(
id=i,
project=project,
label="Rule #%s" % (i,),
) for i in range(1, random.randint(2, 4))}
state = {
'project': project,
'groups': {},
'rules': rules,
'event_counts': {},
'user_counts': {},
}
records = []
event_sequence = itertools.count(1)
group_generator = make_group_generator(random, project)
for i in range(random.randint(1, 30)):
group = next(group_generator)
state['groups'][group.id] = group
offset = timedelta(seconds=0)
for i in range(random.randint(1, 10)):
offset += timedelta(seconds=random.random() * 120)
event = Event(
id=next(event_sequence),
event_id=uuid.uuid4().hex,
project=project,
group=group,
message=group.message,
data=load_data('python'),
datetime=to_datetime(
random.randint(
to_timestamp(group.first_seen),
to_timestamp(group.last_seen),
),
)
)
records.append(
Record(
event.event_id,
Notification(
event,
random.sample(state['rules'], random.randint(1, len(state['rules']))),
),
to_timestamp(event.datetime),
)
)
state['event_counts'][group.id] = random.randint(10, 1e4)
state['user_counts'][group.id] = random.randint(10, 1e4)
digest = build_digest(project, records, state)
start, end, counts = get_digest_metadata(digest)
return MailPreview(
html_template='sentry/emails/digests/body.html',
text_template='sentry/emails/digests/body.txt',
context={
'project': project,
'counts': counts,
'digest': digest,
'start': start,
'end': end,
},
).render(request)
@login_required
def report(request):
from sentry.tasks import reports
random = get_random(request)
duration = 60 * 60 * 24 * 7
timestamp = random.randint(
to_timestamp(datetime(2016, 6, 1, 0, 0, 0, tzinfo=timezone.utc)),
to_timestamp(datetime(2016, 7, 1, 0, 0, 0, tzinfo=timezone.utc)),
)
organization = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=organization,
)
project = Project(
id=1,
organization=organization,
team=team,
slug='project',
name='My Project',
)
start, stop = reports._to_interval(timestamp, duration)
group_instances = {}
def fetch_group_instances(id_list):
results = {}
for id in id_list:
instance = group_instances.get(id)
if instance is not None:
results[id] = instance
return results
def make_group_id_generator():
group_generator = make_group_generator(random, project)
while True:
group = next(group_generator)
if random.random() < 0.95:
group_instances[group.id] = group
yield group.id
group_id_sequence = make_group_id_generator()
def make_release_generator():
id_sequence = itertools.count(1)
while True:
dt = to_datetime(
random.randint(
timestamp - (30 * 24 * 60 * 60),
timestamp,
),
)
yield Release(
id=next(id_sequence),
project=project,
version=''.join([
random.choice('0123456789abcdef') for _ in range(40)
]),
date_added=dt,
date_started=dt,
)
release_instances = {}
def make_release_id_generator():
release_generator = make_release_generator()
while True:
release = next(release_generator)
release_instances[release.id] = release
yield release.id
release_id_generator = make_release_id_generator()
def build_issue_list():
summaries = []
for i in range(3):
summaries.append(
int(random.weibullvariate(10, 1) * random.paretovariate(0.5))
)
return summaries, [(
next(group_id_sequence),
(
int(random.paretovariate(0.3)),
int(random.paretovariate(0.3)),
),
) for _ in xrange(0, random.randint(1, 5))]
def build_release_list():
return reports.trim_release_list([
(
next(release_id_generator),
max(1, int(random.weibullvariate(20, 0.15))),
) for _ in range(random.randint(0, 10))
])
def build_report():
daily_maximum = random.randint(1000, 10000)
rollup = 60 * 60 * 24
series = [(
timestamp + (i * rollup),
(random.randint(0, daily_maximum), random.randint(0, daily_maximum))
) for i in xrange(0, 7)]
aggregates = [
random.randint(0, daily_maximum * 7) if random.random() < 0.9 else None for _ in xrange(0, 4)
]
return series, aggregates, build_issue_list(), build_release_list()
report = reduce(
reports.merge_reports,
[build_report() for _ in xrange(0, random.randint(1, 3))]
)
if random.random() < 0.85:
personal = {
'resolved': random.randint(0, 100),
'users': int(random.paretovariate(0.2)),
}
else:
personal = {
'resolved': 0,
'users': 0,
}
return MailPreview(
html_template='sentry/emails/reports/body.html',
text_template='sentry/emails/reports/body.txt',
context={
'duration': reports.durations[duration],
'interval': {
'start': reports.date_format(start),
'stop': reports.date_format(stop),
},
'report': reports.to_context(
report,
fetch_group_instances,
),
'organization': organization,
'personal': personal,
'user': request.user,
},
).render(request)
@login_required
def request_access(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
return MailPreview(
html_template='sentry/emails/request-team-access.html',
text_template='sentry/emails/request-team-access.txt',
context={
'email': 'foo@example.com',
'name': 'George Bush',
'organization': org,
'team': team,
'url': absolute_uri(reverse('sentry-organization-members', kwargs={
'organization_slug': org.slug,
}) + '?ref=access-requests'),
},
).render(request)
@login_required
def invitation(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
om = OrganizationMember(
id=1,
email='foo@example.com',
organization=org,
)
return MailPreview(
html_template='sentry/emails/member-invite.html',
text_template='sentry/emails/member-invite.txt',
context={
'email': 'foo@example.com',
'organization': org,
'url': absolute_uri(reverse('sentry-accept-invite', kwargs={
'member_id': om.id,
'token': om.token,
})),
},
).render(request)
@login_required
def access_approved(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
return MailPreview(
html_template='sentry/emails/access-approved.html',
text_template='sentry/emails/access-approved.txt',
context={
'email': 'foo@example.com',
'name': 'George Bush',
'organization': org,
'team': team,
},
).render(request)
@login_required
def confirm_email(request):
email = request.user.emails.first()
email.set_hash()
email.save()
return MailPreview(
html_template='sentry/emails/confirm_email.html',
text_template='sentry/emails/confirm_email.txt',
context={
'confirm_email': 'foo@example.com',
'user': request.user,
'url': absolute_uri(reverse(
'sentry-account-confirm-email',
args=[request.user.id, email.validation_hash]
)),
'is_new_user': True,
},
).render(request)
@login_required
def recover_account(request):
return MailPreview(
html_template='sentry/emails/recover_account.html',
text_template='sentry/emails/recover_account.txt',
context={
'user': request.user,
'url': absolute_uri(reverse(
'sentry-account-confirm-email',
args=[request.user.id, 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX']
)),
'domain': get_server_hostname(),
},
).render(request)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
b93802b90b96587bbc604e99f8afd807224026b6
|
d752046f2a056ca1b26323d431dc0a02153fe071
|
/corphub_app/views.py
|
81be1004dd1aa6fa1e8611c3b38e9714b8818496
|
[] |
no_license
|
calixo888/corphub
|
2cecac4a116ce49df64428da2f602cc00c7ed2d6
|
bc8e811b0edef18a906595e93c3ef8abf2198fca
|
refs/heads/master
| 2020-07-06T07:41:49.775105
| 2019-08-18T00:24:22
| 2019-08-18T00:24:22
| 202,943,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from corphub_app import forms
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import re
# Create your views here.
links = []
agent = UserAgent()
header = {'user-agent': agent.chrome}
query = ""
def index(request):
global links
global query
if request.method == "POST":
form = forms.SearchForm(request.POST)
if form.is_valid():
query = form.cleaned_data['search']
links = []
queries = []
queries.append(query)
queries.append("\"{}\"".format(query))
for new_query in queries:
links = search_web(links, new_query, False)
links = search_web(links, new_query, True)
else:
form = forms.SearchForm()
query = ""
midpoint = len(links) // 2
return render(request, "corphub_app/index.html", context={"form": form, "links1": links[:20], "links2": links[20:40]})
def search_web(links, query, news):
if news:
page = requests.get("https://news.google.com/search?q=" + query + "&hl=en-US&gl=US&ceid=US%3Aen", headers=header)
soup = BeautifulSoup(page.content)
for i in soup.find_all('a', href=True):
if str(i['href']).startswith("./articles/"):
link = "https://news.google.com" + i['href'][1:]
links.append(link)
else:
page = requests.get("https://www.google.dz/search?q=see")
soup = BeautifulSoup(page.content)
for link in soup.find_all("a",href=re.compile("(?<=/url\?q=)(htt.*://.*)")):
new_link = re.split(":(?=http)",link["href"].replace("/url?q=",""))
links.append(new_link[0])
return list(set(links))
def viewall(request):
global query
links = []
queries = []
queries.append(query)
# queries.append(query + " news")
# queries.append(query + " speculations")
# queries.append(query + " stock")
# queries.append(query + " startup")
# queries.append(query + " development")
# queries.append(query + " founder")
# queries.append(query + " funding")
# queries.append(query + " products")
# queries.append(query + " market")
# queries.append(query + " evaluation")
# queries.append(query + " launches")
# queries.append("\"{}\"".format(query))
# queries.append("\"{} CEO\"".format(query))
for new_query in queries:
links = search_web(links, new_query, False)
links = search_web(links, new_query, True)
midpoint = len(links) // 2
return render(request, "corphub_app/viewall.html", context={"links1": links[:midpoint], "links2": links[midpoint:-1]})
|
[
"calix.huang1@gmail.com"
] |
calix.huang1@gmail.com
|
706653fd415c83d411ace8de9143ce7840d68d3b
|
631847fafbcfa07bf33eee078d9b59b464ce4b50
|
/optimization/first_sdEta_mjj_optimization/sdEta_mistake_analyses/dEta_mmjj_cuts_plots/loose_analysis_sdeta_2.6_mmjj_1250/Output/Histos/MadAnalysis5job_0/selection_6.py
|
676e32566983b0ec5ab94df2e16d9c835f8efb95
|
[
"MIT"
] |
permissive
|
sheride/axion_pheno
|
7b46aeb7cc562800d78edd9048504fdbc0f5fa42
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
refs/heads/master
| 2021-07-01T08:47:59.981416
| 2021-02-03T23:03:50
| 2021-02-03T23:03:50
| 219,261,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,235
|
py
|
def selection_6():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(0.0,15.0,76,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([0.1,0.3,0.5,0.7,0.9,1.1,1.3,1.5,1.7,1.9,2.1,2.3,2.5,2.7,2.9,3.1,3.3,3.5,3.7,3.9,4.1,4.3,4.5,4.7,4.9,5.1,5.3,5.5,5.7,5.9,6.1,6.3,6.5,6.7,6.9,7.1,7.3,7.5,7.7,7.9,8.1,8.3,8.5,8.7,8.9,9.1,9.3,9.5,9.7,9.9,10.1,10.3,10.5,10.7,10.9,11.1,11.3,11.5,11.7,11.9,12.1,12.3,12.5,12.7,12.9,13.1,13.3,13.5,13.7,13.9,14.1,14.3,14.5,14.7,14.9])
# Creating weights for histo: y7_DELTAR_0
y7_DELTAR_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,14.2269397856,29.2686068716,42.0216839226,53.8740337468,65.345663898,74.9462956554,84.5878873777,85.8406463021,68.0355015886,53.7389538628,43.5119226432,35.2910057012,30.0505782003,25.2645903093,21.7518693251,18.7017759437,16.0078702566,13.977204,12.4255453321,10.7838187416,9.2853840281,7.80332530051,6.9026260738,6.07561878382,4.93746376098,4.22918836906,3.48815940527,2.97230504815,2.34590998594,1.75636209209,1.32648326116,0.978485959928,0.704182595429,0.425784834446,0.266115451529,0.155575186432,0.077787613216,0.0163763339402,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_1
y7_DELTAR_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0243513843729,0.437219398517,1.90818329045,4.98180620258,10.6690529283,14.0212372097,14.7747180546,13.4634543873,11.1414093722,9.25864852021,7.3992469442,4.88447188044,3.72933124821,2.67278182098,1.785830671,0.886755287737,0.437538625934,0.206430031749,0.14567736925,0.0364983561051,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_2
y7_DELTAR_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.010015500946,0.210827354383,1.1043367519,3.92531257812,13.5234914809,30.2912178236,46.0838361656,58.5243306794,62.5391968847,63.3436328869,56.3138671526,44.9199493321,32.1881132876,22.5490791347,14.7893253303,10.0407480386,7.01801405988,4.31735199282,2.72113783099,1.69689179851,0.984039934326,0.491984844366,0.160645101078,0.060271545181,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_3
y7_DELTAR_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0495711330339,0.274957426964,0.973627117883,2.55762299097,7.76663885706,19.9214122151,34.6961896523,50.4545726139,63.3301783937,72.7695267728,75.0469451217,63.9590999488,52.4348162857,40.451246417,31.5271822715,21.8594217387,14.9992489287,8.81199090124,5.32442770432,3.04691429202,1.76012762759,1.02287320723,0.401628955731,0.159564926421,0.0825047904422,0.0109912867624,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_4
y7_DELTAR_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00887767014811,0.0759772789329,0.232925657028,0.637530473781,1.5829580809,4.51377023518,11.9062611638,19.6372610107,25.9343437817,24.8418055666,21.3486646516,17.5619678984,14.2514731715,10.7506924374,7.89660274505,5.41992652692,3.46179761542,2.11585149266,1.21774025906,0.610805536993,0.273377576975,0.125312833378,0.0602072654643,0.0167761688189,0.00493094604148,0.000987661857571,0.000986317874714,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_5
y7_DELTAR_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00529321572864,0.032265123723,0.100087042889,0.237969571997,0.472390264535,1.07840164214,2.87399061276,8.78438888738,9.35412207477,8.02839354176,6.7025049691,5.44290482357,4.16685260101,3.08921594844,2.17438844212,1.41491783529,0.837907644561,0.467092151593,0.242753117423,0.106387124132,0.0375573952177,0.0148761385032,0.00554593435565,0.00126014468047,0.000504083718591,0.000504337781551,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_6
y7_DELTAR_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.011726445518,0.0343623072018,0.0667015745523,0.123708920398,0.226725995141,0.459739007655,1.30382609683,4.19569814032,3.99820256236,3.11386650599,2.34601586286,1.6786639287,1.14890441592,0.701102953853,0.394525310231,0.200446400474,0.106762134161,0.0389371717401,0.0134482051709,0.00487045362199,0.00142975738105,0.000856878251999,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_7
y7_DELTAR_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00203093481187,0.00457892515879,0.00807620683047,0.0138846935016,0.0262791762867,0.0514614332342,0.158958463992,0.632041840455,0.546463122043,0.363087015128,0.221461048474,0.118847596591,0.0580068739514,0.0272951566595,0.00932449750143,0.00302238162131,0.000777307441283,0.000323807246285,0.000151288016484,4.31956675371e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_8
y7_DELTAR_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000397353899821,0.000851817282859,0.00130394912405,0.00133173945735,0.00363008249248,0.00767978756739,0.0298868199775,0.149025608528,0.108398517773,0.0580069166798,0.0272855297697,0.00966175853683,0.00356842933188,0.00104881704937,0.000311672595736,8.51463370875e-05,2.82221564232e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_9
y7_DELTAR_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,7.8176445648,7.81885224201,26.0706471194,20.8588238205,28.6702607706,15.6291007454,7.82206758643,2.60887622775,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_10
y7_DELTAR_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.05238929114,2.10669359492,9.48357970457,23.1623222351,41.0651894164,73.7367210511,83.2093963036,71.6096602945,69.5256940557,62.1407558123,24.2184303523,16.8530462465,8.42834886866,6.31964175934,1.05389798404,2.10719341437,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_11
y7_DELTAR_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.460173181188,2.53547733378,5.52939261055,21.1876541413,55.0573097014,94.6651205523,121.15612909,123.455281276,129.442122021,101.815908052,74.8570845222,38.2410033778,23.261106246,11.2831135567,5.99121726511,2.99425878938,1.61274751646,0.69062134378,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_12
y7_DELTAR_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0831404702383,0.249176732371,0.664691804604,2.10478467023,5.06703074763,21.1848242564,67.0946490953,105.806841104,117.522932757,84.8151703951,53.1401353085,33.5614589539,19.3015896713,10.162149492,5.78678133508,2.79690199338,1.24588250771,0.609061720901,0.13844381602,0.0831275053638,0.0276859124705,0.0,0.0276409432069,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_13
y7_DELTAR_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100785377154,0.120920679639,0.383225938119,0.826641328569,1.61310465394,4.58649534825,17.3819593289,78.708592968,64.5751717463,40.2059961189,23.6527862349,15.1750273073,7.92495183879,4.46651719202,2.51018279784,0.917429901511,0.453710935501,0.201587745459,0.0906670862141,0.0100661645167,0.0100795814575,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_14
y7_DELTAR_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0566037567452,0.11036622211,0.178201717181,0.410221686761,0.76113021364,1.79387172429,6.78141036292,32.8407576729,24.3281294271,14.3691203548,7.7406395668,3.85625760331,1.86444847881,0.959156873108,0.339487305005,0.138659905559,0.0679308331898,0.019809310375,0.0,0.00565236542584,0.0,0.00282703721639,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_15
y7_DELTAR_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00608990105335,0.0137132010778,0.025935347506,0.0472171441118,0.0608769864765,0.140025149504,0.674488634573,4.3170093677,2.92640403151,1.45929345457,0.717569533183,0.272545577971,0.0822253042415,0.0304505391214,0.0106678754096,0.00762122995213,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_16
y7_DELTAR_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00180640904432,0.00198614334736,0.00270663264263,0.00469407696684,0.00686384291275,0.0173364292146,0.0996748146434,0.970327686842,0.603058611542,0.224992506803,0.0707826916138,0.0189580556419,0.00361129180941,0.000902789349561,0.000360840331731,0.0,0.000180186409828,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights+y7_DELTAR_16_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=4, linestyle="dashdot",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=3, linestyle="dashed",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"\Delta R [ j_{1} , j_{2} ] ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights+y7_DELTAR_16_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights+y7_DELTAR_16_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_6.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_6.png')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_6.eps')
# Running!
if __name__ == '__main__':
selection_6()
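# A minimal sketch of the same histogram call on a newer matplotlib, assuming the
# xData/xBinning/weight arrays defined above: the normed keyword used throughout
# this generated script was removed around matplotlib 3.1, and density is its
# replacement, so on a current release each call would read roughly:
#
#   pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights,
#            label="$signal$", histtype="step", rwidth=1.0,
#            color=None, edgecolor="#7a8e99", linewidth=3, linestyle="dashed",
#            bottom=None, cumulative=False, density=False, align="mid",
#            orientation="vertical")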
|
[
"elijah.sheridan@vanderbilt.edu"
] |
elijah.sheridan@vanderbilt.edu
|
1767d3910fac27679191cb881124e7753f02b9dc
|
d29a5ce285083043a37cb0da2abb5a3045e05551
|
/reviewboard/admin/tests/test_related_user_widget.py
|
81f98b7213a9063a1f659f54219a18d85eaa4dc9
|
[
"MIT"
] |
permissive
|
wen501271303/reviewboard
|
83fa35123b851a5b42e3c2a3eb44f477a3da6198
|
a3b548437deb703792b805cf80f80313c7dd7f8a
|
refs/heads/master
| 2020-06-12T06:16:28.241753
| 2019-06-24T22:25:09
| 2019-06-24T22:25:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,736
|
py
|
"""Unit tests for reviewboard.admin.form_widgets.RelatedUserWidget."""
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User
from reviewboard.admin.form_widgets import RelatedUserWidget
from reviewboard.testing.testcase import TestCase
class TestForm(forms.Form):
"""A Test Form with a field that contains a RelatedUserWidget."""
my_multiselect_field = forms.ModelMultipleChoiceField(
queryset=User.objects.filter(is_active=True),
label=('Default users'),
required=False,
widget=RelatedUserWidget())
class LocalSiteTestForm(forms.Form):
"""A Test Form with a field that contains a RelatedUserWidget.
The RelatedUserWidget is defined to have a local_site_name.
"""
my_multiselect_field = forms.ModelMultipleChoiceField(
queryset=User.objects.filter(is_active=True),
label=('Default users'),
required=False,
widget=RelatedUserWidget(local_site_name='supertest'))
class SingleValueTestForm(forms.Form):
"""A Test Form with a field that contains a RelatedUserWidget.
The RelatedUserWidget is defined as setting multivalued to False.
"""
my_select_field = forms.ModelMultipleChoiceField(
queryset=User.objects.filter(is_active=True),
label=('Default users'),
required=False,
widget=RelatedUserWidget(multivalued=False))
class RelatedUserWidgetTests(TestCase):
"""Unit tests for RelatedUserWidget."""
fixtures = ['test_users']
def test_render_empty(self):
"""Testing RelatedUserWidget.render with no initial data"""
my_form = TestForm()
html = my_form.fields['my_multiselect_field'].widget.render(
'Default users',
[],
{'id': 'default-users'})
self.assertHTMLEqual(
"""<input id="default-users" name="Default users" type="hidden" />
<script>
$(function() {
var view = new RB.RelatedUserSelectorView({
$input: $('#default\\u002Dusers'),
initialOptions: [],
useAvatars: true,
multivalued: true
}).render();
});
</script>""",
html)
def test_render_with_data(self):
"""Testing RelatedUserWidget.render with initial data"""
my_form = TestForm()
html = my_form.fields['my_multiselect_field'].widget.render(
'Default users',
[1, 2, 3],
{'id': 'default-users'})
self.assertHTMLEqual(
"""<input id="default-users" name="Default users"
type="hidden" value="1,2,3" />
<script>
$(function() {
var view = new RB.RelatedUserSelectorView({
$input: $('#default\\u002Dusers'),
initialOptions: [{"avatarURL": "https://secure.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\\u0026d=mm",
"fullname": "Admin User",
"id": 1,
"username": "admin"},
{"avatarURL": "https://secure.gravatar.com/avatar/b0f1ae4342591db2695fb11313114b3e?s=40\\u0026d=mm",
"fullname": "Doc Dwarf",
"id": 2,
"username": "doc"},
{"avatarURL": "https://secure.gravatar.com/avatar/1a0098e6600792ea4f714aa205bf3f2b?s=40\\u0026d=mm",
"fullname": "Dopey Dwarf",
"id": 3,
"username": "dopey"}],
useAvatars: true,
multivalued: true
}).render();
});
</script>""",
html)
def test_render_with_local_site(self):
"""Testing RelatedUserWidget.render with a local site defined"""
my_form = LocalSiteTestForm()
html = my_form.fields['my_multiselect_field'].widget.render(
'Default users',
[],
{'id': 'default-users'})
self.assertIn("localSitePrefix: 's/supertest/',", html)
def test_value_from_datadict(self):
"""Testing RelatedUserWidget.value_from_datadict"""
my_form = TestForm()
value = (
my_form.fields['my_multiselect_field']
.widget
.value_from_datadict(
{'people': ['1', '2']},
{},
'people'))
self.assertEqual(value, ['1', '2'])
def test_value_from_datadict_single_value(self):
"""Testing RelatedUserWidget.value_from_datadict with a single value"""
my_form = SingleValueTestForm()
value = (
my_form.fields['my_select_field']
.widget
.value_from_datadict(
{'people': ['1']},
{},
'people'))
self.assertEqual(value, ['1'])
def test_value_from_datadict_with_no_data(self):
"""Testing RelatedUserWidget.value_from_datadict with no data"""
my_form = TestForm()
value = (
my_form.fields['my_multiselect_field']
.widget
.value_from_datadict(
{'people': []},
{},
'people'))
self.assertEqual(value, [])
def test_value_from_datadict_with_missing_data(self):
"""Testing RelatedUserWidget.value_from_datadict with missing data"""
my_form = TestForm()
value = (
my_form.fields['my_multiselect_field']
.widget
.value_from_datadict(
{},
{},
'people'))
self.assertIsNone(value)
|
[
"christian@beanbaginc.com"
] |
christian@beanbaginc.com
|
62ae74f067f9e799dea7e452ee0644d0e64f3f79
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_091/ch25_2020_09_30_19_23_04_594122.py
|
56f82a63d1ac642611703c6c1b15d4481a550f7c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import math
# Projectile range: d = v**2 * sin(2*theta) / g, with the launch angle supplied in degrees.
v = float(input('Digite o valor da velocidade da jaca: '))
m = float(input('Digite em graus o ângulo de lançamento: '))
d = ((v**2) * math.sin(math.radians(2*m))) / 9.8
if d < 98:
    print('Muito perto')
elif 98 <= d <= 102:
    print('Acertou')
else:
    print('Muito longe')
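# Worked example with made-up values: for v = 31 and m = 45,
# d = 31**2 * sin(radians(90)) / 9.8 = 961 / 9.8 ≈ 98.06,
# which falls inside the [98, 102] window and prints 'Acertou'.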
|
[
"you@example.com"
] |
you@example.com
|
7595dee2388e7d0424519ce001bd1a177e831d2c
|
de707c94c91f554d549e604737b72e6c86eb0755
|
/supervised_learning/0x12-transformer_apps/1-dataset.py
|
ecbc295eff78c8f60161c3dbab97b078db9527a8
|
[] |
no_license
|
ejonakodra/holbertonschool-machine_learning-1
|
885cf89c1737573228071e4dc8e26304f393bc30
|
8834b201ca84937365e4dcc0fac978656cdf5293
|
refs/heads/main
| 2023-07-10T09:11:01.298863
| 2021-08-11T03:43:59
| 2021-08-11T03:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,979
|
py
|
#!/usr/bin/env python3
"""
Defines class Dataset that loads and preps a dataset for machine translation
"""
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
class Dataset:
"""
Loads and preps a dataset for machine translation
class constructor:
def __init__(self)
public instance attributes:
        data_train:
            contains the ted_hrlr_translate/pt_to_en
            tf.data.Dataset train split, loaded as_supervised
        data_valid:
            contains the ted_hrlr_translate/pt_to_en
            tf.data.Dataset validate split, loaded as_supervised
tokenizer_pt:
the Portuguese tokenizer created from the training set
tokenizer_en:
the English tokenizer created from the training set
instance method:
def tokenize_dataset(self, data):
that creates sub-word tokenizers for our dataset
def encode(self, pt, en):
that encodes a translation into tokens
"""
def __init__(self):
"""
Class constructor
Sets the public instance attributes:
            data_train:
                contains the ted_hrlr_translate/pt_to_en
                tf.data.Dataset train split, loaded as_supervised
            data_valid:
                contains the ted_hrlr_translate/pt_to_en
                tf.data.Dataset validate split, loaded as_supervised
tokenizer_pt:
the Portuguese tokenizer created from the training set
tokenizer_en:
the English tokenizer created from the training set
"""
self.data_train = tfds.load("ted_hrlr_translate/pt_to_en",
split="train",
as_supervised=True)
self.data_valid = tfds.load("ted_hrlr_translate/pt_to_en",
split="validation",
as_supervised=True)
self.tokenizer_pt, self.tokenizer_en = self.tokenize_dataset(
self.data_train)
def tokenize_dataset(self, data):
"""
        Creates sub-word tokenizers for our dataset
parameters:
data [tf.data.Dataset]:
dataset to use whose examples are formatted as tuple (pt, en)
pt [tf.Tensor]:
contains the Portuguese sentence
en [tf.Tensor]:
contains the corresponding English sentence
returns:
tokenizer_pt, tokenizer_en:
tokenizer_pt: the Portuguese tokenizer
tokenizer_en: the English tokenizer
"""
SubwordTextEncoder = tfds.deprecated.text.SubwordTextEncoder
tokenizer_pt = SubwordTextEncoder.build_from_corpus(
(pt.numpy() for pt, en in data),
target_vocab_size=(2 ** 15))
tokenizer_en = SubwordTextEncoder.build_from_corpus(
(en.numpy() for pt, en in data),
target_vocab_size=(2 ** 15))
return tokenizer_pt, tokenizer_en
def encode(self, pt, en):
"""
Encodes a translation into tokens
parameters:
pt [tf.Tensor]:
contains the Portuguese sentence
en [tf.Tensor]:
contains the corresponding English sentence
returns:
pt_tokens, en_tokens:
pt_tokens [np.ndarray]: the Portuguese tokens
en_tokens [np.ndarray]: the English tokens
"""
pt_start_index = self.tokenizer_pt.vocab_size
pt_end_index = pt_start_index + 1
en_start_index = self.tokenizer_en.vocab_size
en_end_index = en_start_index + 1
pt_tokens = [pt_start_index] + self.tokenizer_pt.encode(
pt.numpy()) + [pt_end_index]
en_tokens = [en_start_index] + self.tokenizer_en.encode(
en.numpy()) + [en_end_index]
return pt_tokens, en_tokens
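# Illustrative usage sketch, commented out because constructing Dataset downloads
# the TED pt->en corpus and builds both sub-word tokenizers:
#   data = Dataset()
#   for pt, en in data.data_train.take(1):
#       pt_tokens, en_tokens = data.encode(pt, en)
#       print(pt_tokens[:3], en_tokens[:3])  # start token followed by sub-word ids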
|
[
"eislek02@gmail.com"
] |
eislek02@gmail.com
|
4a8e80cd7e903d4068d0a5758d594014ae06a533
|
477a669e9ab041f411a728dacd983b20e20073a5
|
/cosmos.py
|
e1d5ab469f7f67c126c1e466a0fd5e1f10ce8cb4
|
[] |
no_license
|
DriftingPig/postrunbrick
|
01c2a008bfd29ed93ac058b5eb39d553517999fd
|
39456e64ded3bd12234719057df047c024db7213
|
refs/heads/main
| 2023-07-03T03:36:50.463758
| 2021-08-04T18:11:11
| 2021-08-04T18:11:11
| 390,432,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,761
|
py
|
from filesystem import LegacySimData
import subprocess
from glob import glob
import astropy.io.fits as fits
from astropy.table import Table,vstack
from SurveySource import BaseSource
import os
import numpy as np
class CosmosRepeats(LegacySimData):
def __init__(self, survey_dir=None, outdir=None, subsection=None, brick=None, startid=None, **kwargs):
self.reference_surveydir = '/global/cscratch1/sd/dstn/cosmos-subs-lsd/'
self.reference_outdir_top = '/global/cscratch1/sd/dstn/dr9-cosmos-subs/'
self.reference_outdir = '/global/cscratch1/sd/dstn/dr9-cosmos-subs/80/'
super(CosmosRepeats,self).__init__(survey_dir=survey_dir, outdir=outdir, subsection=subsection, brick=brick)
self.total_sets = ['80', '81', '82', '83', '84', '85', '86', '87', '88', '89']
self.set_bricklist()
#self.maskdir = self.find_file('masks')
self.startid = startid
def get_legacypipe_ver(self):
        # docker version of legacypipe used in this cosmos repeats run, kept for documentation purposes
return 'legacysurvey/legacypipe:DR9.7.1'
def set_bricklist(self):
fns = glob(os.path.join(self.reference_outdir,'coadd','*','*'))
bricklist = []
for fn in fns:
bricklist.append(os.path.basename(fn))
self.bricklist = bricklist
def stack_all_rs(self):
startids = ['0','50','100','150','200','250','300','350','400','450','500']
for one_set in self.total_sets:
print(one_set)
final_tab = None
for startid in startids:
catalog = BaseSource(survey_dir=self.survey_dir, outdir=self.outdir,subsection = 'rs%s_cosmos%s'%(startid,one_set))
fn = catalog.find_file("processed_one")
tab = Table.read(fn)
                tab['startid'] = np.array([startid]*len(tab), dtype=str)
if final_tab is None:
final_tab = tab
else:
final_tab = vstack((final_tab,tab))
#just setting 9999 as stacking 'all'
catalog = BaseSource(survey_dir=self.survey_dir, outdir=self.outdir,subsection = 'rs9999_cosmos%s'%(one_set))
fn = catalog.find_file("processed_one")
final_tab.write(fn,overwrite=True)
print("written fn %s"%fn)
def set_maskbits_cross(self,mp=None):
if mp is None:
print('mp needs to be set')
return None
subprocess.call(["mkdir","-p",self.outdir+'/maskbits'])
mp.map(self._set_maskbits_cross,self.bricklist)
def _set_maskbits_cross(self,brickname):
        maskbits_img = np.zeros((3600, 3600), dtype=int)
for one_set in self.total_sets:
cosmos_catalog = LegacySimData(survey_dir=self.survey_dir,outdir=self.outdir,brick=brickname, subsection='rs%d_cosmos%s'%(self.startid,one_set))
maskbits_fn = cosmos_catalog.find_file('maskbits')
maskbits = fits.getdata(maskbits_fn)
maskbits_img |= maskbits
hdu = fits.ImageHDU(data=maskbits_img)
fn = cosmos_catalog.find_file('maskbits_cross')
fn = fn.replace('.fits','_rs%d.fits'%self.startid)
hdu.writeto(fn,overwrite=True)
print("written %s"%fn)
def mask_tractor(self,mp=None):
        # add an additional column to the tractor file, 'matched_cosmos': flags the sources that lie in the common maskbits==0 footprint
if mp is None:
print('mp needs to be set')
return None
mp.map(self._mask_tractor_core,self.bricklist)
return True
def _mask_tractor_core(self,brickname):
if brickname is None:
return None
cosmos_catalog = LegacySimData(survey_dir=self.survey_dir,outdir=self.outdir,brick=brickname, subsection='rs%d_cosmos80'%(self.startid))
cosmos_catalog.get_mask(brick=brickname)
cosmos_catalog.get_maskbits_corss(brick=brickname,startid=self.startid)
for one_set in self.total_sets:
cosmos_catalog.subsection = 'rs%d_cosmos%s'%(self.startid, one_set)
tractor_fn = cosmos_catalog.find_file('tractor')
tractor = Table.read(tractor_fn)
bx = (tractor['bx']+0.5).astype(int)
by = (tractor['by']+0.5).astype(int)
mask_flag = cosmos_catalog.mask[(by),(bx)]
maskbits_corss_flag = cosmos_catalog.maskbits_cross[(by),(bx)]
sel = (mask_flag==1)
tractor['matched_cosmos'] = sel
tractor['maskbits_cross'] = maskbits_corss_flag
tractor.write(tractor_fn,overwrite=True)
return True
def match_catalog(self,mp=None,south=True):
        # add additional columns to the sources, giving the corresponding values in the other sets
if mp is None:
#for debug
X = (self.bricklist[0],self.total_sets[1],south)
self._match_catalog_core(X)
return None
inputs = []
for brickname in self.bricklist:
for one_set in self.total_sets:
inputs.append((brickname,one_set,south))
mp.map(self._match_catalog_core,inputs)
def _match_catalog_core(self,X):
(brickname,set_num,south) = X
self.brick = brickname
self.subsection = 'rs%d_cosmos%s'%(self.startid,set_num)
tractor_fn = self.find_file('tractor')
T = Table.read(tractor_fn)
catalog = BaseSource(filetype='tractor', survey_dir=self.survey_dir, outdir=self.outdir,subsection='rs%d_cosmos%s'%(self.startid,set_num),brick=brickname)
LRG = catalog.target_selection('LRG_sv3',south=south)
#lrg
T['set_%s_lrg_sv3'%set_num] = LRG
LRG_like = catalog.target_selection('LRG_sv3_like', south=south)
T['set_%s_lrg_sv3_like'%set_num] = LRG_like
for one_set in self.total_sets:
#if one_set == set_num:
#continue
catalog_i = BaseSource(filetype='tractor', survey_dir=self.survey_dir, outdir=self.outdir,subsection='rs%d_cosmos%s'%(self.startid,one_set),brick=brickname)
cat1, cat2, matched = catalog.match_catalog(catalog.source, catalog_i.source)
catalog_i.source = cat2
catalog_i._construct_class()
LRG = catalog_i.target_selection('LRG_sv3',south=south)
T['set_%s_lrg_sv3'%one_set] = LRG
T['set_%s_matched'%one_set]=matched
T['set_%s_flux_g'%one_set]=cat2['flux_g']
T['set_%s_flux_r'%one_set]=cat2['flux_r']
T['set_%s_flux_z'%one_set]=cat2['flux_z']
T['set_%s_flux_w1'%one_set]=cat2['flux_w1']
T['set_%s_flux_w2'%one_set]=cat2['flux_w2']
T['set_%s_fiberflux_z'%one_set]=cat2['fiberflux_z']
T['set_%s_fiberflux_g'%one_set]=cat2['fiberflux_g']
tmp_fn = tractor_fn.replace('tractor-','tmp-tractor-')
T.write(tmp_fn, overwrite=True)
subprocess.call(["mv",tmp_fn,tractor_fn])
def _match_catalog_core_old(self,X): #not used now
(brickname,set_num,south) = X
print(X)
self.brick = brickname
self.subsection = 'cosmos%s'%set_num
tractor_fn = self.find_file('tractor')
T = Table.read(tractor_fn)
catalog = BaseSource(filetype='tractor', survey_dir=self.survey_dir, outdir=self.outdir,subsection='cosmos%s'%set_num,brick=brickname)
#select ELG, LRG in this set
LRG = catalog.target_selection('LRG',south=south)
#lrg_opt, lrg_ir, lrg_sv_opt, lrg_sv_ir
T['set_%s_lrgopt'%set_num] = LRG[0]
T['set_%s_lrgir'%set_num] = LRG[1]
T['set_%s_lrgsvopt'%set_num] = LRG[2]
T['set_%s_lrgsvir'%set_num] = LRG[3]
ELG = catalog.target_selection('ELG',south=south)
#svgtot, svgfib, fdrgtot, fdrgfib
T['set_%s_svgtot'%set_num] = ELG[0]
T['set_%s_svgfib'%set_num] = ELG[1]
T['set_%s_fdrgtot'%set_num] = ELG[2]
T['set_%s_fdrgfib'%set_num] = ELG[3]
for one_set in self.total_sets:
#if one_set == set_num:
#continue
catalog_i = BaseSource(filetype='tractor', survey_dir=self.survey_dir, outdir=self.outdir,subsection='cosmos%s'%one_set,brick=brickname)
cat1, cat2, matched = catalog.match_catalog(catalog.source, catalog_i.source)
catalog_i.source = cat2
catalog_i._construct_class()
LRG_i = catalog_i.target_selection('LRG',south=south)
ELG_i = catalog_i.target_selection('ELG',south=south)
T['set_%s_lrgopt'%one_set] = LRG_i[0]
T['set_%s_lrgir'%one_set] = LRG_i[1]
T['set_%s_lrgsvopt'%one_set] = LRG_i[2]
T['set_%s_lrgsvir'%one_set] = LRG_i[3]
T['set_%s_svgtot'%one_set] = ELG_i[0]
T['set_%s_svgfib'%one_set] = ELG_i[1]
T['set_%s_fdrgtot'%one_set] = ELG_i[2]
T['set_%s_fdrgfib'%one_set] = ELG_i[3]
T['set_%s_matched'%one_set]=matched
T['set_%s_flux_g'%one_set]=cat2['flux_g']
T['set_%s_flux_r'%one_set]=cat2['flux_r']
T['set_%s_flux_z'%one_set]=cat2['flux_z']
T['set_%s_flux_w1'%one_set]=cat2['flux_w1']
T['set_%s_flux_w2'%one_set]=cat2['flux_w2']
T['set_%s_fiberflux_z'%one_set]=cat2['fiberflux_z']
T['set_%s_fiberflux_g'%one_set]=cat2['fiberflux_g']
tmp_fn = tractor_fn.replace('tractor-','tmp-tractor-')
T.write(tmp_fn, overwrite=True)
subprocess.call(["mv",tmp_fn,tractor_fn])
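# Illustrative usage sketch; the paths and startid below are hypothetical
# placeholders, and a multiprocessing.Pool stands in for whatever `mp` object the
# surrounding pipeline normally supplies:
#   from multiprocessing import Pool
#   repeats = CosmosRepeats(survey_dir='/path/to/survey', outdir='/path/to/out', startid=0)
#   with Pool(8) as pool:
#       repeats.set_maskbits_cross(mp=pool)
#       repeats.mask_tractor(mp=pool)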
|
[
"kong.291@osu.edu"
] |
kong.291@osu.edu
|
0fb84358f3bca4f1f4d759fa3813602aabe58d22
|
53cac2d67603d32b82a3237e38755577819f1aad
|
/pytext/loss/loss.py
|
9db304f404b3d167b8dcaf17302b4a6745e281fc
|
[
"BSD-3-Clause"
] |
permissive
|
sahupankaj10/test-pytext
|
c1957b67b137ff5433a9e8d0ec1fb1212326f779
|
3c88baf811ac182203e8025f016371d662a94385
|
refs/heads/master
| 2020-04-15T10:57:40.046773
| 2019-01-08T09:10:49
| 2019-01-16T06:31:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,850
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.nn.functional as F
from pytext.config import ConfigBase
from pytext.config.component import Component, ComponentType
from pytext.utils import loss_utils
from pytext.utils.cuda_utils import FloatTensor
from torch import nn
class Loss(Component):
"""Base class for loss functions"""
__COMPONENT_TYPE__ = ComponentType.LOSS
def __init__(self, config=None, *args, **kwargs):
super().__init__(config)
def __call__(self, logit, targets, reduce=True):
raise NotImplementedError
class CrossEntropyLoss(Loss):
def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):
self.ignore_index = ignore_index
self.weight = weight
def __call__(self, logits, targets, reduce=True):
return F.cross_entropy(
logits,
targets,
ignore_index=self.ignore_index,
reduction="elementwise_mean" if reduce else "none",
weight=self.weight,
)
class BinaryCrossEntropyLoss(Loss):
class Config(ConfigBase):
reweight_negative: bool = True
reduce: bool = True
def __call__(self, m_out, targets, reduce=True):
"""
Computes 1-vs-all binary cross entropy loss for multiclass
classification.
"""
# Converts targets to one-hot representation. Dim: [batch, n_classes]
one_hot_targets = (
FloatTensor(targets.size(0), m_out.size(1))
.zero_()
.scatter_(1, targets.unsqueeze(1).data, 1)
)
# This weighting applies uniform class weights.
# examples_per_class = one_hot_target.sum(0).clamp(min=1)
# total_positive = examples_per_class.sum()
# weights = total_positive.unsqueeze(0) / examples_per_class
loss = F.binary_cross_entropy_with_logits(
m_out, one_hot_targets, reduction="none"
)
if self.config.reweight_negative:
# This makes sure we have same weights for all negative classes and
# single positive class. Weight is 1 for the correct class and
# 1 / (n - 1) for other ones.
weights = one_hot_targets + (1.0 - one_hot_targets) / max(
1, one_hot_targets.size(1) - 1.0
)
loss = loss * weights
return loss.sum(1).mean() if reduce else loss.sum(1)
class AUCPRHingeLoss(nn.Module, Loss):
"""area under the precision-recall curve loss,
Reference: "Scalable Learning of Non-Decomposable Objectives", Section 5 \
TensorFlow Implementation: \
https://github.com/tensorflow/models/tree/master/research/global_objectives\
"""
class Config(ConfigBase):
"""
Attributes:
            precision_range_lower (float): the lower range of precision values over
                which to compute AUC. Must be nonnegative, `\leq precision_range_upper`,
                and `\leq 1.0`.
            precision_range_upper (float): the upper range of precision values over
                which to compute AUC. Must be nonnegative, `\geq precision_range_lower`,
                and `\leq 1.0`.
num_classes (int): number of classes(aka labels)
num_anchors (int): The number of grid points used to approximate the
Riemann sum.
"""
precision_range_lower: float = 0.0
precision_range_upper: float = 1.0
num_classes: int = 1
num_anchors: int = 20
def __init__(self, config, weights=None, *args, **kwargs):
"""Args:
config: Config containing `precision_range_lower`, `precision_range_upper`,
`num_classes`, `num_anchors`
"""
nn.Module.__init__(self)
Loss.__init__(self, config)
self.num_classes = self.config.num_classes
self.num_anchors = self.config.num_anchors
self.precision_range = (
self.config.precision_range_lower,
self.config.precision_range_upper,
)
# Create precision anchor values and distance between anchors.
        # corresponding to [alpha_t] and [delta_t] in the paper.
# precision_values: 1D `Tensor` of shape [K], where `K = num_anchors`
# delta: Scalar (since we use equal distance between anchors)
self.precision_values, self.delta = loss_utils.range_to_anchors_and_delta(
self.precision_range, self.num_anchors
)
# notation is [b_k] in paper, Parameter of shape [C, K]
# where `C = number of classes` `K = num_anchors`
self.biases = nn.Parameter(
FloatTensor(self.config.num_classes, self.config.num_anchors).zero_()
)
self.lambdas = nn.Parameter(
FloatTensor(self.config.num_classes, self.config.num_anchors).data.fill_(
1.0
)
)
def forward(self, logits, targets, reduce=True, size_average=True, weights=None):
"""
Args:
logits: Variable :math:`(N, C)` where `C = number of classes`
targets: Variable :math:`(N)` where each value is
`0 <= targets[i] <= C-1`
weights: Coefficients for the loss. Must be a `Tensor` of shape
[N] or [N, C], where `N = batch_size`, `C = number of classes`.
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
sizeAverage is set to False, the losses are instead summed
for each minibatch. Default: ``True``
reduce (bool, optional): By default, the losses are averaged or summed over
observations for each minibatch depending on size_average. When reduce
is False, returns a loss per input/target element instead and ignores
size_average. Default: True
"""
C = 1 if logits.dim() == 1 else logits.size(1)
if self.num_classes != C:
raise ValueError(
"num classes is %d while logits width is %d" % (self.num_classes, C)
)
labels, weights = AUCPRHingeLoss._prepare_labels_weights(
logits, targets, weights=weights
)
# Lagrange multipliers
# Lagrange multipliers are required to be nonnegative.
# Their gradient is reversed so that they are maximized
# (rather than minimized) by the optimizer.
# 1D `Tensor` of shape [K], where `K = num_anchors`
lambdas = loss_utils.lagrange_multiplier(self.lambdas)
# print("lambdas: {}".format(lambdas))
# A `Tensor` of Shape [N, C, K]
hinge_loss = loss_utils.weighted_hinge_loss(
labels.unsqueeze(-1),
logits.unsqueeze(-1) - self.biases,
positive_weights=1.0 + lambdas * (1.0 - self.precision_values),
negative_weights=lambdas * self.precision_values,
)
# 1D tensor of shape [C]
class_priors = loss_utils.build_class_priors(labels, weights=weights)
# lambda_term: Tensor[C, K]
# according to paper, lambda_term = lambda * (1 - precision) * |Y^+|
        # where |Y^+| is the number of positive examples = N * class_priors
lambda_term = class_priors.unsqueeze(-1) * (
lambdas * (1.0 - self.precision_values)
)
per_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term
# Riemann sum over anchors, and normalized by precision range
# loss: Tensor[N, C]
loss = per_anchor_loss.sum(2) * self.delta
loss /= self.precision_range[1] - self.precision_range[0]
if not reduce:
return loss
elif size_average:
return loss.mean()
else:
return loss.sum()
@staticmethod
def _prepare_labels_weights(logits, targets, weights=None):
"""
Args:
logits: Variable :math:`(N, C)` where `C = number of classes`
targets: Variable :math:`(N)` where each value is
`0 <= targets[i] <= C-1`
weights: Coefficients for the loss. Must be a `Tensor` of shape
[N] or [N, C], where `N = batch_size`, `C = number of classes`.
Returns:
labels: Tensor of shape [N, C], one-hot representation
weights: Tensor of shape broadcastable to labels
"""
N, C = logits.size()
# Converts targets to one-hot representation. Dim: [N, C]
labels = FloatTensor(N, C).zero_().scatter(1, targets.unsqueeze(1).data, 1)
if weights is None:
weights = FloatTensor(N).data.fill_(1.0)
if weights.dim() == 1:
weights.unsqueeze_(-1)
return labels, weights
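# Illustrative sketch of the one-hot conversion used by BinaryCrossEntropyLoss and
# _prepare_labels_weights, shown with plain CPU tensors standing in for the
# FloatTensor cuda helper (kept as a comment so importing this module stays cheap):
#   import torch
#   targets = torch.tensor([0, 2])
#   one_hot = torch.zeros(2, 3).scatter_(1, targets.unsqueeze(1), 1)
#   # one_hot -> tensor([[1., 0., 0.],
#   #                    [0., 0., 1.]])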
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
349a7c36c49b2122284b4e8861c63a455bcb3fb4
|
e77b92df446f0afed18a923846944b5fd3596bf9
|
/Inflearn_algo/section5_StackQueueHash/pro_8.py
|
4b75091911002a18320692cf54624df1ef6ac406
|
[] |
no_license
|
sds1vrk/Algo_Study
|
e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e
|
fbbc21bb06bb5dc08927b899ddc20e6cde9f0319
|
refs/heads/main
| 2023-06-27T05:49:15.351644
| 2021-08-01T12:43:06
| 2021-08-01T12:43:06
| 356,512,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
# Find the word: given n words and the n-1 words actually used in the poem,
# find the one word that is not used.
import sys
# sys.stdin=open("3190.txt","r")
n = int(input())
node = []
content = []
for i in range(n):
    node.append(input())
for j in range(n-1):
    content.append(input())
# Sort both lists in ascending order ==> the answer is always unique
node.sort()
content.sort()
# print(node)
# print(content)
result = ""
for i in range(len(content)):
    if content[i] != node[i]:
        # print(content[i])
        result = node[i]
        break
if result == "":
    result = node[-1]
print(result)
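# Worked example with made-up input: n = 3, node = ["banana", "apple", "cherry"],
# content = ["cherry", "banana"]; after sorting, node = ["apple", "banana", "cherry"]
# and content = ["banana", "cherry"], so index 0 already differs and the unused
# word "apple" is printed.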
|
[
"51287886+sds1vrk@users.noreply.github.com"
] |
51287886+sds1vrk@users.noreply.github.com
|
98ff009947702d0d5bfca3498b6c22479e18e62e
|
e07ba3eeea2d9a3ce44bdc6fb26386db5b72d8fc
|
/ijvine_ebay/ijvine_ebay_base/wizard/imports/__init__.py
|
c8511c6ba816f058973a66f4a88db0a32fef9ec2
|
[] |
no_license
|
tosink/ab
|
8e4b931214eb333e141fd5c6512ba956e5cde3d4
|
1c410562edce9be367ad6cab7ac3370353e395c8
|
refs/heads/master
| 2023-06-14T10:34:21.120996
| 2021-07-15T20:09:14
| 2021-07-15T20:09:14
| 275,185,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2021-Present IjVine Corporation (<https://ijvine.com/>)
##############################################################################
from . import import_operation
from . import import_attribute
from . import import_attribute_value
from . import import_category
from . import import_order
from . import import_partner
from . import import_template
from . import import_product
|
[
"komolafetosin@gmail.com"
] |
komolafetosin@gmail.com
|
faca11ef879f43fcfd751e4cb4ffee515534cc55
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/K/kazumasakata/line.py
|
e36352dd4877f40656b7704b874aece5d488856b
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,320
|
py
|
import scraperwiki
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import glob
import datetime
import locale
import sqlite3
import sys
import traceback
import urllib
import cgi
print "Content-Type: text/plain\n\n"
def fOpen(filename):
    buff = []
    f = open(filename,"r")
    for line in f:
        buff.append(line)
    f.close()
    return ''.join(buff)
################################################################################
# Main
################################################################################
deTag = re.compile('<.*?>')
# Record array used as temporary storage
dataRows = []
# Source page (fetched by URL)
html = scraperwiki.scrape('http://line-friends.com/category/woman/page/')
# Extract the required portion
posts = re.findall('<div class="postmetadata">.*?<p class="admin_del">', html, re.DOTALL)
for post in posts:
    # Strip whitespace
    buff = re.sub('\t| ','',post)
    # Holds one record
    dr = {}
    # deTag.sub('replacement string', string variable to search)
    # the deTag part is the regular expression
    # Extract the posting date
    dr["y"] = str(deTag.sub('',re.search('<spanclass="date-year">.*?</span>',buff).group()))
    dr["m"] = str(deTag.sub('',re.search('<spanclass="date-month">.*?</span>',buff).group())).replace('月','')
    dr["d"] = str(deTag.sub('',re.search('<spanclass="date-day">.*?</span>',buff).group()))
    dr["t"] = str(deTag.sub('',re.search('<pclass="posted_time">.*?</p>',buff).group()))
    # Extract the post content
    dr["name"] = deTag.sub('',re.search('<pclass="poster_name">.*?</p>',buff).group())
    dr["age"] = deTag.sub('',re.search('<pclass="poster_age">.*?</p>',buff).group())
    dr["area"] = deTag.sub('',re.search('<pclass="poster_area">.*?</p>',buff).group())
    work = re.search('<pclass="poster_line">.*?readonly/></p>',buff).group()
    work = re.sub('.*"type="text"value="','',work)
    dr["ID"] = re.sub('"readonly/></p>','',work)
    dr["txt"] = deTag.sub('',re.search('<pclass="poster_txt"><p>.*?</p>',buff,re.DOTALL).group())
    # Photo
    result = re.search('<imgsrc="http://line-friends.com/uploads/.*?/>',buff)
    if result is not None:
        dr["img"] = re.sub('<imgsrc="|"/>','',result.group())
    else:
        dr["img"] = ""
    dataRows.append(dr)
    print dr["ID"]
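# Illustrative note on the deTag pattern above: it simply strips markup from the
# whitespace-collapsed fragments, e.g.
#   deTag.sub('', '<pclass="poster_name">Hanako</p>')  ->  'Hanako'
# (the name 'Hanako' is an invented placeholder).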
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
16433d05f74a4f2012471c58b28b8c8e80c34dbd
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/MT5_ID4146_for_PyTorch/transformers/src/transformers/models/poolformer/__init__.py
|
904dd02ac05522a70491dedf6e4862494d96cd6c
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...file_utils import _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": ["POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig"],
}
if is_vision_available():
_import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
if is_torch_available():
_import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig
if is_vision_available():
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
if is_torch_available():
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
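# A minimal usage sketch of the lazy-import pattern above (illustrative only; it assumes
# this file is installed at transformers.models.poolformer, as the path suggests).
# Importing a name from the package triggers the real submodule import on first access:
# from transformers.models.poolformer import PoolFormerConfig
# config = PoolFormerConfig()  # builds the default PoolFormer configuration object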
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
23297565cf5f63a35de1cce9992af035d53c2a35
|
21e177a4d828f4e0a003e9424c4952dbc0b47d29
|
/testlints/test_lint_ext_ian_space_dns_name.py
|
f149b7266693b0ec79697e0bc1d1d61d1bd555a3
|
[] |
no_license
|
846468230/Plint
|
1071277a55144bb3185347a58dd9787562fc0538
|
c7e7ca27e5d04bbaa4e7ad71d8e86ec5c9388987
|
refs/heads/master
| 2020-05-15T12:11:22.358000
| 2019-04-19T11:46:05
| 2019-04-19T11:46:05
| 182,255,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
import sys
sys.path.append("..")
from lints import base
from lints import lint_ext_ian_space_dns_name
import unittest
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
class TestIANEmptyDNS(unittest.TestCase):
'''test lint_ext_ian_space_dns_name.py'''
def test_IANEmptyDNS(self):
certPath ='..\\testCerts\\IANEmptyDNS.pem'
lint_ext_ian_space_dns_name.init()
with open(certPath, "rb") as f:
cert = x509.load_pem_x509_certificate(f.read(), default_backend())
out = base.Lints["e_ext_ian_space_dns_name"].Execute(cert)
self.assertEqual(base.LintStatus.Error,out.Status)
def test_IANNotEmptyDNS(self):
certPath ='..\\testCerts\\SANNoEntries.pem'
lint_ext_ian_space_dns_name.init()
with open(certPath, "rb") as f:
cert = x509.load_pem_x509_certificate(f.read(), default_backend())
out = base.Lints["e_ext_ian_space_dns_name"].Execute(cert)
self.assertEqual(base.LintStatus.Pass,out.Status)
if __name__=="__main__":
unittest.main(verbosity=2)
|
[
"846468230@qq.com"
] |
846468230@qq.com
|
f3a4f9d862a4e6d05ac0f4a9d2af4620e88d4183
|
a9cd70686c362d946f40ed4314f6cf871a0149aa
|
/appsflyer_processor.py
|
04ad2789d92c543bdc2c728fa6e1e744b0b7c473
|
[
"MIT"
] |
permissive
|
lxzero/bot_appsflyer
|
73ec3f33784f4fadd2d60416fddf28098a8dea26
|
e0e7c0439e7448e5645c262151c7d35fd7295886
|
refs/heads/main
| 2023-03-19T16:01:47.367603
| 2020-10-21T17:45:50
| 2020-10-21T17:45:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,161
|
py
|
import pandas as pd
import numpy as np
from datetime import date
from pathlib import Path
from typing import Optional, Dict
class AppsFlyerProcessor:
source_directory_path: Path
platform_directory_map: Dict[str, str]
processed_data: Optional[pd.DataFrame]=None
def __init__(self, source_directory_path: Path, platform_directory_map: Dict[str, str]):
self.source_directory_path = source_directory_path
self.platform_directory_map = platform_directory_map
def process(self):
processed_data = pd.DataFrame()
for platform, app_id in self.platform_directory_map.items():
files_path = self.source_directory_path / app_id
for path in files_path.glob('*.csv'):
file_name = path.absolute()
df = pd.read_csv(file_name)
day_list = [
x
for x in df.columns
if x not in ('Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id', 'Users',
'Cost', 'Average eCPI', 'Users')
]
df_final = pd.DataFrame()
for i in day_list:
event_day = i.split(' ')[-1]
if event_day == 'partial':
event_day = i.split(' ')[-3]
df_temp = df[['Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id']]
# Ensure Campaign Id can be read as a string
df_temp['Campaign Id'] = df_temp['Campaign Id'].astype(str)
df_temp['Campaign Id'] = '"' + df_temp['Campaign Id'] + '"'
df_temp['event_day'] = event_day
df_temp['cohort_revenue'] = df[[i]]
df_temp.cohort_revenue = df_temp.cohort_revenue.apply(lambda s: float(s.split('/')[0]) / float(s.split('/')[1]) if isinstance(s, str) and '/' in s else s)
df_temp['platform'] = platform
df_temp['install'] = df[['Users']]
df_final = df_temp.append(df_final, sort=True)
processed_data = processed_data.append(df_final, sort=True)
self.processed_data = processed_data
def process_old(self):
today = date.today()
file_name = input('Please enter file name: ')
platform = ''
if file_name.find('ios') != -1: platform = 'ios'
elif file_name.find('android') != -1: platform = 'android'
else: platform = 'error'
df = pd.read_csv('{}.csv'.format(file_name))
day_list = [x for x in df.columns if x not in ('Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id', 'Users',
'Cost', 'Average eCPI','Users')]
df_final = pd.DataFrame()
for i in day_list:
event_day = i.split(' ')[-1]
df_temp = df[['Cohort Day', 'Media Source', 'Ltv Country', 'Campaign Id']]
# Ensure Campaign Id can be read as a string
df_temp['Campaign Id'] = df_temp['Campaign Id'].astype(str)
df_temp['Campaign Id'] = '"' + df_temp['Campaign Id'] + '"'
df_temp['event_day'] = event_day
df_temp['cohort_revenue'] = df[[i]]
df_temp['platform'] = platform
df_temp['install'] = df[['Users']]
df_final = df_temp.append(df_final, sort = True)
df_final.to_csv('AF Total Revenue Data Lot - {}.csv'.format(today), index=False)
print('Exported CSV')
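# A minimal usage sketch of the class above (the directory path and app ids below are
# hypothetical, not taken from this file):
# processor = AppsFlyerProcessor(
#     source_directory_path=Path("appsflyer_exports"),
#     platform_directory_map={"ios": "id0000000000", "android": "com.example.app"},
# )
# processor.process()
# print(processor.processed_data.head())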
|
[
"leif@leifmeyer.io"
] |
leif@leifmeyer.io
|
0a82b035a5f5e69b90154c800f8c8daa9dde3af8
|
6371acdb640e62e4e6addac2ba1aa70002a8c1b1
|
/Algorithms/pySINDy/env/lib/python3.6/site-packages/matplotlib/backends/_gtk3_compat.py
|
e0ac33c8d3433fbe37af268ab3bbd11ec424236c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
M-Vause/SEED
|
263307152ebac1e4f49cd81dcd5207ecbdf51139
|
cda94a02a5ef47a1e9a885d330eef2821301ebed
|
refs/heads/master
| 2022-12-13T20:11:58.893994
| 2020-04-27T16:10:09
| 2020-04-27T16:10:09
| 252,790,026
| 3
| 3
|
MIT
| 2022-12-08T01:52:05
| 2020-04-03T16:55:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
"""
GObject compatibility loader; supports ``gi`` and ``pgi``.
The binding selection rules are as follows:
- if ``gi`` has already been imported, use it; else
- if ``pgi`` has already been imported, use it; else
- if ``gi`` can be imported, use it; else
- if ``pgi`` can be imported, use it; else
- error out.
Thus, to force usage of PGI when both bindings are installed, import it first.
"""
import importlib
import sys
if "gi" in sys.modules:
import gi
elif "pgi" in sys.modules:
import pgi as gi
else:
try:
import gi
except ImportError:
try:
import pgi as gi
except ImportError:
raise ImportError("The GTK3 backends require PyGObject or pgi")
from .backend_cairo import cairo # noqa
# The following combinations are allowed:
# gi + pycairo
# gi + cairocffi
# pgi + cairocffi
# (pgi doesn't work with pycairo)
# We always try to import cairocffi first so if a check below fails it means
# that cairocffi was unavailable to start with.
if gi.__name__ == "pgi" and cairo.__name__ == "cairo":
raise ImportError("pgi and pycairo are not compatible")
if gi.__name__ == "pgi" and gi.version_info < (0, 0, 11, 2):
raise ImportError("The GTK3 backends are incompatible with pgi<0.0.11.2")
gi.require_version("Gtk", "3.0")
globals().update(
{name:
importlib.import_module("{}.repository.{}".format(gi.__name__, name))
for name in ["GLib", "GObject", "Gtk", "Gdk"]})
|
[
"58262117+M-Vause@users.noreply.github.com"
] |
58262117+M-Vause@users.noreply.github.com
|
27b11c1cfa45069236e8505d414d2d41fd14cbba
|
00946ddaec6fc10781a5cd4c6242c4674e599c90
|
/TwoPointers/986. Interval List Intersections.py
|
cdbf19243dd41d55282757efc2ebb683634791d9
|
[] |
no_license
|
XihangJ/leetcode
|
618f15c1fb57a57499924145afaa93be0dfebc4c
|
f7d215ef4780d88b91d2478b75ae09aed0e257f1
|
refs/heads/main
| 2023-08-22T00:59:55.239744
| 2021-10-26T05:21:58
| 2021-10-26T05:21:58
| 375,885,476
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
'''
You are given two lists of closed intervals, firstList and secondList, where firstList[i] = [start_i, end_i] and secondList[j] = [start_j, end_j]. Each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
A closed interval [a, b] (with a <= b) denotes the set of real numbers x with a <= x <= b.
The intersection of two closed intervals is a set of real numbers that are either empty or represented as a closed interval. For example, the intersection of [1, 3] and [2, 4] is [2, 3].
'''
from typing import List
class Solution:
# method 1. 2 pointers. O(m + n), S(1)
def intervalIntersection(self, firstList: List[List[int]], secondList: List[List[int]]) -> List[List[int]]:
if not firstList or not secondList: return []
res = []
i1 = 0
i2 = 0
while i1 < len(firstList) and i2 < len(secondList):
first = firstList[i1]
second = secondList[i2]
left, right = max(first[0], second[0]), min(first[1], second[1])
if left <= right: res.append([left, right])
if first[1] < second[1]:
i1 += 1
elif first[1] > second[1]:
i2 += 1
else:
i1 += 1
i2 += 1
return res
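if __name__ == "__main__":
    # Minimal usage sketch with hypothetical inputs that mirror the example in the
    # problem statement above (not part of the original solution).
    s = Solution()
    print(s.intervalIntersection([[0, 2], [5, 10]], [[1, 5], [8, 12]]))
    # expected output: [[1, 2], [5, 5], [8, 10]]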
|
[
"noreply@github.com"
] |
XihangJ.noreply@github.com
|
10cc915e429d025238de2714b821afd172faa197
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/atcoder/ABC/A/065_a.py
|
9045ea98ef9fe34b57a5544b42250b279866012e
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """4 3 6"""
output = """safe"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """6 5 1"""
output = """delicious"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """3 7 12"""
output = """dangerous"""
self.assertIO(input, output)
# resolve() is defined before the test runner is invoked so that the tests above can call it.
def resolve():
    x,a,b = map(int, input().split())
    if b <= a:
        print("delicious")
    elif b <= (a+x):
        print("safe")
    else:
        print("dangerous")
if __name__ == "__main__":
    unittest.main()
|
[
"kanai@wide.ad.jp"
] |
kanai@wide.ad.jp
|
849d6da1c93aa71164cd222145fb72163c6366c0
|
66358f0897dd92882344a9ec87adff2003c9bc76
|
/leetcode/501~600/501. Find Mode in Binary Search Tree.py
|
32f12787db36506d9437248100b324015ec7da2e
|
[] |
no_license
|
Parkyes90/algo
|
973c5f84ed1cae41bb963a5838b835473c8dc984
|
86490aad1774631ad947bdf12818e9ddba8a8ed0
|
refs/heads/master
| 2023-04-06T23:17:08.372040
| 2023-03-30T10:18:11
| 2023-03-30T10:18:11
| 244,273,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
# Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def findMode(self, root: TreeNode) -> List[int]:
if not root:
return []
values = []
values_map = {}
nodes = [root]
ret = []
while nodes:
node = nodes.pop(0)
if node.left:
nodes.append(node.left)
if node.right:
nodes.append(node.right)
if isinstance(node.val, int):
values.append(node.val)
if not values:
return []
for value in values:
values_map[value] = values_map.get(value, 0) + 1
maximum = max(values_map.values())
for key, value in values_map.items():
if maximum == value:
ret.append(key)
return ret
if __name__ == "__main__":
r = TreeNode(0)
s = Solution()
answer = s.findMode(r)
print(answer)
|
[
"parkyes90@gmail.com"
] |
parkyes90@gmail.com
|
fb6afdf9dbb7cd9d0a6633a6e2296e70e406b1c3
|
bf63f844c9d3db9ae0293bc6762be53a6ca450b2
|
/helusers/jwt.py
|
735acf51caf9a653141543574b9fd416610362be
|
[
"BSD-2-Clause"
] |
permissive
|
tuomas777/django-helusers
|
93ab292b3b7a884b8ba04f9b24452ee3cc8342a7
|
77252693770410e40191f775462181cc7a3ec2bd
|
refs/heads/master
| 2020-04-10T05:30:45.465682
| 2018-09-14T09:43:06
| 2018-09-14T09:43:06
| 160,829,703
| 0
| 0
|
BSD-2-Clause
| 2018-12-07T13:50:08
| 2018-12-07T13:50:07
| null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from django.conf import settings
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings
from .user_utils import get_or_create_user
def patch_jwt_settings():
"""Patch rest_framework_jwt authentication settings from allauth"""
defaults = api_settings.defaults
defaults['JWT_PAYLOAD_GET_USER_ID_HANDLER'] = (
__name__ + '.get_user_id_from_payload_handler')
if 'allauth.socialaccount' not in settings.INSTALLED_APPS:
return
from allauth.socialaccount.models import SocialApp
try:
app = SocialApp.objects.get(provider='helsinki')
except SocialApp.DoesNotExist:
return
defaults['JWT_SECRET_KEY'] = app.secret
defaults['JWT_AUDIENCE'] = app.client_id
# Disable automatic settings patching for now because it breaks Travis.
# patch_jwt_settings()
class JWTAuthentication(JSONWebTokenAuthentication):
def authenticate_credentials(self, payload):
return get_or_create_user(payload)
def get_user_id_from_payload_handler(payload):
return payload.get('sub')
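# A minimal configuration sketch (hypothetical Django settings module, not part of this
# file): enabling the authentication class above for Django REST framework could look like
# REST_FRAMEWORK = {
#     "DEFAULT_AUTHENTICATION_CLASSES": ("helusers.jwt.JWTAuthentication",),
# }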
|
[
"juha.yrjola@iki.fi"
] |
juha.yrjola@iki.fi
|
58129f886c88589305db31e038960283db42c122
|
72b74f66f83239a928bf049c0dd6e47576e57bae
|
/tensorflow/tensorflow-master/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
|
00372831df6122c142850ae5ed87cbe3eb8baf5c
|
[
"Apache-2.0"
] |
permissive
|
InsaneLife/DeepLearning
|
7934056682e4fec7f3241dd2d4fbe1b4c5f192d2
|
4b60fe40587b96ba2a351c1b3cb832d03c2071ab
|
refs/heads/master
| 2022-10-08T08:18:19.633449
| 2017-08-30T10:47:05
| 2017-08-30T10:47:05
| 65,697,666
| 2
| 4
| null | 2022-09-30T21:55:05
| 2016-08-15T02:16:34
|
C++
|
UTF-8
|
Python
| false
| false
| 65,141
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util import nest
def check_op_order(graph):
"""Sanity check on the ordering of op id."""
for op in graph.get_operations():
for v in op.inputs:
assert v.op._id < op._id or op.type == "Merge", (
"The id of %s must be less than the id of %s" % (v.op.name, op.name))
return True
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def isum(s):
i = tf.constant(0, name="i")
c = lambda i, s: tf.less(i, 10)
b = lambda i, s: [tf.add(i, 1), tf.add(i, s)]
_, r_s = tf.while_loop(c, b, [i, s])
return r_s
class ControlFlowTest(tf.test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = tf.Variable(7)
v = control_flow_ops._Identity(v)
op = tf.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(check_op_order(v.graph))
self.assertTrue(isinstance(v2, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = tf.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = tf.constant(9)
enter_nine = control_flow_ops.enter(nine, "foo_1")
op = tf.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
tf.initialize_all_variables().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = tf.Variable(7)
p = tf.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v.ref(), p)
v2 = tf.assign(v1[1], 9)
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = control_flow_ops.enter(data, "foo_1", False)
five = tf.constant(5)
enter_five = control_flow_ops.enter(five, "foo_1", False)
mul_op = tf.mul(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = tf.constant([1, 2, 3, 4, 5, 6])
indices = tf.constant([0, 2, 4, 6, 8, 10])
data = tf.IndexedSlices(values, indices)
pred = tf.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = tf.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "The tensor returned for" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
zero = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
less_op = tf.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
id_op = tf.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
five = tf.constant(5)
mul_op = tf.mul(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = tf.convert_to_tensor(False)
n = tf.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = tf.constant(0)
one = tf.constant(1)
n = tf.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
with tf.device("/gpu:0"):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = tf.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = tf.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.test_session():
zero = tf.constant(0)
one = tf.constant(1)
n = tf.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = tf.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = tf.add(switch_i[1], enter_one)
with tf.device("/gpu:0"):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testCondBool(self):
values = tf.constant(10)
fn1 = lambda: tf.add(values, 1)
fn2 = lambda: tf.sub(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = tf.cond(False, fn1, fn2)
def testCondIndexedSlices(self):
with self.test_session():
values = tf.constant(10)
indices = tf.constant(0)
x = tf.IndexedSlices(values, indices)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), indices)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), indices)
r = tf.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
values = tf.constant(10)
i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
x = tf.IndexedSlices(values, i_32)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
r = tf.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with tf.device("/cpu:0"):
v = tf.Variable(7.0)
x = tf.constant(10.0)
pred = tf.less(1.0, 2.0)
fn1 = lambda: tf.add(v, 1.0)
fn2 = lambda: tf.sub(x, 1.0)
r = tf.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
r = tf.cond(pred, fn1, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.test_session():
x = tf.constant(10)
r = tf.cond(tf.less(1, 0), lambda: tf.add(x, 1), lambda: tf.sub(x, 1))
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
fn3 = lambda: tf.add(tf.cond(pred, fn1, fn2), 1)
r = tf.cond(pred, fn3, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(12, result)
def testCond_4(self):
with self.test_session():
v1 = tf.Variable(7)
v2 = tf.Variable(7)
v3 = tf.Variable(7)
age = tf.constant(3)
max_age = tf.constant(2)
pred = tf.greater(age, max_age)
fn1 = lambda: [tf.assign(v1, 1).op, tf.assign(v2, 2).op]
fn2 = lambda: [tf.assign(v3, 3).op, tf.constant(10).op]
r = tf.cond(pred, fn1, fn2)
tf.initialize_all_variables().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertTrue(check_op_order(age.graph))
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = tf.constant(True, name="alive")
count = tf.constant(0, name="count")
def body(i):
return tf.cond(
alive, lambda: [tf.less(i, 3), tf.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.test_session():
v1 = tf.Variable([7])
age = tf.constant(3)
pred = tf.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = tf.cond(pred, fn1, fn2)
tf.initialize_all_variables().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
x = tf.constant(10)
y = tf.constant(200)
pred = tf.less(1, 2)
fn1 = lambda: [tf.add(x, 1), tf.add(x, 2)]
fn2 = lambda: [y, y]
r = tf.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
with self.test_session():
x = state_ops.variable_op([1], tf.float32)
true_fn = lambda: x
false_fn = lambda: tf.constant([2.0])
r = tf.cond(tf.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
def testUninitializedRefIdentity(self):
with self.test_session() as sess:
v = state_ops.variable_op([1], tf.float32)
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
# This test tests that _ref_identity allows uninitialized ref as input
# so that this construction is allowed.
v_f_op = gen_array_ops._ref_identity(v_f)
v_t_op = gen_array_ops._ref_identity(v_t)
with tf.control_dependencies([v_f_op]):
assign_v = tf.assign(v, [1.0])
with tf.control_dependencies([v_t_op]):
orig_v = tf.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
def testCondGrad_1(self):
with self.test_session():
x = tf.constant(10.0, name="x")
pred = tf.less(1, 2)
fn1 = lambda: tf.identity(x)
fn2 = lambda: tf.identity(x)
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
result = grad.eval()
self.assertAllEqual(1.0, result)
def testCondGrad_2(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
x = tf.constant(10.0)
pred = tf.less(c, 2)
fn1 = lambda: tf.mul(x, 42.0)
fn2 = lambda: tf.mul(x, 3.0)
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.test_session():
x = tf.constant(0., name="X")
y = tf.cond(tf.constant(True),
lambda: x,
lambda: tf.cond(x < 1., lambda: x, lambda: x))
result = tf.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = tf.cond(tf.constant(False),
lambda: x,
lambda: tf.cond(x < 1., lambda: x, lambda: x))
result = tf.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
with self.test_session() as sess:
v1 = tf.Variable([1.0, 42.0])
c = tf.placeholder(tf.int32, shape=[])
pred = tf.less(c, 2)
fn1 = lambda: tf.identity(v1)
fn2 = lambda: tf.gather(v1, [1, 1])
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [v1])[0]
tf.initialize_all_variables().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
      dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
      dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 10,000 iterations took 0.21s.
def testWhile_1(self):
with self.test_session():
n = tf.constant(0)
c = lambda x: tf.less(x, 10000)
b = lambda x: tf.add(x, 1)
r = tf.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithRefs_1(self):
with self.test_session() as sess:
x = tf.Variable(0).ref()
i = tf.constant(0)
c = lambda i, x: tf.less(i, 100)
self.assertEqual(x.dtype, tf.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, tf.int32_ref)
return (i+1, gen_array_ops._ref_identity(x))
r = tf.while_loop(c, b, [i, x], parallel_iterations=5)
tf.initialize_all_variables().run()
self.assertEqual(r[0].dtype, tf.int32)
self.assertEqual(r[1].dtype, tf.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.test_session():
s = tf.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.add(m, 1), tf.add(c, 1)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
d = tf.convert_to_tensor(100)
r = tf.while_loop(
lambda i, m, c, o: tf.less(i, d), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.gather(x, i), tf.gather(x, i)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = tf.while_loop(
lambda i, m, c, o: tf.less(i, s), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = tf.slice(x, tf.expand_dims(i, 0), [1])
o = tf.concat(0, [o, c])
i = tf.add(i, 1)
return [i, c, o]
i = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor([0])
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = tf.while_loop(
lambda i, c, o: tf.less(i, s), compute, [i, c, o])
result = r[2].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
b = lambda x: tf.add(x, 1.0)
r = tf.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
def b(x):
with tf.device("/cpu:0"):
return tf.add(x, 1.0)
r = tf.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def testWhileShape(self):
with self.test_session():
i = tf.constant(0)
m = tf.ones([2, 2])
c = lambda i, j: tf.less(i, 2)
def _b(i, j):
new_i = tf.add(i, 1)
new_j = tf.tile(j, [2, 2])
return [new_i, new_j]
r = tf.while_loop(c, _b, [i, m])
r = r[1] * tf.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileWithNonTensorInput_Scalar(self):
with self.test_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = tf.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithNonTensorInput_Vector(self):
with self.test_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: tf.pack([x[0] + 1])
r = tf.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
def testWhileShapeInference(self):
with self.test_session():
i = tf.constant(0)
m = tf.ones([2, 2])
c = lambda i, j: tf.less(i, 2)
def _b(i, j):
new_i = tf.add(i, 1)
new_j = tf.concat(0, [j, j])
return [new_i, new_j]
r = tf.while_loop(c, _b, [i, m])
self.assertTrue(r[1].get_shape()[0].value is None)
self.assertEqual(r[1].get_shape()[1], tf.Dimension(2))
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(0)
def cpu_sum(s):
c = lambda i, s: tf.less(i, 10)
def b(i, s):
i1 = tf.add(i, 1)
with tf.device("/cpu:0"):
s1 = tf.add(i, s)
return i1, s1
_, r_s = tf.while_loop(c, b, [n, s])
return r_s
c = lambda x: tf.less(x, 200)
b = lambda x: tf.add(x, cpu_sum(n))
r = tf.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = tf.constant(0)
r = tf.constant(0)
condition = lambda n_, r_: tf.less(n_, 10)
def body(n_, r_):
n_ = tf.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [n_, r_]
res = tf.while_loop(condition, body, [n, r],
parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.test_session():
r = tf.constant(0)
condition = lambda r_: tf.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [r_]
res = tf.while_loop(condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.test_session() as sess:
b = tf.placeholder(tf.bool)
c = tf.constant(0)
with tf.control_dependencies([b]):
c = tf.while_loop(lambda x: x < 10, lambda x: x + 1, [c])
self.assertEqual(10, sess.run(c, {b: True}))
def testWhileWithControl_4(self):
with self.test_session() as sess:
b = tf.placeholder(tf.bool)
c = tf.constant(1)
x0 = tf.constant(0)
with tf.control_dependencies([b]):
r = tf.while_loop(lambda x: x < 10, lambda x: x + tf.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testCondWhile_1(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = tf.cond(tf.less(0, 1),
lambda: tf.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = tf.cond(tf.less(1, 0), lambda: tf.add(n, 1),
lambda: tf.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def testWhileCond_1(self):
with self.test_session():
i = tf.convert_to_tensor(0, name="i")
n = tf.convert_to_tensor(10, name="n")
one = tf.convert_to_tensor(1, name="one")
c = lambda x: tf.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: tf.cond(
tf.constant(True), lambda: tf.add(x, one), lambda: tf.sub(x, one))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: tf.cond(tf.constant(True), lambda: tf.add(x, 1), lambda: n)
r = tf.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: tf.cond(tf.less(0, 1), lambda: tf.add(x, 1),
lambda: tf.sub(x, 1))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = tf.while_loop(loop_iterator, loop_body, [n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
select1 = tf.Variable([3.0, 4.0, 5.0])
select2 = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns1 = tf.scatter_update(select1, j, 10.0)
ns2 = tf.scatter_update(select2, j, 10.0)
nj = tf.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = tf.while_loop(loop_iterator, loop_body, [n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j, _):
return tf.less(j, 3)
def loop_body(j, _):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
return [nj, ns]
r = tf.while_loop(loop_iterator, loop_body,
[n, tf.identity(select)],
parallel_iterations=1)
tf.initialize_all_variables().run()
result = r[1].eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
c = tf.constant(0, name="c")
asn1 = tf.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return tf.less(i, 10)
# Loop body
def loop_body(i):
asn2 = tf.assign_add(var_b, asn1, name="b_add")
with tf.control_dependencies([asn2]):
ni = tf.add(i, 1, name="i_add")
return ni
lpa = tf.while_loop(pred, loop_body, [c],
parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
# Change condition to check var_b
def pred(_):
return tf.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = tf.assign_add(var_a, tf.constant(1), name="a_add")
asn2 = tf.assign_add(var_b, tf.constant(1), name="b_add")
with tf.control_dependencies([asn1, asn2]):
inc_b = tf.identity(var_b)
return inc_b
lpa = tf.while_loop(pred, loop_body, [var_b], 1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
with self.test_session():
# Create some variables.
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
c = tf.constant(0)
tf.initialize_all_variables().run()
# Loop condition
def pred(i):
return tf.less(i, 10)
# Loop body
def loop_body(i):
asn1 = tf.assign_add(var_a, 1, name="a_add")
with tf.control_dependencies([asn1]):
asn2 = tf.assign_add(var_b, var_a, name="b_add")
with tf.control_dependencies([asn2]):
ni = tf.add(i, 1, name="i_add")
return ni
lpa = tf.while_loop(pred, loop_body, [c], 1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
with self.test_session():
q = tf.FIFOQueue(-1, tf.int32)
i = tf.constant(0)
def c(i):
return tf.less(i, 10)
def b(i):
ni = tf.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = tf.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.test_session():
s = gen_data_flow_ops._stack(tf.int32, stack_name="foo")
i = tf.constant(0)
def c(i):
return tf.less(i, 10)
def b(i):
ni = tf.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops._stack_push(s, i)], ni)
return ni
r = tf.while_loop(c, b, [i], parallel_iterations=1)
x = tf.constant(0)
def c1(i, _):
return tf.greater(i, 0)
def b1(i, x):
ni = tf.sub(i, 1)
nx = x + gen_data_flow_ops._stack_pop(s, tf.int32)
return [ni, nx]
_, rx = tf.while_loop(c1, b1, [r, x], parallel_iterations=1)
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
with self.test_session(graph=tf.Graph()) as sess:
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
def b(x):
with tf.device("/gpu:0"):
return tf.square(x)
loop = tf.while_loop(c, b, [v], parallel_iterations=1)
r = tf.gradients(loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = r.graph.get_operations()
r_devices = [(op.name, op.device.lower()) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Only the forward graph's Square op contains gpu in its device
self.assertTrue("gpu:0" in dev)
elif colocate and "Square" in name:
# Forward and backward graphs contain gpu in Square/Square_grad devices
self.assertTrue("gpu:0" in dev)
else:
self.assertFalse("gpu:0" in dev)
self.assertAllClose(1024.0, sess.run(r))
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
def testWhileGrad_Square(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(tf.less(1, 2), lambda: r, lambda: v)
r = tf.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.test_session():
x = tf.placeholder(tf.float32, shape=[None])
v = tf.constant([2.0], name="v")
n = tf.constant(0, name="n")
c = lambda i, v: tf.less(i, 5)
b = lambda i, v: [i + 1, tf.mul(x, v)]
r = tf.while_loop(c, b, [n, v], parallel_iterations=1)
r = tf.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
with self.test_session() as sess:
x = tf.placeholder(tf.float32, [None])
v0 = tf.constant([2.0, 2.0], name="v")
c = lambda v: tf.constant(False)
b = lambda v: tf.mul(v, x)
r = tf.while_loop(c, b, [v0])
y = tf.square(x)
r = tf.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = tf.mul(r, r)
r = tf.gradients(r, v)[0]
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = tf.add(r, r)
r = tf.gradients(r, v)[0]
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = tf.constant(3.0, name="a")
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = tf.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = tf.gradients(r, [a, v])
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def testWhileGrad_Variable(self):
with self.test_session():
a = tf.Variable(3.0)
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = tf.gradients(r, a)
tf.initialize_all_variables().run()
self.assertAllClose(216.0, r[0].eval())
def testWhile_NestedInput(self):
with self.test_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [named(a=tf.constant(0.0), b=tf.constant(1.0)),
(tf.constant(2.0), tf.constant(3.0)),
tf.constant(4.0)]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = tf.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], tf.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual(
[100.0, 1.0, 102.0, 3.0, 4.0 + 100*2.0],
sess.run(r_flattened))
def testWhile_NestedBadArityFails(self):
with self.test_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [named(a=tf.constant(0.0), b=tf.constant(1.0)),
(tf.constant(2.0), tf.constant(3.0)),
tf.constant(4.0)]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
tf.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
with self.test_session():
x = tf.constant(3.0, name="x")
y = tf.constant(2.0, name="y")
c = lambda x, y: tf.less(x, 100.0)
def b(x, y):
y1 = tf.add(x, y)
x1 = tf.mul(x, y1)
return x1, y1
rx, ry = tf.while_loop(c, b, [x, y], parallel_iterations=1)
r = tf.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = tf.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = tf.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = tf.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 10)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
ri, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1)
r = tf.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = tf.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], back_prop=False)
r = tf.add(r, v)
r = tf.gradients(r, v)
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
with self.test_session() as sess:
variable = tf.Variable(tf.ones([2, 3]))
time = tf.zeros([], dtype=tf.int32)
def cond(time, tensor, _):
return time < 10
def body(time, tensor, _):
return (time+1, tensor, tensor)
loop_vars = [time, variable, variable]
tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars)
cost = tf.reduce_sum(tensors[2])
grad = tf.gradients(cost, [variable])
tf.initialize_all_variables().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.test_session() as sess:
c0 = tf.constant(0.0, name="c0")
c1 = tf.constant(1.0, name="c1")
time = tf.constant(0, name="t")
def cond(time, _):
return time < 1
def body(time, tensor):
return time+1, c1
loop_vars = [time, c0]
tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars)
cost = tf.reduce_sum(tensors[1])
grad = tf.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 5)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
_, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = tf.while_loop(c, b, [i, rx], parallel_iterations=1)
r = tf.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 5)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
_, r1 = tf.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = tf.while_loop(c, b, [i, x], parallel_iterations=1)
rx = tf.add(r1, r2)
r = tf.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = tf.constant(1.0)
def inner_loop(s):
c = lambda x: tf.less(x, 4.0)
b = lambda x: tf.mul(x, 2.0)
return tf.while_loop(c, b, [s])
c = lambda x: tf.less(x, 2.0)
b = lambda x: tf.mul(inner_loop(x), 2.0)
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
def testNestedWhileGrad_SerialInner(self):
with self.test_session():
v = tf.constant(1.0)
def inner_loop1(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
def inner_loop2(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
c = lambda x: tf.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.test_session():
v = tf.constant(1.0)
def inner_loop1(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
def inner_loop2(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
c = lambda x: tf.less(x, 128.0)
b = lambda x: tf.mul(inner_loop1(x)[1], inner_loop2(x)[1])
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = tf.convert_to_tensor(2.0, name="v")
n = tf.convert_to_tensor(100.0, name="n")
one = tf.convert_to_tensor(1.0, name="one")
c = lambda x: tf.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.square(x),
lambda: tf.sub(x, one))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
def testWhileCondGrad_UnknownShape(self):
with self.test_session() as sess:
v = tf.placeholder(tf.float32)
n = tf.convert_to_tensor(100.0, name="n")
one = tf.convert_to_tensor(1.0, name="one")
c = lambda x: tf.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.square(x),
lambda: tf.sub(x, one))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileWithRefsWithGradients_1(self):
with self.test_session() as sess:
x = tf.Variable(0).ref()
i = tf.constant(0)
c = lambda i, x: tf.less(i, 10)
self.assertEqual(x.dtype, tf.int32_ref)
# pylint: disable=protected-access
def body(i, x):
self.assertEqual(x.dtype, tf.int32_ref)
return [i+1, gen_array_ops._ref_identity(x)]
# pylint: enable=protected-access
r = tf.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [tf.Variable(73).ref()]
grad = tf.gradients([r[1]], [x], grad_ys=grad_ys)
tf.initialize_all_variables().run()
self.assertEqual(r[0].dtype, tf.int32)
self.assertEqual(r[1].dtype, tf.int32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
def testWhileGrad_IndexedSlices(self):
with self.test_session():
values = tf.constant([2.0, 4.0], name="values")
indices = tf.constant([0, 3], name="indices")
shape = tf.constant([10], name="dense_shape")
i = tf.constant(0)
x = tf.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [i + 1, tf.IndexedSlices(x.values * 2.0, x.indices,
x.dense_shape)]
_, r = tf.while_loop(c, b, [i, x])
r = tf.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
with self.test_session():
values = tf.constant([2.0, 4.0], name="values")
indices = tf.constant([[0], [3]], dtype=tf.int64, name="indices")
shape = tf.constant([10], dtype=tf.int64, name="dense_shape")
i = tf.constant(0)
x = tf.SparseTensor(indices, values, shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [i + 1, tf.SparseTensor(x.indices, x.values * 2.0,
x.shape)]
_, r = tf.while_loop(c, b, [i, x])
r = tf.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
with self.test_session() as sess:
i0 = tf.constant(0)
params = tf.constant(5.0)
params_1 = tf.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = tf.constant([1.0, 2.0, 3.0])
data = tf.mul(data, params_1)
x1 = x + tf.gradients(data, params)[0]
return i + 1, x1
output_grad = tf.while_loop(c, b, [i0, tf.constant(0.0)])
self.assertAllClose(600.0, sess.run(output_grad)[1])
def testWhileGrad_StopGrad(self):
with self.test_session():
x = tf.constant(3.0, name="x")
y = tf.constant(2.0, name="y")
c = lambda x, y: tf.less(x, 100.0)
def b(x, y):
y1 = tf.stop_gradient(tf.square(y))
x1 = tf.add(tf.square(x), y1)
return x1, y1
rx, _ = tf.while_loop(c, b, [x, y])
r = tf.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
r = tf.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
def testWhileGradGrad(self):
theta = tf.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = tf.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = tf.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
tf.gradients(grad_theta, theta)
def testOneValueCond(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
one = tf.convert_to_tensor(1, name="one")
two = tf.convert_to_tensor(2, name="two")
p = tf.greater_equal(c, 1)
i = tf.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, tf.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
with self.test_session():
x = tf.convert_to_tensor([-2.0, 2.0], name="x")
d = tf.placeholder(tf.int32, shape=[])
def l2():
return tf.sqrt(tf.reduce_sum(tf.square(x)))
def l1():
return tf.reduce_sum(tf.abs(x))
i = tf.cond(tf.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
with self.test_session():
x = tf.constant(1)
y = tf.constant(2)
z = tf.constant(3)
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
f3 = lambda: tf.constant(-1)
r1 = tf.case({x < y: f1, x > z: f2}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = tf.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
# Duplicate events can happen, first one is selected
r3 = tf.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
# Duplicate events cause an error if exclusive = True
r4 = tf.case([(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError(
"More than one condition evaluated as True but exclusive=True."):
r4.eval()
# Check that the default is called if none of the others are
r5 = tf.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return tf.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = tf.case([(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: tf.constant(2))
self.assertAllEqual(r6.eval(), 0)
def testCaseSideEffects(self):
with self.test_session() as sess:
v0 = tf.Variable(-1)
v1 = tf.Variable(-1)
v2 = tf.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([tf.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([tf.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([tf.assign(v2, 2)], 2)
x = tf.constant(1)
y = tf.constant(2)
r0 = tf.case(((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = tf.case(((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = tf.case(((x > y, a), (x > y, b)), default=c, exclusive=True)
tf.initialize_all_variables().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
tf.initialize_all_variables().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
tf.initialize_all_variables().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
with self.test_session():
v = tf.Variable(0)
c = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
two = tf.convert_to_tensor(2)
p = tf.greater_equal(c, 1)
def a():
return tf.assign(v, one)
def b():
return tf.assign(v, two)
i = tf.cond(p, a, b)
self.assertTrue(isinstance(i, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.test_session() as sess:
v = tf.Variable(0.0)
c = tf.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v.ref(),
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
      # Ensure the fetched value of 'c' is the same as before
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.test_session():
v = tf.Variable(0.0)
c1 = tf.constant(10)
c2 = tf.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v",
output_tensor=c1,
dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.test_session():
v = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = tf.IndexedSlices(v, tf.constant([1]))
gather_v_at_1 = tf.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = tf.gather(
v_at_1_after_init.values, v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with tf.Graph().as_default():
# device set on tensor => same device on dep.
with tf.device("/job:ps"):
vd = tf.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = tf.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = tf.Variable([0.0], name="vdef")
with tf.device("/job:worker/gpu:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"],
with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.test_session() as sess:
v1 = tf.Variable([0.0])
v2 = tf.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
op = tf.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
p1 = tf.placeholder(tf.float32)
p2 = tf.placeholder(tf.float32)
p3 = tf.placeholder(tf.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[None, 2])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[None, None])
p2 = tf.placeholder(tf.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = tf.placeholder(tf.int32)
# All inputs unknown.
p1 = tf.placeholder(tf.float32)
p2 = tf.placeholder(tf.float32)
p3 = tf.placeholder(tf.float32)
v1 = tf.Variable(p1, validate_shape=False)
v2 = tf.Variable(p2, validate_shape=False)
v3 = tf.Variable(p3, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = tf.Variable([[1, 2]])
v2 = tf.Variable([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = tf.Variable([[1, 2]])
v2 = tf.Variable([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = tf.Variable([[1., 2.]])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
v2 = tf.Variable(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.test_session() as sess:
tensor_list = []
def condition(t):
return t < tf.constant(5)
def body(_):
tensor_list.append(tf.constant(5))
return tf.constant(10)
result = tf.while_loop(condition, body, [tf.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
class TupleTest(tf.test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.test_session():
v1 = tf.Variable([1.0])
add1 = tf.add(
control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
2.0)
v2 = tf.Variable([10.0])
add2 = tf.add(
control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
v1 = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
tf.constant([1]))
v2 = tf.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
tf.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = tf.gather(st1.values, st1.indices)
g2 = tf.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.test_session():
var = tf.Variable(0)
assign = tf.assign(var, 1)
t, = tf.tuple([tf.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
self.assertEquals(1, var.eval())
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.test_session():
r = tf.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, tf.py_func(func, [v], [tf.float32])[0]],
[tf.constant(0), tf.constant(2.0, tf.float32)])
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
@function.Defun(tf.float32)
def func(x):
return tf.square(tf.square(x))
with self.test_session():
x = tf.constant(2.0, tf.float32)
r = tf.while_loop(
lambda i, v: i < 2,
lambda i, v: [i + 1, func(v)],
[tf.constant(0), x])
self.assertEqual(r[1].eval(), 65536.0)
r = tf.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
self.assertEqual(len([op for op in x.graph.get_operations()
if op.type == "Stack"]),
1)
if __name__ == "__main__":
tf.test.main()
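
A minimal standalone sketch of the two control-flow primitives this test file exercises most heavily, tf.while_loop and tf.cond, written against the same graph-mode TensorFlow 0.x/1.x API the tests use (tf.Session, tf.constant); the outputs noted in the comments follow from the loop bound and the branch predicate:

import tensorflow as tf

i0 = tf.constant(0)
# Loop while i < 10, incrementing i each iteration; with a single loop variable
# tf.while_loop returns that variable's final value.
loop = tf.while_loop(lambda i: tf.less(i, 10),
                     lambda i: tf.add(i, 1),
                     [i0])
# tf.cond picks one of two branches based on a boolean tensor.
branch = tf.cond(tf.less(2, 3), lambda: tf.constant(1), lambda: tf.constant(0))
with tf.Session() as sess:
    print(sess.run([loop, branch]))  # [10, 1]
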
|
[
"993001803@qq.com"
] |
993001803@qq.com
|
0315d6b622ee9399845ac3c750df71dabf3c92b2
|
2967a6fa8065ecb68683b0499f66f65b9ab646c1
|
/Wbudowane_struktury_danych/9_named_tuple/zadanie/main.py
|
802648494ba1c52c2fcb15f4d95f5733ca93eeac
|
[] |
no_license
|
keinam53/Python_Poczatek
|
f285836a4aa7d261f25bcc4add253e894c30e65e
|
ccb05f3918cc94c925055c78627cba28482ce5bb
|
refs/heads/master
| 2023-05-01T06:41:56.414754
| 2021-05-13T19:30:30
| 2021-05-13T19:30:30
| 363,479,245
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from collections import namedtuple
Apple = namedtuple("Apple", ["species_name", "size", "price"])
def run():
apple = Apple("Gala", "M", 2.5)
print(apple.species_name)
print(apple.size)
print(apple.price)
print(apple[0])
print(apple[1])
print(apple[2])
for data in apple:
print(data)
if __name__ == '__main__':
run()
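
The Apple namedtuple defined above also comes with a few standard conveniences not used in the exercise; a short sketch using only the standard library and the same field names:

from collections import namedtuple

Apple = namedtuple("Apple", ["species_name", "size", "price"])
apple = Apple("Gala", "M", 2.5)
print(apple._asdict())            # mapping of field name -> value
print(apple._replace(price=3.0))  # a new Apple with only the price changed
print(Apple._fields)              # ('species_name', 'size', 'price')
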
|
[
"mariusz.baran536@gmail.com"
] |
mariusz.baran536@gmail.com
|
c36b683cc1e58e829f9f297098569c90edd4c7e4
|
ffa8a728f43b6de2b9a4dbfda18f3eb8518fbbbd
|
/snmp-mibs/DISMAN-EXPRESSION-MIB.py
|
1ffaaebeb4ca69378f03f4197870dfb4089a8e77
|
[] |
no_license
|
oriordan/pysnmp_mibs
|
60e0d80e3f50490d9e6ab29d21627fec59ab0cfc
|
92d39abf358a952e55a426e2a4658f4b0824182f
|
refs/heads/master
| 2021-01-09T23:37:59.137750
| 2014-11-26T20:07:28
| 2014-11-26T20:07:28
| 20,253,987
| 11
| 15
| null | 2020-07-26T02:49:32
| 2014-05-28T10:43:18
|
Python
|
UTF-8
|
Python
| false
| false
| 43,500
|
py
|
# PySNMP SMI module. Autogenerated from smidump -f python DISMAN-EXPRESSION-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:35 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( SnmpAdminString, ) = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( sysUpTime, ) = mibBuilder.importSymbols("SNMPv2-MIB", "sysUpTime")
( Bits, Counter32, Counter64, Gauge32, Integer32, Integer32, IpAddress, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, TimeTicks, Unsigned32, mib_2, zeroDotZero, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter32", "Counter64", "Gauge32", "Integer32", "Integer32", "IpAddress", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "TimeTicks", "Unsigned32", "mib-2", "zeroDotZero")
( RowStatus, TimeStamp, TruthValue, ) = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TimeStamp", "TruthValue")
# Objects
sysUpTimeInstance = MibIdentifier((1, 3, 6, 1, 2, 1, 1, 3, 0))
dismanExpressionMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 90)).setRevisions(("2000-10-16 00:00",))
if mibBuilder.loadTexts: dismanExpressionMIB.setOrganization("IETF Distributed Management Working Group")
if mibBuilder.loadTexts: dismanExpressionMIB.setContactInfo("Ramanathan Kavasseri\nCisco Systems, Inc.\n170 West Tasman Drive,\nSan Jose CA 95134-1706.\nPhone: +1 408 527 2446\nEmail: ramk@cisco.com")
if mibBuilder.loadTexts: dismanExpressionMIB.setDescription("The MIB module for defining expressions of MIB objects for\nmanagement purposes.")
dismanExpressionMIBObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 90, 1))
expResource = MibIdentifier((1, 3, 6, 1, 2, 1, 90, 1, 1))
expResourceDeltaMinimum = MibScalar((1, 3, 6, 1, 2, 1, 90, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(-1,-1),ValueRangeConstraint(1,600),))).setMaxAccess("readwrite").setUnits("seconds")
if mibBuilder.loadTexts: expResourceDeltaMinimum.setDescription("The minimum expExpressionDeltaInterval this system will\naccept. A system may use the larger values of this minimum to\nlessen the impact of constantly computing deltas. For larger\ndelta sampling intervals the system samples less often and\nsuffers less overhead. This object provides a way to enforce\nsuch lower overhead for all expressions created after it is\nset.\n\nThe value -1 indicates that expResourceDeltaMinimum is\nirrelevant as the system will not accept 'deltaValue' as a\nvalue for expObjectSampleType.\n\nUnless explicitly resource limited, a system's value for\nthis object should be 1, allowing as small as a 1 second\ninterval for ongoing delta sampling.\n\nChanging this value will not invalidate an existing setting\nof expObjectSampleType.")
expResourceDeltaWildcardInstanceMaximum = MibScalar((1, 3, 6, 1, 2, 1, 90, 1, 1, 2), Unsigned32()).setMaxAccess("readwrite").setUnits("instances")
if mibBuilder.loadTexts: expResourceDeltaWildcardInstanceMaximum.setDescription("For every instance of a deltaValue object, one dynamic instance\nentry is needed for holding the instance value from the previous\nsample, i.e. to maintain state.\n\nThis object limits maximum number of dynamic instance entries\nthis system will support for wildcarded delta objects in\nexpressions. For a given delta expression, the number of\ndynamic instances is the number of values that meet all criteria\nto exist times the number of delta values in the expression.\n\nA value of 0 indicates no preset limit, that is, the limit\nis dynamic based on system operation and resources.\n\nUnless explicitly resource limited, a system's value for\nthis object should be 0.\n\n\n\nChanging this value will not eliminate or inhibit existing delta\nwildcard instance objects but will prevent the creation of more\nsuch objects.\n\nAn attempt to allocate beyond the limit results in expErrorCode\nbeing tooManyWildcardValues for that evaluation attempt.")
expResourceDeltaWildcardInstances = MibScalar((1, 3, 6, 1, 2, 1, 90, 1, 1, 3), Gauge32()).setMaxAccess("readonly").setUnits("instances")
if mibBuilder.loadTexts: expResourceDeltaWildcardInstances.setDescription("The number of currently active instance entries as\ndefined for expResourceDeltaWildcardInstanceMaximum.")
expResourceDeltaWildcardInstancesHigh = MibScalar((1, 3, 6, 1, 2, 1, 90, 1, 1, 4), Gauge32()).setMaxAccess("readonly").setUnits("instances")
if mibBuilder.loadTexts: expResourceDeltaWildcardInstancesHigh.setDescription("The highest value of expResourceDeltaWildcardInstances\nthat has occurred since initialization of the managed\nsystem.")
expResourceDeltaWildcardInstanceResourceLacks = MibScalar((1, 3, 6, 1, 2, 1, 90, 1, 1, 5), Counter32()).setMaxAccess("readonly").setUnits("instances")
if mibBuilder.loadTexts: expResourceDeltaWildcardInstanceResourceLacks.setDescription("The number of times this system could not evaluate an\nexpression because that would have created a value instance in\nexcess of expResourceDeltaWildcardInstanceMaximum.")
expDefine = MibIdentifier((1, 3, 6, 1, 2, 1, 90, 1, 2))
expExpressionTable = MibTable((1, 3, 6, 1, 2, 1, 90, 1, 2, 1))
if mibBuilder.loadTexts: expExpressionTable.setDescription("A table of expression definitions.")
expExpressionEntry = MibTableRow((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1)).setIndexNames((0, "DISMAN-EXPRESSION-MIB", "expExpressionOwner"), (0, "DISMAN-EXPRESSION-MIB", "expExpressionName"))
if mibBuilder.loadTexts: expExpressionEntry.setDescription("Information about a single expression. New expressions\ncan be created using expExpressionRowStatus.\n\nTo create an expression first create the named entry in this\ntable. Then use expExpressionName to populate expObjectTable.\nFor expression evaluation to succeed all related entries in\nexpExpressionTable and expObjectTable must be 'active'. If\nthese conditions are not met the corresponding values in\nexpValue simply are not instantiated.\n\nDeleting an entry deletes all related entries in expObjectTable\nand expErrorTable.\n\nBecause of the relationships among the multiple tables for an\nexpression (expExpressionTable, expObjectTable, and\nexpValueTable) and the SNMP rules for independence in setting\nobject values, it is necessary to do final error checking when\nan expression is evaluated, that is, when one of its instances\nin expValueTable is read or a delta interval expires. Earlier\nchecking need not be done and an implementation may not impose\nany ordering on the creation of objects related to an\nexpression.\n\nTo maintain security of MIB information, when creating a new row in\nthis table, the managed system must record the security credentials\nof the requester. These security credentials are the parameters\nnecessary as inputs to isAccessAllowed from the Architecture for\n\nDescribing SNMP Management Frameworks. When obtaining the objects\nthat make up the expression, the system must (conceptually) use\nisAccessAllowed to ensure that it does not violate security.\n\nThe evaluation of the expression takes place under the\nsecurity credentials of the creator of its expExpressionEntry.\n\nValues of read-write objects in this table may be changed\n\n\nat any time.")
expExpressionOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: expExpressionOwner.setDescription("The owner of this entry. The exact semantics of this\nstring are subject to the security policy defined by the\nsecurity administrator.")
expExpressionName = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: expExpressionName.setDescription("The name of the expression. This is locally unique, within\nthe scope of an expExpressionOwner.")
expExpression = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1024))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expExpression.setDescription("The expression to be evaluated. This object is the same\nas a DisplayString (RFC 1903) except for its maximum length.\n\nExcept for the variable names the expression is in ANSI C\nsyntax. Only the subset of ANSI C operators and functions\nlisted here is allowed.\n\nVariables are expressed as a dollar sign ('$') and an\n\n\ninteger that corresponds to an expObjectIndex. An\nexample of a valid expression is:\n\n ($1-$5)*100\n\nExpressions must not be recursive, that is although an expression\nmay use the results of another expression, it must not contain\nany variable that is directly or indirectly a result of its own\nevaluation. The managed system must check for recursive\nexpressions.\n\nThe only allowed operators are:\n\n ( )\n - (unary)\n + - * / %\n & | ^ << >> ~\n ! && || == != > >= < <=\n\nNote the parentheses are included for parenthesizing the\nexpression, not for casting data types.\n\nThe only constant types defined are:\n\n int (32-bit signed)\n long (64-bit signed)\n unsigned int\n unsigned long\n hexadecimal\n character\n string\n oid\n\nThe default type for a positive integer is int unless it is too\nlarge in which case it is long.\n\nAll but oid are as defined for ANSI C. Note that a\nhexadecimal constant may end up as a scalar or an array of\n8-bit integers. A string constant is enclosed in double\nquotes and may contain back-slashed individual characters\nas in ANSI C.\n\nAn oid constant comprises 32-bit, unsigned integers and at\nleast one period, for example:\n\n 0.\n .0\n 1.3.6.1\n\n\nNo additional leading or trailing subidentifiers are automatically\nadded to an OID constant. The constant is taken as expressed.\n\nInteger-typed objects are treated as 32- or 64-bit, signed\nor unsigned integers, as appropriate. The results of\nmixing them are as for ANSI C, including the type of the\nresult. Note that a 32-bit value is thus promoted to 64 bits\nonly in an operation with a 64-bit value. There is no\nprovision for larger values to handle overflow.\n\nRelative to SNMP data types, a resulting value becomes\nunsigned when calculating it uses any unsigned value,\nincluding a counter. To force the final value to be of\ndata type counter the expression must explicitly use the\ncounter32() or counter64() function (defined below).\n\nOCTET STRINGS and OBJECT IDENTIFIERs are treated as\none-dimensioned arrays of unsigned 8-bit integers and\nunsigned 32-bit integers, respectively.\n\nIpAddresses are treated as 32-bit, unsigned integers in\nnetwork byte order, that is, the hex version of 255.0.0.0 is\n0xff000000.\n\nConditional expressions result in a 32-bit, unsigned integer\nof value 0 for false or 1 for true. When an arbitrary value\nis used as a boolean 0 is false and non-zero is true.\n\nRules for the resulting data type from an operation, based on\nthe operator:\n\nFor << and >> the result is the same as the left hand operand.\n\nFor &&, ||, ==, !=, <, <=, >, and >= the result is always\nUnsigned32.\n\nFor unary - the result is always Integer32.\n\nFor +, -, *, /, %, &, |, and ^ the result is promoted according\nto the following rules, in order from most to least preferred:\n\n If left hand and right hand operands are the same type,\n use that.\n\n If either side is Counter64, use that.\n\n If either side is IpAddress, use that.\n\n\n\n If either side is TimeTicks, use that.\n\n If either side is Counter32, use that.\n\n Otherwise use Unsigned32.\n\nThe following rules say what operators apply with what data\ntypes. Any combination not explicitly defined does not work.\n\nFor all operators any of the following can be the left hand or\nright hand operand: Integer32, Counter32, Unsigned32, Counter64.\n\nThe operators +, -, *, /, %, <, <=, >, and >= work with\nTimeTicks.\n\nThe operators &, |, and ^ work with IpAddress.\n\nThe operators << and >> work with IpAddress but only as the\nleft hand operand.\n\nThe + operator performs a concatenation of two OCTET STRINGs or\ntwo OBJECT IDENTIFIERs.\n\nThe operators &, | perform bitwise operations on OCTET STRINGs.\nIf the OCTET STRING happens to be a DisplayString the results\nmay be meaningless, but the agent system does not check this as\nsome such systems do not have this information.\n\nThe operators << and >> perform bitwise operations on OCTET\nSTRINGs appearing as the left hand operand.\n\nThe only functions defined are:\n\n counter32\n counter64\n arraySection\n stringBegins\n stringEnds\n stringContains\n oidBegins\n oidEnds\n oidContains\n average\n maximum\n minimum\n sum\n exists\n\n\n\nThe following function definitions indicate their parameters by\nnaming the data type of the parameter in the parameter's position\nin the parameter list. The parameter must be of the type indicated\nand generally may be a constant, a MIB object, a function, or an\nexpression.\n\ncounter32(integer) - wrapped around an integer value counter32\nforces Counter32 as a data type.\n\ncounter64(integer) - similar to counter32 except that the\nresulting data type is 'counter64'.\n\narraySection(array, integer, integer) - selects a piece of an\narray (i.e. part of an OCTET STRING or OBJECT IDENTIFIER). The\ninteger arguments are in the range 0 to 4,294,967,295. The\nfirst is an initial array index (one-dimensioned) and the second\nis an ending array index. A value of 0 indicates first or last\nelement, respectively. If the first element is larger than the\narray length the result is 0 length. If the second integer is\nless than or equal to the first, the result is 0 length. If the\nsecond is larger than the array length it indicates last\nelement.\n\nstringBegins/Ends/Contains(octetString, octetString) - looks for\nthe second string (which can be a string constant) in the first\nand returns the one-dimensioned arrayindex where the match began.\nA return value of 0 indicates no match (i.e. boolean false).\n\noidBegins/Ends/Contains(oid, oid) - looks for the second OID\n(which can be an OID constant) in the first and returns the\nthe one-dimensioned index where the match began. A return value\nof 0 indicates no match (i.e. boolean false).\n\naverage/maximum/minimum(integer) - calculates the average,\nminimum, or maximum value of the integer valued object over\nmultiple sample times. If the object disappears for any\nsample period, the accumulation and the resulting value object\ncease to exist until the object reappears at which point the\ncalculation starts over.\n\nsum(integerObject*) - sums all available values of the\nwildcarded integer object, resulting in an integer scalar. Must\nbe used with caution as it wraps on overflow with no\nnotification.\n\nexists(anyTypeObject) - verifies the object instance exists. A\nreturn value of 0 indicates NoSuchInstance (i.e. boolean\nfalse).")
expExpressionValueType = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 4), Integer().subtype(subtypeSpec=SingleValueConstraint(4,6,7,5,3,8,2,1,)).subtype(namedValues=NamedValues(("counter32", 1), ("unsigned32", 2), ("timeTicks", 3), ("integer32", 4), ("ipAddress", 5), ("octetString", 6), ("objectId", 7), ("counter64", 8), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expExpressionValueType.setDescription("The type of the expression value. One and only one of the\nvalue objects in expValueTable will be instantiated to match\nthis type.\n\nIf the result of the expression can not be made into this type,\nan invalidOperandType error will occur.")
expExpressionComment = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 5), SnmpAdminString().clone('')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expExpressionComment.setDescription("A comment to explain the use or meaning of the expression.")
expExpressionDeltaInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400)).clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expExpressionDeltaInterval.setDescription("Sampling interval for objects in this expression with\nexpObjectSampleType 'deltaValue'.\n\nThis object has no effect if the the expression has no\ndeltaValue objects.\n\nA value of 0 indicates no automated sampling. In this case\nthe delta is the difference from the last time the expression\nwas evaluated. Note that this is subject to unpredictable\ndelta times in the face of retries or multiple managers.\n\nA value greater than zero is the number of seconds between\nautomated samples.\n\nUntil the delta interval has expired once the delta for the\n\n\nobject is effectively not instantiated and evaluating\nthe expression has results as if the object itself were not\ninstantiated.\n\nNote that delta values potentially consume large amounts of\nsystem CPU and memory. Delta state and processing must\ncontinue constantly even if the expression is not being used.\nThat is, the expression is being evaluated every delta interval,\neven if no application is reading those values. For wildcarded\nobjects this can be substantial overhead.\n\nNote that delta intervals, external expression value sampling\nintervals and delta intervals for expressions within other\nexpressions can have unusual interactions as they are impossible\nto synchronize accurately. In general one interval embedded\nbelow another must be enough shorter that the higher sample\nsees relatively smooth, predictable behavior. So, for example,\nto avoid the higher level getting the same sample twice, the\nlower level should sample at least twice as fast as the higher\nlevel does.")
expExpressionPrefix = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 7), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expExpressionPrefix.setDescription("An object prefix to assist an application in determining\nthe instance indexing to use in expValueTable, relieving the\napplication of the need to scan the expObjectTable to\ndetermine such a prefix.\n\nSee expObjectTable for information on wildcarded objects.\n\nIf the expValueInstance portion of the value OID may\nbe treated as a scalar (that is, normally, 0) the value of\nexpExpressionPrefix is zero length, that is, no OID at all.\nNote that zero length implies a null OID, not the OID 0.0.\n\nOtherwise, the value of expExpressionPrefix is the expObjectID\nvalue of any one of the wildcarded objects for the expression.\nThis is sufficient, as the remainder, that is, the instance\nfragment relevant to instancing the values, must be the same for\nall wildcarded objects in the expression.")
expExpressionErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expExpressionErrors.setDescription("The number of errors encountered while evaluating this\nexpression.\n\nNote that an object in the expression not being accessible,\nis not considered an error. An example of an inaccessible\nobject is when the object is excluded from the view of the\nuser whose security credentials are used in the expression\nevaluation. In such cases, it is a legitimate condition\nthat causes the corresponding expression value not to be\ninstantiated.")
expExpressionEntryStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 1, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expExpressionEntryStatus.setDescription("The control that allows creation and deletion of entries.")
expErrorTable = MibTable((1, 3, 6, 1, 2, 1, 90, 1, 2, 2))
if mibBuilder.loadTexts: expErrorTable.setDescription("A table of expression errors.")
expErrorEntry = MibTableRow((1, 3, 6, 1, 2, 1, 90, 1, 2, 2, 1)).setIndexNames((0, "DISMAN-EXPRESSION-MIB", "expExpressionOwner"), (0, "DISMAN-EXPRESSION-MIB", "expExpressionName"))
if mibBuilder.loadTexts: expErrorEntry.setDescription("Information about errors in processing an expression.\n\nEntries appear in this table only when there is a matching\nexpExpressionEntry and then only when there has been an\nerror for that expression as reflected by the error codes\ndefined for expErrorCode.")
expErrorTime = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 2, 1, 1), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expErrorTime.setDescription("The value of sysUpTime the last time an error caused a\nfailure to evaluate this expression.")
expErrorIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expErrorIndex.setDescription("The one-dimensioned character array index into\nexpExpression for where the error occurred. The value\nzero indicates irrelevance.")
expErrorCode = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 2, 1, 3), Integer().subtype(subtypeSpec=SingleValueConstraint(11,3,6,9,10,1,2,8,7,5,4,)).subtype(namedValues=NamedValues(("invalidSyntax", 1), ("resourceUnavailable", 10), ("divideByZero", 11), ("undefinedObjectIndex", 2), ("unrecognizedOperator", 3), ("unrecognizedFunction", 4), ("invalidOperandType", 5), ("unmatchedParenthesis", 6), ("tooManyWildcardValues", 7), ("recursion", 8), ("deltaTooShort", 9), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: expErrorCode.setDescription("The error that occurred. In the following explanations the\nexpected timing of the error is in parentheses. 'S' means\nthe error occurs on a Set request. 'E' means the error\n\n\noccurs on the attempt to evaluate the expression either due to\nGet from expValueTable or in ongoing delta processing.\n\ninvalidSyntax the value sent for expExpression is not\n valid Expression MIB expression syntax\n (S)\nundefinedObjectIndex an object reference ($n) in\n expExpression does not have a matching\n instance in expObjectTable (E)\nunrecognizedOperator the value sent for expExpression held an\n unrecognized operator (S)\nunrecognizedFunction the value sent for expExpression held an\n unrecognized function name (S)\ninvalidOperandType an operand in expExpression is not the\n right type for the associated operator\n or result (SE)\nunmatchedParenthesis the value sent for expExpression is not\n correctly parenthesized (S)\ntooManyWildcardValues evaluating the expression exceeded the\n limit set by\n expResourceDeltaWildcardInstanceMaximum\n (E)\nrecursion through some chain of embedded\n expressions the expression invokes itself\n (E)\ndeltaTooShort the delta for the next evaluation passed\n before the system could evaluate the\n present sample (E)\nresourceUnavailable some resource, typically dynamic memory,\n was unavailable (SE)\ndivideByZero an attempt to divide by zero occurred\n (E)\n\nFor the errors that occur when the attempt is made to set\nexpExpression Set request fails with the SNMP error code\n'wrongValue'. Such failures refer to the most recent failure to\nSet expExpression, not to the present value of expExpression\nwhich must be either unset or syntactically correct.\n\nErrors that occur during evaluation for a Get* operation return\nthe SNMP error code 'genErr' except for 'tooManyWildcardValues'\nand 'resourceUnavailable' which return the SNMP error code\n'resourceUnavailable'.")
expErrorInstance = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 2, 1, 4), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expErrorInstance.setDescription("The expValueInstance being evaluated when the error\noccurred. A zero-length indicates irrelevance.")
expObjectTable = MibTable((1, 3, 6, 1, 2, 1, 90, 1, 2, 3))
if mibBuilder.loadTexts: expObjectTable.setDescription("A table of object definitions for each expExpression.\n\nWildcarding instance IDs:\n\nIt is legal to omit all or part of the instance portion for\nsome or all of the objects in an expression. (See the\nDESCRIPTION of expObjectID for details. However, note that\nif more than one object in the same expression is wildcarded\nin this way, they all must be objects where that portion of\nthe instance is the same. In other words, all objects may be\nin the same SEQUENCE or in different SEQUENCEs but with the\nsame semantic index value (e.g., a value of ifIndex)\nfor the wildcarded portion.")
expObjectEntry = MibTableRow((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1)).setIndexNames((0, "DISMAN-EXPRESSION-MIB", "expExpressionOwner"), (0, "DISMAN-EXPRESSION-MIB", "expExpressionName"), (0, "DISMAN-EXPRESSION-MIB", "expObjectIndex"))
if mibBuilder.loadTexts: expObjectEntry.setDescription("Information about an object. An application uses\nexpObjectEntryStatus to create entries in this table while\nin the process of defining an expression.\n\nValues of read-create objects in this table may be\nchanged at any time.")
expObjectIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: expObjectIndex.setDescription("Within an expression, a unique, numeric identification for an\nobject. Prefixed with a dollar sign ('$') this is used to\nreference the object in the corresponding expExpression.")
expObjectID = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 2), ObjectIdentifier()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectID.setDescription("The OBJECT IDENTIFIER (OID) of this object. The OID may be\nfully qualified, meaning it includes a complete instance\nidentifier part (e.g., ifInOctets.1 or sysUpTime.0), or it\nmay not be fully qualified, meaning it may lack all or part\nof the instance identifier. If the expObjectID is not fully\nqualified, then expObjectWildcard must be set to true(1).\nThe value of the expression will be multiple\nvalues, as if done for a GetNext sweep of the object.\n\nAn object here may itself be the result of an expression but\nrecursion is not allowed.\n\nNOTE: The simplest implementations of this MIB may not allow\nwildcards.")
expObjectIDWildcard = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectIDWildcard.setDescription("A true value indicates the expObjecID of this row is a wildcard\nobject. False indicates that expObjectID is fully instanced.\nIf all expObjectWildcard values for a given expression are FALSE,\n\n\nexpExpressionPrefix will reflect a scalar object (i.e. will\nbe 0.0).\n\nNOTE: The simplest implementations of this MIB may not allow\nwildcards.")
expObjectSampleType = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 4), Integer().subtype(subtypeSpec=SingleValueConstraint(3,1,2,)).subtype(namedValues=NamedValues(("absoluteValue", 1), ("deltaValue", 2), ("changedValue", 3), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectSampleType.setDescription("The method of sampling the selected variable.\n\nAn 'absoluteValue' is simply the present value of the object.\n\nA 'deltaValue' is the present value minus the previous value,\nwhich was sampled expExpressionDeltaInterval seconds ago.\nThis is intended primarily for use with SNMP counters, which are\nmeaningless as an 'absoluteValue', but may be used with any\ninteger-based value.\n\nA 'changedValue' is a boolean for whether the present value is\ndifferent from the previous value. It is applicable to any data\ntype and results in an Unsigned32 with value 1 if the object's\nvalue is changed and 0 if not. In all other respects it is as a\n'deltaValue' and all statements and operation regarding delta\nvalues apply to changed values.\n\nWhen an expression contains both delta and absolute values\nthe absolute values are obtained at the end of the delta\nperiod.")
expObjectDeltaDiscontinuityID = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 5), ObjectIdentifier().clone((1, 3, 6, 1, 2, 1, 1, 3, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectDeltaDiscontinuityID.setDescription("The OBJECT IDENTIFIER (OID) of a TimeTicks, TimeStamp, or\nDateAndTime object that indicates a discontinuity in the value\nat expObjectID.\n\n\n\nThis object is instantiated only if expObjectSampleType is\n'deltaValue' or 'changedValue'.\n\nThe OID may be for a leaf object (e.g. sysUpTime.0) or may\nbe wildcarded to match expObjectID.\n\nThis object supports normal checking for a discontinuity in a\ncounter. Note that if this object does not point to sysUpTime\ndiscontinuity checking must still check sysUpTime for an overall\ndiscontinuity.\n\nIf the object identified is not accessible no discontinuity\ncheck will be made.")
expObjectDiscontinuityIDWildcard = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectDiscontinuityIDWildcard.setDescription("A true value indicates the expObjectDeltaDiscontinuityID of\nthis row is a wildcard object. False indicates that\nexpObjectDeltaDiscontinuityID is fully instanced.\n\nThis object is instantiated only if expObjectSampleType is\n'deltaValue' or 'changedValue'.\n\nNOTE: The simplest implementations of this MIB may not allow\nwildcards.")
expObjectDiscontinuityIDType = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 7), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,3,)).subtype(namedValues=NamedValues(("timeTicks", 1), ("timeStamp", 2), ("dateAndTime", 3), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectDiscontinuityIDType.setDescription("The value 'timeTicks' indicates the expObjectDeltaDiscontinuityID\nof this row is of syntax TimeTicks. The value 'timeStamp' indicates\nsyntax TimeStamp. The value 'dateAndTime indicates syntax\nDateAndTime.\n\nThis object is instantiated only if expObjectSampleType is\n'deltaValue' or 'changedValue'.")
expObjectConditional = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 8), ObjectIdentifier().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectConditional.setDescription("The OBJECT IDENTIFIER (OID) of an object that overrides\nwhether the instance of expObjectID is to be considered\nusable. If the value of the object at expObjectConditional\nis 0 or not instantiated, the object at expObjectID is\ntreated as if it is not instantiated. In other words,\nexpObjectConditional is a filter that controls whether or\nnot to use the value at expObjectID.\n\nThe OID may be for a leaf object (e.g. sysObjectID.0) or may be\nwildcarded to match expObjectID. If expObject is wildcarded and\nexpObjectID in the same row is not, the wild portion of\nexpObjectConditional must match the wildcarding of the rest of\nthe expression. If no object in the expression is wildcarded\nbut expObjectConditional is, use the lexically first instance\n(if any) of expObjectConditional.\n\nIf the value of expObjectConditional is 0.0 operation is\nas if the value pointed to by expObjectConditional is a\nnon-zero (true) value.\n\nNote that expObjectConditional can not trivially use an object\nof syntax TruthValue, since the underlying value is not 0 or 1.")
expObjectConditionalWildcard = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 9), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectConditionalWildcard.setDescription("A true value indicates the expObjectConditional of this row is\na wildcard object. False indicates that expObjectConditional is\nfully instanced.\n\nNOTE: The simplest implementations of this MIB may not allow\nwildcards.")
expObjectEntryStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 2, 3, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: expObjectEntryStatus.setDescription("The control that allows creation/deletion of entries.\n\nObjects in this table may be changed while\nexpObjectEntryStatus is in any state.")
expValue = MibIdentifier((1, 3, 6, 1, 2, 1, 90, 1, 3))
expValueTable = MibTable((1, 3, 6, 1, 2, 1, 90, 1, 3, 1))
if mibBuilder.loadTexts: expValueTable.setDescription("A table of values from evaluated expressions.")
expValueEntry = MibTableRow((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1)).setIndexNames((0, "DISMAN-EXPRESSION-MIB", "expExpressionOwner"), (0, "DISMAN-EXPRESSION-MIB", "expExpressionName"), (1, "DISMAN-EXPRESSION-MIB", "expValueInstance"))
if mibBuilder.loadTexts: expValueEntry.setDescription("A single value from an evaluated expression. For a given\ninstance, only one 'Val' object in the conceptual row will be\ninstantiated, that is, the one with the appropriate type for\nthe value. For values that contain no objects of\nexpObjectSampleType 'deltaValue' or 'changedValue', reading a\nvalue from the table causes the evaluation of the expression\nfor that value. For those that contain a 'deltaValue' or\n'changedValue' the value read is as of the last sampling\ninterval.\n\nIf in the attempt to evaluate the expression one or more\nof the necessary objects is not available, the corresponding\nentry in this table is effectively not instantiated.\n\nTo maintain security of MIB information, when creating a new\nrow in this table, the managed system must record the security\ncredentials of the requester. These security credentials are\nthe parameters necessary as inputs to isAccessAllowed from\n[RFC2571]. When obtaining the objects that make up the\nexpression, the system must (conceptually) use isAccessAllowed to\nensure that it does not violate security.\n\nThe evaluation of that expression takes place under the\n\n\nsecurity credentials of the creator of its expExpressionEntry.\n\nTo maintain security of MIB information, expression evaluation must\ntake place using security credentials for the implied Gets of the\nobjects in the expression as inputs (conceptually) to\nisAccessAllowed from the Architecture for Describing SNMP\nManagement Frameworks. These are the security credentials of the\ncreator of the corresponding expExpressionEntry.")
expValueInstance = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 1), ObjectIdentifier()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: expValueInstance.setDescription("The final instance portion of a value's OID according to\nthe wildcarding in instances of expObjectID for the\nexpression. The prefix of this OID fragment is 0.0,\nleading to the following behavior.\n\nIf there is no wildcarding, the value is 0.0.0. In other\nwords, there is one value which standing alone would have\nbeen a scalar with a 0 at the end of its OID.\n\nIf there is wildcarding, the value is 0.0 followed by\na value that the wildcard can take, thus defining one value\ninstance for each real, possible value of the wildcard.\nSo, for example, if the wildcard worked out to be an ifIndex,\nthere is an expValueInstance for each applicable ifIndex.")
expValueCounter32Val = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueCounter32Val.setDescription("The value when expExpressionValueType is 'counter32'.")
expValueUnsigned32Val = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueUnsigned32Val.setDescription("The value when expExpressionValueType is 'unsigned32'.")
expValueTimeTicksVal = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueTimeTicksVal.setDescription("The value when expExpressionValueType is 'timeTicks'.")
expValueInteger32Val = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueInteger32Val.setDescription("The value when expExpressionValueType is 'integer32'.")
expValueIpAddressVal = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 6), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueIpAddressVal.setDescription("The value when expExpressionValueType is 'ipAddress'.")
expValueOctetStringVal = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 7), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueOctetStringVal.setDescription("The value when expExpressionValueType is 'octetString'.")
expValueOidVal = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 8), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueOidVal.setDescription("The value when expExpressionValueType is 'objectId'.")
expValueCounter64Val = MibTableColumn((1, 3, 6, 1, 2, 1, 90, 1, 3, 1, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expValueCounter64Val.setDescription("The value when expExpressionValueType is 'counter64'.")
dismanExpressionMIBConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 90, 3))
dismanExpressionMIBCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 90, 3, 1))
dismanExpressionMIBGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 90, 3, 2))
# Augmentions
# Groups
dismanExpressionResourceGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 90, 3, 2, 1)).setObjects(*(("DISMAN-EXPRESSION-MIB", "expResourceDeltaMinimum"), ("DISMAN-EXPRESSION-MIB", "expResourceDeltaWildcardInstancesHigh"), ("DISMAN-EXPRESSION-MIB", "expResourceDeltaWildcardInstances"), ("DISMAN-EXPRESSION-MIB", "expResourceDeltaWildcardInstanceResourceLacks"), ("DISMAN-EXPRESSION-MIB", "expResourceDeltaWildcardInstanceMaximum"), ) )
if mibBuilder.loadTexts: dismanExpressionResourceGroup.setDescription("Expression definition resource management.")
dismanExpressionDefinitionGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 90, 3, 2, 2)).setObjects(*(("DISMAN-EXPRESSION-MIB", "expObjectID"), ("DISMAN-EXPRESSION-MIB", "expObjectConditional"), ("DISMAN-EXPRESSION-MIB", "expExpressionComment"), ("DISMAN-EXPRESSION-MIB", "expObjectDiscontinuityIDWildcard"), ("DISMAN-EXPRESSION-MIB", "expErrorInstance"), ("DISMAN-EXPRESSION-MIB", "expExpressionErrors"), ("DISMAN-EXPRESSION-MIB", "expExpression"), ("DISMAN-EXPRESSION-MIB", "expErrorTime"), ("DISMAN-EXPRESSION-MIB", "expErrorIndex"), ("DISMAN-EXPRESSION-MIB", "expExpressionEntryStatus"), ("DISMAN-EXPRESSION-MIB", "expExpressionValueType"), ("DISMAN-EXPRESSION-MIB", "expObjectEntryStatus"), ("DISMAN-EXPRESSION-MIB", "expObjectDeltaDiscontinuityID"), ("DISMAN-EXPRESSION-MIB", "expObjectSampleType"), ("DISMAN-EXPRESSION-MIB", "expExpressionDeltaInterval"), ("DISMAN-EXPRESSION-MIB", "expExpressionPrefix"), ("DISMAN-EXPRESSION-MIB", "expObjectDiscontinuityIDType"), ("DISMAN-EXPRESSION-MIB", "expObjectConditionalWildcard"), ("DISMAN-EXPRESSION-MIB", "expObjectIDWildcard"), ("DISMAN-EXPRESSION-MIB", "expErrorCode"), ) )
if mibBuilder.loadTexts: dismanExpressionDefinitionGroup.setDescription("Expression definition.")
dismanExpressionValueGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 90, 3, 2, 3)).setObjects(*(("DISMAN-EXPRESSION-MIB", "expValueTimeTicksVal"), ("DISMAN-EXPRESSION-MIB", "expValueIpAddressVal"), ("DISMAN-EXPRESSION-MIB", "expValueInteger32Val"), ("DISMAN-EXPRESSION-MIB", "expValueOidVal"), ("DISMAN-EXPRESSION-MIB", "expValueUnsigned32Val"), ("DISMAN-EXPRESSION-MIB", "expValueCounter64Val"), ("DISMAN-EXPRESSION-MIB", "expValueCounter32Val"), ("DISMAN-EXPRESSION-MIB", "expValueOctetStringVal"), ) )
if mibBuilder.loadTexts: dismanExpressionValueGroup.setDescription("Expression value.")
# Compliances
dismanExpressionMIBCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 90, 3, 1, 1)).setObjects(*(("DISMAN-EXPRESSION-MIB", "dismanExpressionDefinitionGroup"), ("DISMAN-EXPRESSION-MIB", "dismanExpressionResourceGroup"), ("DISMAN-EXPRESSION-MIB", "dismanExpressionValueGroup"), ) )
if mibBuilder.loadTexts: dismanExpressionMIBCompliance.setDescription("The compliance statement for entities which implement\nthe Expression MIB.")
# Exports
# Module identity
mibBuilder.exportSymbols("DISMAN-EXPRESSION-MIB", PYSNMP_MODULE_ID=dismanExpressionMIB)
# Objects
mibBuilder.exportSymbols("DISMAN-EXPRESSION-MIB", sysUpTimeInstance=sysUpTimeInstance, dismanExpressionMIB=dismanExpressionMIB, dismanExpressionMIBObjects=dismanExpressionMIBObjects, expResource=expResource, expResourceDeltaMinimum=expResourceDeltaMinimum, expResourceDeltaWildcardInstanceMaximum=expResourceDeltaWildcardInstanceMaximum, expResourceDeltaWildcardInstances=expResourceDeltaWildcardInstances, expResourceDeltaWildcardInstancesHigh=expResourceDeltaWildcardInstancesHigh, expResourceDeltaWildcardInstanceResourceLacks=expResourceDeltaWildcardInstanceResourceLacks, expDefine=expDefine, expExpressionTable=expExpressionTable, expExpressionEntry=expExpressionEntry, expExpressionOwner=expExpressionOwner, expExpressionName=expExpressionName, expExpression=expExpression, expExpressionValueType=expExpressionValueType, expExpressionComment=expExpressionComment, expExpressionDeltaInterval=expExpressionDeltaInterval, expExpressionPrefix=expExpressionPrefix, expExpressionErrors=expExpressionErrors, expExpressionEntryStatus=expExpressionEntryStatus, expErrorTable=expErrorTable, expErrorEntry=expErrorEntry, expErrorTime=expErrorTime, expErrorIndex=expErrorIndex, expErrorCode=expErrorCode, expErrorInstance=expErrorInstance, expObjectTable=expObjectTable, expObjectEntry=expObjectEntry, expObjectIndex=expObjectIndex, expObjectID=expObjectID, expObjectIDWildcard=expObjectIDWildcard, expObjectSampleType=expObjectSampleType, expObjectDeltaDiscontinuityID=expObjectDeltaDiscontinuityID, expObjectDiscontinuityIDWildcard=expObjectDiscontinuityIDWildcard, expObjectDiscontinuityIDType=expObjectDiscontinuityIDType, expObjectConditional=expObjectConditional, expObjectConditionalWildcard=expObjectConditionalWildcard, expObjectEntryStatus=expObjectEntryStatus, expValue=expValue, expValueTable=expValueTable, expValueEntry=expValueEntry, expValueInstance=expValueInstance, expValueCounter32Val=expValueCounter32Val, expValueUnsigned32Val=expValueUnsigned32Val, expValueTimeTicksVal=expValueTimeTicksVal, expValueInteger32Val=expValueInteger32Val, expValueIpAddressVal=expValueIpAddressVal, expValueOctetStringVal=expValueOctetStringVal, expValueOidVal=expValueOidVal, expValueCounter64Val=expValueCounter64Val, dismanExpressionMIBConformance=dismanExpressionMIBConformance, dismanExpressionMIBCompliances=dismanExpressionMIBCompliances, dismanExpressionMIBGroups=dismanExpressionMIBGroups)
# Groups
mibBuilder.exportSymbols("DISMAN-EXPRESSION-MIB", dismanExpressionResourceGroup=dismanExpressionResourceGroup, dismanExpressionDefinitionGroup=dismanExpressionDefinitionGroup, dismanExpressionValueGroup=dismanExpressionValueGroup)
# Compliances
mibBuilder.exportSymbols("DISMAN-EXPRESSION-MIB", dismanExpressionMIBCompliance=dismanExpressionMIBCompliance)
|
[
"oriordan@devel.hu"
] |
oriordan@devel.hu
|
ac7050e1295509d0dead45d69060787b34fbc2c0
|
26933a384f41d0ebb3fb341d0b30a37983cb8bd8
|
/src/026.py
|
cf27c3a412306c4594e21af586da4a8470e6a276
|
[] |
no_license
|
KBRI-Biology-Experiment-Computation/bioinfo-lecture-2021-07-05
|
bbd085139730fd947c716bc46596f22f521eb92c
|
c46bb213bc41ad4f119abdf35c758da641228beb
|
refs/heads/main
| 2023-08-09T19:34:13.823250
| 2021-07-07T08:03:11
| 2021-07-07T08:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
s = "ATGTTATAG"
for i in range(0, len(s), 3):
print(s[i:i+3])
|
[
"kenneth.jh.han@gmail.com"
] |
kenneth.jh.han@gmail.com
|
10c9ca0234965420c1d7890a0676eac38518ad78
|
d40fbefbd5db39f1c3fb97f17ed54cb7b6f230e0
|
/ibm_db2/tests/test_unit.py
|
b9afacce63cced4dd32e2536a2f191f692792ab6
|
[] |
permissive
|
slightilusion/integrations-core
|
47a170d791e809f3a69c34e2426436a6c944c322
|
8f89e7ba35e6d27c9c1b36b9784b7454d845ba01
|
refs/heads/master
| 2020-05-20T18:34:41.716618
| 2019-05-08T21:51:17
| 2019-05-08T21:51:17
| 185,708,851
| 2
| 0
|
BSD-3-Clause
| 2019-05-09T02:05:19
| 2019-05-09T02:05:18
| null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.ibm_db2.utils import scrub_connection_string
pytestmark = pytest.mark.unit
class TestPasswordScrubber:
def test_start(self):
s = 'pwd=password;...'
assert scrub_connection_string(s) == 'pwd=********;...'
def test_end(self):
s = '...;pwd=password'
assert scrub_connection_string(s) == '...;pwd=********'
def test_no_match_within_value(self):
s = '...pwd=password;...'
assert scrub_connection_string(s) == s
|
[
"noreply@github.com"
] |
slightilusion.noreply@github.com
|
5f697b884de06a2a759dbaf8eff4e587d4b61385
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/initial_8302.py
|
ada636326405c8ccbeb036a5d50f5d02b405d7d7
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,331
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((440, 791, 782), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((225, 173, 486), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((189, 410, 398), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((223, 777, 905), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((478, 789, 82), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((333, 343, 863), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((31, 931, 648), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((870, 666, 358), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((808, 521, 926), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((930, 997, 266), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((361, 198, 927), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((324, 359, 244), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((138, 760, 307), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((105, 308, 227), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((981, 431, 237), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((90, 179, 506), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((62, 80, 966), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((941, 518, 40), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((148, 603, 718), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((755, 407, 323), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((819, 809, 563), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
641a6e7fb1755fe782bd4cfa3de6704b19fd36e6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_09_09_11_54_52_542373.py
|
b6266cc9513e5cad35ebf1d45a0ba2b74ee8442f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
import math
def calcula_gaussiana(x, mi, sigma):
    # Gaussian density: (1 / (sigma * sqrt(2*pi))) * exp(-0.5 * ((x - mi) / sigma)**2)
    form = (1 / (sigma * (2 * math.pi) ** 0.5)) * math.exp(-0.5 * ((x - mi) / sigma) ** 2)
    return form
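# A minimal usage sketch (added for illustration, not part of the original file):
# the standard normal density at x = 0 should be about 1 / sqrt(2*pi) ≈ 0.39894.
if __name__ == "__main__":
    print(calcula_gaussiana(0, 0, 1))  # expected ≈ 0.39894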
|
[
"you@example.com"
] |
you@example.com
|
24f9ced0b5dd1c194216331452babe0cbcb1b786
|
52e05d43e6c37ee7a586118cf0f390b04e92ada3
|
/76. Minimum Window Substring _ Hash Table.py
|
81639a8ff54abd07d4ebc6079e31945e94369bcd
|
[] |
no_license
|
CaizhiXu/LeetCode-Python-Solutions
|
8f7a856e11e0804f32c43ed98bc08525a950ac13
|
63120dbaabd7c3c19633ebe952bcee4cf826b0e0
|
refs/heads/master
| 2021-05-18T04:57:16.412834
| 2020-08-05T04:33:13
| 2020-08-05T04:33:13
| 251,121,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
class Solution:
def minWindow(self, s: str, t: str) -> str:
ans = float('inf')
minstr = ''
sourcehash = [0] * 256
targethash = [0] * 256
self.init_target_hash(targethash, t)
j = 0
for i in range(len(s)):
while not self.valid(sourcehash, targethash) and j < len(s):
                sourcehash[ord(s[j])] += 1
                j += 1
if self.valid(sourcehash, targethash):
if ans > j - i:
ans = j - i
minstr = s[i:j]
            sourcehash[ord(s[i])] -= 1
return minstr
    def init_target_hash(self, targethash, t):
        for ch in t:
            targethash[ord(ch)] += 1
    def valid(self, sourcehash, targethash):  # check whether sourcehash covers every character count in targethash
for i in range(256):
if targethash[i] > sourcehash[i]:
return False
return True
## time, space - O(N)
from collections import Counter, defaultdict
class Solution:
def minWindow(self, s: str, t: str) -> str:
t_cnts = Counter(t)
s_cnts = defaultdict(int)
start, end = 0, 0
match = 0
minstr = ''
minLen = float('inf')
while end < len(s):
s_cnts[s[end]] += 1
if s[end] in t_cnts and s_cnts[s[end]] == t_cnts[s[end]]:
match += 1
end += 1
while match == len(t_cnts):
if end - start < minLen:
minLen = end - start
minstr = s[start:end]
s_cnts[s[start]] -= 1
if s[start] in t_cnts and s_cnts[s[start]] < t_cnts[s[start]]:
match -= 1
start += 1
return minstr
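# A minimal usage sketch (added for illustration, not part of the original
# solution): the classic "minimum window substring" example from LeetCode 76.
if __name__ == "__main__":
    print(Solution().minWindow("ADOBECODEBANC", "ABC"))  # expected "BANC"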
|
[
"xucaizhi@gmail.com"
] |
xucaizhi@gmail.com
|