blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a410f6e281c8139f127daf8c8cac5080cc40c306 | a0e777ea7e0d00c061068db132a30a8fa545cc75 | /EffectivePython/item4.py | 793758a82e0f7d72f099d9da42934bf39b0aae5f | [] | no_license | aadisetyaa/Python-Cookbook | 87215b64d2d3631d6b18e90a68a09400e7d80919 | a8df0343a39725312686423296bfd860dbaf70ad | refs/heads/master | 2022-04-08T13:41:27.255352 | 2017-11-27T03:54:29 | 2017-11-27T03:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | from urllib.parse import parse_qs
my_values = parse_qs('red=5&blue=0&green=', keep_blank_values=True)
#print(repr(my_values))
#print('Red: ', my_values.get('red', ['']))
#print('Green: ', my_values.get('green'))
#print('Opacity: ', my_values.get('opacity'))
#red = int(my_values.get('red', [''])[0] or 0)
#green = int(my_values.get('green', [''])[0] or 0)
opacity = int(my_values.get('opacity', [''])[0] or 0)
red = my_values.get('red', [''])
red = int(red[0]) if red[0] else 0
'''
green = my_values.get('green', [''])
if green[0]:
green = int(green[0])
else:
green = 0
'''
def get_first_int(values, key, default=0):
found = values.get(key, [''])
if found[0]:
found = int(found[0])
else:
found = default
return found
green = get_first_int(my_values, 'green')
print('Red: %r' % red)
print('Green: %r' % green)
print('Opacity: %r' % opacity)
| [
"wpr101@hotmail.com"
] | wpr101@hotmail.com |
ca87ff0a336058adb8d8daa3ec98166ca00c8011 | 40fc10c31449faca8a3235d34b422c322db8af6e | /src/pyasl/asl/binning.py | 54c5604387cecbbd9b56c5d52f171313d02642cf | [
"MIT"
] | permissive | voneiden/PyAstronomy | 93d27e9d82bd54ecaf55b8fcbcba5ecbe7021997 | d7e92fb84aad0995bc9c3e38538f44d1d9fa6200 | refs/heads/master | 2021-01-16T20:52:04.139625 | 2013-07-04T14:00:12 | 2013-07-04T14:00:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,488 | py | # -*- coding: utf-8 -*-
from PyAstronomy.pyaC import pyaErrors as PE
import numpy as np
def binningx0dt(x, y, yerr=None, x0=None, dt=None, nbins=None, reduceBy=None, removeEmpty=True, \
removeNoError=False, useBinCenter=True):
"""
A simple binning algorithm.
This algorithm uses a fixed bin-width to produce a binned
data set. Either the bin-width, `dt`, or the number of bins,
`nbins`, must be specified. The number of output bins may
also depend on other flags such as, for example, `removeNoError`.
If no errors are specified via `yerr`, the errors for the binned
data are estimated as the standard deviation of the input data
points divided by the square root of their number. If `yerr` has
been specified, error propagation is used to determine the error.
The behavior of the time axis can be controlled via the
`useBinCenter` flag.
Values which cannot be determined will be indicated by NaN.
Various flags can be used to remove such bins from the binned
data set.
Parameters
----------
x, y : array
The x and y data values.
yerr : array, optional
Errors on the data values.
x0 : float, optional
Starting time of first bin.
Default is lowest given x value.
dt : float, optional
Width of a bin (either `dt`, `nbins` or `reduceBy` must
be given).
nbins : int, optional
Number of bins to use (either `dt`, `nbins` or `reduceBy` must
be given). Note that this specifies the number of bins into which
the range from `x0` to the last data point is subdivided.
reduceBy : int, optional
Reduce the number of elements in the array by the given factor
(either `dt`, `nbins` or `reduceBy` must be given). Note that
in this case, `x0` is set to the first (minimum x-value) and
the number of bins, n, is calculated according to the
prescription: :math:`n = int(round(len(x)/reduceBy))`
removeEmpty : boolean, optional
If True (default), bins with no data points will be
removed from the result.
removeNoError : boolean, optional
If True, bins for which no error can be determined
will be removed from the result. Default is False.
useBinCenter : boolean, optional
If True (default), the time axis will refer to the
center of the bins. Otherwise the numbers refer to
the start of the bins.
Returns
-------
Binned LC : array
An array with four columns: 1) The binned time axis,
2) The binned data, 3) Error of binned data, 4) The
number of input data points used to create the bin, e.g.
the new x-values are LC[::,0].
dt : float
The width of the bins.
"""
if ((not dt is None) + (not nbins is None) + (not reduceBy is None)) != 1:
raise(PE.PyAParameterConflict("Specify one of `dt`, `nbins`, or `reduceBy`."))
if ((not x0 is None) + (not reduceBy is None)) != 1:
raise(PE.PyAParameterConflict("Specify either `x0` or `reduceBy`."))
if x0 is None:
# Use first time as starting point
x0 = np.min(x)
if x0 > np.max(x):
raise(PE.PyAValError("The starting point, `x0`, is larger than the end time of the data.", \
solution="Use a smaller value."))
# Calculate the new number of array elements.
if reduceBy is not None:
nbins = int(round(len(x)/float(reduceBy)))
if nbins == 0: nbins=1 # Prevent empty return arrays
if nbins is not None:
# Use a specified number of bins.
# Calculate bin length
dt = (np.max(x) - x0)/float(nbins)
# Start calculation
# In which bin do the individual data points belong?
inWhichBin = np.floor(((x-x0)/dt))
# Lonely last bin correction
# Brings the last data point into the last valid bin
# instead of creating a new bin with that data point\
# at its very beginning
if nbins is not None:
inWhichBin[np.where(inWhichBin == nbins)[0]] -= 1
# Get the number of bins (start at x0 even if the
# first bins do not contain any data points)
nbins = np.max(inWhichBin) + 1
# Bins with data
bwd = np.unique(inWhichBin)
# Sort data into the bins
# Create output array (time, flux, error, data-point-counter)
result = np.empty( (nbins, 4) )
result[:] = np.NAN
# Assign time axis (beginning of bins)
result[::,0] = x0 + np.arange(nbins) * dt
if useBinCenter:
# Use the center of the bin for timing
result[::,0] += (0.5 * dt)
# Set data point counter (points/bin) to zero
result[::,3] = 0
for b in bwd:
indi = np.where(inWhichBin == b)[0]
result[b, 3] = len(indi)
result[b, 1] = np.mean(y[indi])
if yerr is None:
# No errors on data points are given
if len(indi) > 1:
result[b, 2] = np.std(y[indi]) / np.sqrt(result[b, 3])
else:
# No error if there is only a single point in the bin
result[b, 2] = np.NAN
else:
# There are errors on the data points
# Use error propagation
result[b, 2] = np.sqrt(np.sum(yerr[indi]**2)) / result[b, 3]
if removeEmpty:
# Remove bins without data points in it
indi = np.where(np.invert(np.isnan(result[::,1])))[0]
result = result[indi,::]
if removeNoError:
# Remove bins for which no error can be given
indi = np.where(np.invert(np.isnan(result[::,2])))[0]
result = result[indi,::]
return result, dt | [
"stefan.czesla@hs.uni-hamburg.de"
] | stefan.czesla@hs.uni-hamburg.de |
665e2dbb2e727b9f06db39c73e7338cd30f993c1 | c0046aa23b6260b1fce3dc8932b62c638fe6aa46 | /HcalTasks/python/HcalNoiseTask.py | 99a28865cf5c4433de438d8508c875eb02ac42e4 | [] | no_license | HCALDQM/DQM | 8c9f08fe89b844054069aba2ca893a831356b0ef | fa36a791f0527d0e3e96bd70c0776697a630b67c | refs/heads/master | 2020-04-06T07:05:20.336782 | 2015-10-14T07:46:58 | 2015-10-14T07:46:58 | 41,037,040 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,900 | py | import FWCore.ParameterSet.Config as cms
import DQM.HcalCommon.HcalDQStandard as standard
StandardSet = standard.StandardSet.clone()
# List of FEDs
lFEDs = [x+700 for x in range(32)] + [929, 1118, 1120, 1122]
moduleName = "HcalNoiseTask"
# Modify whatever is in StandardSet importing
StandardSet.moduleParameters.name = cms.untracked.string(moduleName)
StandardSet.EventsProcessed.path = cms.untracked.string(
"%s/" % moduleName)
StandardSet.EventsProcessedPerLS.path = cms.untracked.string(
"%s/" % moduleName)
StandardSet.Standard2DMap.path = cms.untracked.string(
"%s/" % moduleName)
StandardSet.Standard2DMap.desc = cms.untracked.string(
"Some Noise Task 2D Map")
# Main Task Description
hcalNoiseTask = cms.EDAnalyzer(
moduleName,
moduleParameters = StandardSet.moduleParameters,
MEs = cms.untracked.PSet(
EventsProcessed = StandardSet.EventsProcessed,
EventsProcessedPerLS = StandardSet.EventsProcessedPerLS,
HENoiseShape = cms.untracked.PSet(
path = cms.untracked.string("%s/HE" % moduleName),
kind = cms.untracked.string("TH1D"),
desc = cms.untracked.string("HE Noise Shape"),
xaxis = cms.untracked.PSet(
edges = cms.untracked.bool(False),
nbins = cms.untracked.int32(10),
min = cms.untracked.double(0.),
max = cms.untracked.double(10.),
title = cms.untracked.string("TS")
)
),
HFNoiseShape = cms.untracked.PSet(
path = cms.untracked.string("%s/HF" % moduleName),
kind = cms.untracked.string("TH1D"),
desc = cms.untracked.string("HF Noise Shape"),
xaxis = cms.untracked.PSet(
edges = cms.untracked.bool(False),
nbins = cms.untracked.int32(10),
min = cms.untracked.double(0.),
max = cms.untracked.double(10.),
title = cms.untracked.string("TS")
)
),
HONoiseShape = cms.untracked.PSet(
path = cms.untracked.string("%s/HO" % moduleName),
kind = cms.untracked.string("TH1D"),
desc = cms.untracked.string("HO Noise Shape"),
xaxis = cms.untracked.PSet(
edges = cms.untracked.bool(False),
nbins = cms.untracked.int32(10),
min = cms.untracked.double(0.),
max = cms.untracked.double(10.),
title = cms.untracked.string("TS")
)
),
NoiseSizeCheck = StandardSet.Standard2DMap
# me4 = cms.untracked.PSet(
# path = cms.untracked.string("%s/" % moduleName),
# kind = cms.untracked.string("PROF"),
# desc = cms.untracked.string("Example ME4"),
# xaxis = cms.untracked.PSet(
# edges = cms.untracked.bool(False),
# nbins = cms.untracked.int32(200),
# min = cms.untracked.double(-100),
# max = cms.untracked.double(100),
# title = cms.untracked.string("me4-X")
# ),
# yaxis = cms.untracked.PSet(
# wnbins = cms.untracked.bool(True),
# nbins = cms.untracked.int32(100),
# min = cms.untracked.double(-50),
# max = cms.untracked.double(50),
# title = cms.untracked.string("me4-Y")
# )
# )
)
)
| [
"vdkhristenko1991@gmail.com"
] | vdkhristenko1991@gmail.com |
7aeb14ec178c1278a0ad73d90f369a0ce020c9c0 | c46cbaefaf2ddce20f4d69d79dc8ad786c71ca9b | /src/keystore/javacard/applets/securechannel.py | 082793c09dbd69499c1bf5a407261c4cbf17bb6c | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | xavierfiechter/specter-diy | eeca22ca1984051edeef5abd1863d8b6b25f22e1 | 2c7b6244aca54eca19c60fad9d44f4884ba0ad62 | refs/heads/master | 2023-03-29T23:54:31.340395 | 2021-03-30T12:52:54 | 2021-03-30T12:52:54 | 343,566,546 | 0 | 0 | MIT | 2021-03-30T12:40:30 | 2021-03-01T21:48:30 | Python | UTF-8 | Python | false | false | 7,364 | py | from ..util import encode
import secp256k1
import hashlib, hmac
from io import BytesIO
from rng import get_random_bytes
from ucryptolib import aes
from binascii import hexlify
AES_BLOCK = 16
IV_SIZE = 16
MAC_SIZE = 14
AES_CBC = 2
class SecureError(Exception):
"""
Raised when something went wrong with the
secure channel (i.e. signature is invalid etc)
"""
pass
class SecureChannel:
"""
Class that implements secure communication with the card.
"""
GET_PUBKEY = b"\xB0\xB2\x00\x00"
OPEN_EE = b"\xB0\xB5\x00\x00"
OPEN_SE = b"\xB0\xB4\x00\x00"
SECURE_MSG = b"\xB0\xB6\x00\x00"
CLOSE = b"\xB0\xB7\x00\x00"
SUCCESS = b"\x90\x00"
def __init__(self, applet):
"""Pass Card or Simulator instance here"""
self.applet = applet
self.iv = 0
self.card_pubkey = None
self.card_aes_key = None
self.host_aes_key = None
self.card_mac_key = None
self.host_mac_key = None
self.mode = "es"
self.is_open = False
def get_card_pubkey(self):
"""Returns static public key of the card.
This key doesn't change unless applet is reinstalled.
"""
sec = self.applet.request(self.GET_PUBKEY)
self.card_pubkey = secp256k1.ec_pubkey_parse(sec)
return self.card_pubkey
def derive_keys(self, shared_secret):
"""Derives keys necessary for encryption and authentication"""
self.host_aes_key = hashlib.sha256(b"host_aes" + shared_secret).digest()
self.card_aes_key = hashlib.sha256(b"card_aes" + shared_secret).digest()
self.host_mac_key = hashlib.sha256(b"host_mac" + shared_secret).digest()
self.card_mac_key = hashlib.sha256(b"card_mac" + shared_secret).digest()
return hashlib.sha256(shared_secret).digest()[:4]
def open(self, mode=None):
"""Opens a secure channel.
Mode can be "es" - ephemeral-static
or "ee" - ephemeral-ephemenral
"""
# save mode for later - i.e. reestablish secure channel
if mode is None:
mode = self.mode
else:
self.mode = mode
# check if we know pubkey already
if self.card_pubkey is None:
self.get_card_pubkey()
# generate ephimerial key
secret = get_random_bytes(32)
host_prv = secret
host_pub = secp256k1.ec_pubkey_create(secret)
# ee mode - ask card to create ephimerial key and send it to us
if mode == "ee":
data = secp256k1.ec_pubkey_serialize(host_pub, secp256k1.EC_UNCOMPRESSED)
# get ephimerial pubkey from the card
res = self.applet.request(self.OPEN_EE + encode(data))
s = BytesIO(res)
data = s.read(65)
pub = secp256k1.ec_pubkey_parse(data)
secp256k1.ec_pubkey_tweak_mul(pub, secret)
shared_secret = hashlib.sha256(
secp256k1.ec_pubkey_serialize(pub)[1:33]
).digest()
shared_fingerprint = self.derive_keys(shared_secret)
recv_hmac = s.read(MAC_SIZE)
h = hmac.new(self.card_mac_key, digestmod="sha256")
h.update(data)
expected_hmac = h.digest()[:MAC_SIZE]
if expected_hmac != recv_hmac:
raise SecureChannelError("Wrong HMAC.")
data += recv_hmac
raw_sig = s.read()
sig = secp256k1.ecdsa_signature_parse_der(raw_sig)
# in case card doesn't follow low s rule (but it should)
sig = secp256k1.ecdsa_signature_normalize(sig)
if not secp256k1.ecdsa_verify(
sig, hashlib.sha256(data).digest(), self.card_pubkey
):
raise SecureChannelError("Signature is invalid.")
# se mode - use our ephimerial key with card's static key
else:
data = secp256k1.ec_pubkey_serialize(host_pub, secp256k1.EC_UNCOMPRESSED)
# ugly copy
pub = secp256k1.ec_pubkey_parse(
secp256k1.ec_pubkey_serialize(self.card_pubkey)
)
secp256k1.ec_pubkey_tweak_mul(pub, secret)
shared_secret = secp256k1.ec_pubkey_serialize(pub)[1:33]
res = self.applet.request(self.OPEN_SE + encode(data))
s = BytesIO(res)
nonce_card = s.read(32)
recv_hmac = s.read(MAC_SIZE)
secret_with_nonces = hashlib.sha256(shared_secret + nonce_card).digest()
shared_fingerprint = self.derive_keys(secret_with_nonces)
data = nonce_card
h = hmac.new(self.card_mac_key, digestmod="sha256")
h.update(data)
expected_hmac = h.digest()[:MAC_SIZE]
if expected_hmac != recv_hmac:
raise SecureChannelError("Wrong HMAC.")
data += recv_hmac
sig = secp256k1.ecdsa_signature_parse_der(s.read())
# in case card doesn't follow low s rule (but it should)
sig = secp256k1.ecdsa_signature_normalize(sig)
if not secp256k1.ecdsa_verify(
sig, hashlib.sha256(data).digest(), self.card_pubkey
):
raise SecureChannelError("Signature is invalid")
# reset iv
self.iv = 0
self.is_open = True
def encrypt(self, data):
"""Encrypts the message for transmission"""
# add padding
d = data + b"\x80"
if len(d) % AES_BLOCK != 0:
d += b"\x00" * (AES_BLOCK - (len(d) % AES_BLOCK))
iv = self.iv.to_bytes(IV_SIZE, "big")
crypto = aes(self.host_aes_key, AES_CBC, iv)
ct = crypto.encrypt(d)
h = hmac.new(self.host_mac_key, digestmod="sha256")
h.update(iv)
h.update(ct)
ct += h.digest()[:MAC_SIZE]
return ct
def decrypt(self, ct):
"""Decrypts the message received from the card"""
recv_hmac = ct[-MAC_SIZE:]
ct = ct[:-MAC_SIZE]
iv = self.iv.to_bytes(IV_SIZE, "big")
h = hmac.new(self.card_mac_key, digestmod="sha256")
h.update(iv)
h.update(ct)
expected_hmac = h.digest()[:MAC_SIZE]
if expected_hmac != recv_hmac:
raise SecureChannelError("Wrong HMAC.")
crypto = aes(self.card_aes_key, AES_CBC, iv)
plain = crypto.decrypt(ct)
# check and remove \x80... padding
arr = plain.split(b"\x80")
if len(arr) == 1 or len(arr[-1].replace(b"\x00", b"")) > 0:
raise SecureChannelError("Wrong padding")
return b"\x80".join(arr[:-1])
def request(self, data):
"""Sends a secure request to the card
and returns decrypted result.
Raises a SecureError if errorcode returned from the card.
"""
# if counter reached maximum - reestablish channel
if self.iv >= 2 ** 16 or not self.is_open:
self.open()
ct = self.encrypt(data)
res = self.applet.request(self.SECURE_MSG + encode(ct))
plaintext = self.decrypt(res)
self.iv += 1
if plaintext[:2] == self.SUCCESS:
return plaintext[2:]
else:
raise SecureError(hexlify(plaintext[:2]).decode())
def close(self):
"""Closes the secure channel"""
self.applet.request(self.CLOSE)
self.is_open = False
| [
"snigirev.stepan@gmail.com"
] | snigirev.stepan@gmail.com |
0933399c858eea92a5d926f9873e527890ee7eee | 471a036309c05b59243033f2480e27e19268ec55 | /src/london/london/apps/notifications/engines/session.py | 9840893b6a787cb3beb43026428db35a08c4bbf5 | [
"BSD-2-Clause"
] | permissive | avelino/votacao_paredao_bbb | 1bbf33b9ec00f033db5b1d558190135315d50b03 | 875ac157b207fee80be6841f9b17c41b7069e15d | refs/heads/master | 2021-01-20T12:17:48.362512 | 2012-07-13T05:41:44 | 2012-07-13T05:41:44 | 4,928,781 | 0 | 0 | null | 2020-07-27T11:05:32 | 2012-07-06T17:51:03 | Python | UTF-8 | Python | false | false | 892 | py | import logging
from london.apps.notifications.engines.base import BasePool
from london.apps.notifications.app_settings import SESSION_KEY
class SessionPool(BasePool):
def add_message_pool(self, request, message, level=logging.NOTSET):
request.session[SESSION_KEY] = request.session[SESSION_KEY] or []
request.session[SESSION_KEY].append(self.make_message_dict(request, message, level))
request.session.modified = True
def get_messages(self, request):
return [msg for msg in (request.session[SESSION_KEY] or [])]
def delete_message(self, request, message):
request.session.setdefault(SESSION_KEY, [])
msg_id = message['id']
for msg in request.session[SESSION_KEY]:
if msg['id'] == msg_id:
request.session[SESSION_KEY].remove(msg)
break
request.session.modified = True
| [
"thiagoavelinoster@gmail.com"
] | thiagoavelinoster@gmail.com |
4098559e3d5ca21f8b0ace8663ad6833d5192dc3 | 482e28dccb663459e50ac601e8cc376f2441b0f7 | /src/ch05/rtda/LocalVars.py | aa2c43c0799c56ab03c9df750cc8fef69b0720b8 | [] | no_license | wlj5240/JVMByPython | 1487d4f4b8367e9e31d71b60a9d06ff4996ad1b7 | 53502f10f4f19741646d871c46014e023ccad4a5 | refs/heads/master | 2022-10-05T19:55:50.381307 | 2019-10-20T05:03:38 | 2019-10-20T05:03:38 | 270,003,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: LocalVars.py
@time: 2019/9/15 16:22
@desc: 局部变量表,用于python的列表能存储任何数据类型,所以将基本数据类型和引用类型都用一个Slot表示。
"""
from rtda.Slot import Slot
class LocalVars:
def __init__(self, max_locals):
self.slots = []
if max_locals > 0:
self.slots = [Slot() for _ in range(max_locals)]
def set_numeric(self, index, val):
self.slots[index].num = val
def get_numeric(self, index):
return self.slots[index].num
def set_ref(self, index, ref):
self.slots[index].ref = ref
def get_ref(self, index):
return self.slots[index].ref
def __str__(self):
return "slots:{0}".format([str(t) for t in self.slots])
| [
"huruifeng1202@163.com"
] | huruifeng1202@163.com |
3cd3a0490007878e66ee2502a82d81cb04e6a0e3 | 18219d0fc95936ded56fe44f9a65ecb27f015232 | /148 dictionary items.py | a63a3687830fdc6ac0f0f71e6705986048d579d8 | [] | no_license | JDavid121/Script-Curso-Cisco-Python | 20a61b91b09376dcaef54f8ae5f86fe252de5c33 | 6d68c17ff3c3826e9fc609d110ce9d0e6ebf718b | refs/heads/master | 2021-05-18T04:54:59.948970 | 2020-03-29T20:19:53 | 2020-03-29T20:19:53 | 251,120,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 09:58:15 2020
DICTIONARY ITEMS
@author: David
"""
dict3 = {1:"beta",2:"gamma",4:"iota",3:"theta",9:"zeta",5:"eta"}
# items() method
# This method returns a "class" of tuples.
# Each tuple is a key,value pair
print(dict3.items())
print("**********************")
for key,value in dict3.items():
print(key,"->",value)
print("**********************")
for key,value in dict3.items():
print(value,"->",key)
print("***********************")
A = dict3.items()
print(A)
print(len(A))
print(type(A))
| [
"noreply@github.com"
] | JDavid121.noreply@github.com |
c418f5064c035680c4a33f021984341aae2d73ca | 42051d7c4dd80bb827f10905a6c89b7187448697 | /cfxdb/gen/KeyValue.py | 2067857559a4defa3aa6a5407a5f4eb3fc7d0ba7 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | om26er/cfxdb | 1f1d195e5e5d37be08d05364518fb6d2cf13f4e4 | ade48d664a0b4fb99d02836dc77e8e8a43076a07 | refs/heads/master | 2021-04-05T03:30:19.212037 | 2020-04-02T20:05:17 | 2020-04-02T20:05:17 | 248,516,500 | 0 | 0 | NOASSERTION | 2020-03-19T13:55:56 | 2020-03-19T13:55:56 | null | UTF-8 | Python | false | false | 1,436 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace:
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# A key-value pair with string keys & values.
class KeyValue(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsKeyValue(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = KeyValue()
x.Init(buf, n + offset)
return x
# KeyValue
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# UTF8 encoded key of KV pair.
# KeyValue
def Key(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# UTF8 encoded value of KV pair.
# KeyValue
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def KeyValueStart(builder): builder.StartObject(2)
def KeyValueAddKey(builder, key): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(key), 0)
def KeyValueAddValue(builder, value): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def KeyValueEnd(builder): return builder.EndObject()
| [
"tobias.oberstein@crossbario.com"
] | tobias.oberstein@crossbario.com |
b334b94929f6f34e58add9421156267e499fec17 | 334172bb2d17dd0ab0506e054fea207383c50042 | /condonsax/hiddenalbum/models.py | bb7d66dfb30198dc11f2a27a742526de775edb78 | [] | no_license | JayWelborn/condonsax | 1336dd1258a4b348d145a8e1603abf73c7c20947 | 6689db0b5c35420286587531b2e6eaac5f3c3bb7 | refs/heads/master | 2021-01-19T22:41:04.818558 | 2017-05-19T18:52:27 | 2017-05-19T18:52:27 | 88,843,128 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.db import models
from albums.models import Album
class HiddenAlbum(models.Model):
album = models.ForeignKey(Album)
slug = models.SlugField('URL slug', max_length=100, db_index=True)
def get_absolute_url(self):
"""
Defines primary key and slug as components of url
"""
args = (self.slug)
return reverse(self, args)
def __str__(self):
return self.album.title
| [
"jesse.welborn@gmail.com"
] | jesse.welborn@gmail.com |
006d8aa3c250bfd18c9ae5ac8b4bea0fc073de5f | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /samples/cli/accelbyte_py_sdk_cli/ugc/_admin_get_tag.py | f1d4a69d7577f3d4b4c8875fb982147bb1483e49 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 2,329 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Ugc Service (2.11.3)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.ugc import admin_get_tag as admin_get_tag_internal
from accelbyte_py_sdk.api.ugc.models import ModelsPaginatedGetTagResponse
from accelbyte_py_sdk.api.ugc.models import ResponseError
@click.command()
@click.option("--limit", "limit", type=int)
@click.option("--offset", "offset", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_get_tag(
limit: Optional[int] = None,
offset: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(admin_get_tag_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = admin_get_tag_internal(
limit=limit,
offset=offset,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"AdminGetTag failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_get_tag.operation_id = "AdminGetTag"
admin_get_tag.is_deprecated = False
| [
"elmernocon@gmail.com"
] | elmernocon@gmail.com |
c2519ef6d7e3ea9351129a217f76116d3a604cf9 | fba81be05c6665690ab7bd35d1fb7c2bdaf72fce | /backend/standard_casualty_p_26548/urls.py | 939afdacf8f0c94508920656ffbdcaaee9c65eb2 | [] | no_license | crowdbotics-apps/standard-casualty-p-26548 | 5ff4e4433b340a47854bdd5b7d0fa52eb71ba127 | f8de2e6d3c55f7a68457afe84532be4cdb4fbacf | refs/heads/master | 2023-04-29T18:27:50.111889 | 2021-05-11T13:54:12 | 2021-05-11T13:54:12 | 366,399,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | """standard_casualty_p_26548 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Standard Casualty Portal"
admin.site.site_title = "Standard Casualty Portal Admin Portal"
admin.site.index_title = "Standard Casualty Portal Admin"
# swagger
api_info = openapi.Info(
title="Standard Casualty Portal API",
default_version="v1",
description="API documentation for Standard Casualty Portal App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
f82fc162a15b808dfc58d3ae84fd370ab77132b3 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/communication/azure-communication-administration/azure/communication/administration/_phonenumber/_generated/aio/__init__.py | 6ec72fc665b13e79f13317f3a017845a99847c9a | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 604 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._phone_number_administration_service_async import PhoneNumberAdministrationService
__all__ = ['PhoneNumberAdministrationService']
| [
"noreply@github.com"
] | yunhaoling.noreply@github.com |
4df438883771aae9c61194134f1d97f85de32a2a | 5ed63c425f4bcc7a82c91920e58f03d60a8d9b3c | /bid/constants.py | f0cbc77421dd0be6068ef06a4196e2a1ab0ae806 | [] | no_license | cesslab/winners_curse_v7 | 33d0f021d5e8fd5dee3aad99438b750ec7b3959f | 4e096cf97dc027d727aad567c7cc859a3ba125f3 | refs/heads/master | 2023-09-05T03:57:02.065977 | 2021-10-13T21:33:47 | 2021-10-13T21:33:47 | 355,645,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py |
class Constants:
name_in_url = "bid"
players_per_group = None
PART_NUMBER = 1
PART_ONE = 1
MIN_VALUATION = 0
MAX_VALUATION = 100
NUM_LOTTERIES = 4
ROUNDS_PER_LOTTERY = 10
num_rounds = NUM_LOTTERIES * ROUNDS_PER_LOTTERY
PREFIX = "bid_lottery_"
| [
"anwarruff@gmail.com"
] | anwarruff@gmail.com |
1cee4617947c4877e2784a1c7ca58acba24dbcee | 786de89be635eb21295070a6a3452f3a7fe6712c | /pyana/tags/V00-00-10/src/input.py | 5aeb783cb0a06ff78ca4f1306da3746ced1206b1 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | #
# $Id$
#
# Copyright (c) 2010 SLAC National Accelerator Laboratory
#
import logging
import threading
import Queue
from pypdsdata import io
_log = logging.getLogger("pyana.input")
def dgramGen(names):
    """Yield (datagram, fileName, fpos, run) tuples from the given XTC files."""
    # Bucket the input files by the run number encoded in their names.
    byRun = {}
    for fname in names:
        xname = io.XtcFileName(fname)
        byRun.setdefault(xname.run(), []).append(fname)
    # Process runs in increasing run-number order.
    for runNumber in sorted(byRun):
        runNames = byRun[runNumber]
        logging.info("Processing run number %s" % runNumber)
        logging.info("File list: %s" % runNames)
        # Merge-iterate this run's files and yield datagrams one by one,
        # together with the iterator's current position bookkeeping.
        dgiter = io.XtcMergeIterator(runNames)
        for dg in dgiter:
            yield (dg, dgiter.fileName(), dgiter.fpos(), dgiter.run())
class _DgramReaderThread(threading.Thread):
    """Worker thread that reads datagrams and pushes them onto a queue."""
    def __init__(self, queue, names):
        # Named thread so it is identifiable in logs and debuggers.
        super(_DgramReaderThread, self).__init__(name="DgramReader")
        self.queue = queue
        self.names = names
    def run(self):
        # Produce every datagram, then a None sentinel to signal end-of-data.
        for item in dgramGen(self.names):
            self.queue.put(item)
        self.queue.put(None)
def threadedDgramGen(names, queueSize=10):
    """ datagram generator which does reading in a separate thread """
    # Bounded queue throttles the reader when the consumer falls behind.
    pending = Queue.Queue(queueSize)
    reader = _DgramReaderThread(pending, names)
    reader.start()
    while True:
        item = pending.get()
        # None is the reader's end-of-data sentinel.
        if item is None:
            break
        yield item
    # Reader has finished producing; reap the thread.
    reader.join()
| [
"salnikov@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@b967ad99-d558-0410-b138-e0f6c56caec7 |
0cec27060bbaa91ea8a2fe8be0588bddeb3ec1ff | 525c0cd60cf07e36c378539472703aa1e8354a86 | /app/users/models.py | 53f8eb8d9ff3a3a4b9ef50ac9285c68cfdbe417e | [] | no_license | yatimisi/bookkeeping-server | a4575b871eb18886bb53ec0fe314f41801983978 | 272710aa0066711ec605755226b4387286c7e404 | refs/heads/master | 2020-11-30T13:38:18.194740 | 2019-12-30T01:27:04 | 2019-12-30T01:27:04 | 230,407,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,627 | py | import os
import time
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager as AuthUerManager
from django.contrib.auth.tokens import default_token_generator
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import gettext_lazy as _
def user_image_path(instance, filename):
    """Return the storage path for a user's profile image.

    The file is placed under ``user-profile/`` and named after the user's
    username, keeping the uploaded file's original extension.
    """
    _, extension = os.path.splitext(filename)
    return os.path.join('user-profile', f'{instance.username}{extension}')
class UserManager(AuthUerManager):
    """Manager for the e-mail-based User model (no username field)."""
    def _create_user(self, email, password, **extra_fields):
        """Create and save a user with the given email and password."""
        if not email:
            raise ValueError('The given email must be set')
        normalized = self.normalize_email(email)
        account = self.model(email=normalized, **extra_fields)
        account.set_password(password)
        account.save(using=self._db)
        return account
    def create_user(self, email, password=None, **extra_fields):
        """Create a regular (non-staff, non-superuser) account."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(email, password, **extra_fields)
    def create_superuser(self, email, password=None, **extra_fields):
        """Create an account with both staff and superuser privileges."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        # Reject explicit attempts to demote a superuser at creation time.
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(email, password, **extra_fields)
class User(AbstractUser):
    """Custom user model that authenticates with e-mail instead of username."""
    # Unique e-mail used as the login identifier (verbose name is Chinese for "e-mail").
    email = models.EmailField('電子郵件', unique=True)
    # Optional avatar; stored under user-profile/<username><ext> (see user_image_path).
    profile = models.ImageField(
        blank=True, null=True, upload_to=user_image_path)
    # Authenticate with the email field; no extra fields prompted by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    objects = UserManager()
    def send_password_set_email(self):
        """Render the set-password e-mail templates and send them to this user."""
        subject = loader.render_to_string('email/set-password-subject.txt')
        # Subject must be a single line; strip any newlines the template emits.
        subject = ''.join(subject.splitlines())
        # Body embeds the base64 user id and a one-time password-reset token.
        body = loader.render_to_string('email/set-password-content.html', {
            'uid': urlsafe_base64_encode(force_bytes(self.pk)),
            'token': default_token_generator.make_token(self),
            'user': self,
        })
        self.email_user(subject, body)
    @property
    def username(self):
        # Display name: "first last" when set, otherwise the e-mail local part.
        return f'{self.first_name} {self.last_name}'.strip() or self.email.split('@')[0]
    def __str__(self):
        return self.username
| [
"mea9096@gmail.com"
] | mea9096@gmail.com |
c4b2387b95248fcc29f632db6c3466309d2568f4 | e98a1e360e947a0f91edc3cb603d915a3630cfbc | /easy/1017_160_**intersection_of_two_linked_list.py | 75d55c3e5c95d14a9f8c63cf3633510d95adf202 | [] | no_license | myungwooko/algorithm | 3a6a05cf7efa469aa911fe04871ef368ab98bb65 | 673e51199a2d07198894a283479d459bef0272c5 | refs/heads/master | 2021-07-04T01:17:41.787653 | 2020-12-25T00:59:33 | 2020-12-25T00:59:33 | 213,865,632 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,996 | py | """
160. Intersection of Two Linked Lists
Easy
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
begin to intersect at node c1.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3
Output: Reference of the node with value = 8
Input Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Reference of the node with value = 2
Input Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: null
Input Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
"""
# Definition for singly-linked list.
class ListNode(object):
    """Singly linked list node: a value plus a pointer to the next node."""
    def __init__(self, x):
        # New nodes start as chain terminators (next is None).
        self.val, self.next = x, None
# class Solution(object):
# def getIntersectionNode(self, headA, headB): =====================================> Time Limit Exceed
# """
# :type head1, head1: ListNode
# :rtype: ListNode
# """
# if not headA or not headB:
# return
#
# vals = []
# curA = headA
# curB = headB
#
# while curA:
# vals.append(curA)
# curA = curA.next
#
# while curB:
# if curB in vals:
# return curB
# curB = curB.next
#
# return
"""
If two linked lists have intersection, we can find two observations:
They must have same nodes after the intersection point.
L1+L2 must have same tail from the intersection point as L2 + L1. For example,
L1 = 1,2,3
L2 = 6,5,2,3
L1+L2 = 1,2,3,6,5,2,3
L2+L1 = 6,5,2,3,1,2,3
왜냐면 둘다 어딘가에서는 만나고 거기서부턴 똑같이 가서 끝나는 건 똑같기 때문에
위와 같이 더하면 길이는 같아지고 접합지점 부터의 모습을 같게 힐수 있다.
그래서 아래의 함수는 둘을 더해서 만나는 지점인 꼬리부분으로 들어갈떄 둘은 같아지고 그때 그것을 리턴하면 된다는 논리.
아래의 함수 while loop에서 2에서 나가게 되고 그 node를 리턴
1-2-3-None-6 -5-(2)-3
6-5-2- 3-None-1-(2)-3
그리고 마찬가지로 둘이 같은게 없을때도 위를 그대로 따라가 길이는 같아지고 같은게 없고 마지막 한번더 None==None으로 while loop을 빠져 나가게 되고 그 node를 리턴
L1 = 1,2,3
L2 = 4,5,6,7
1-2-3-None-4-5-6-7-(None)
4-5-6-7-None-1-2-3-(None)
"""
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """Return the node where the two lists intersect, or None.

        Walk both lists in lock-step; when a walker falls off the end it
        restarts at the other list's head.  Both walkers then traverse
        len(A)+len(B) nodes, so they meet at the first shared node (or at
        None when the lists are disjoint).
        """
        walkerA, walkerB = headA, headB
        while walkerA != walkerB:
            walkerA = walkerA.next if walkerA else headB
            walkerB = walkerB.next if walkerB else headA
        return walkerA
# Ad-hoc smoke test: two disjoint two-node lists, so the intersection is None.
a = ListNode(1)
a.next = ListNode(2)
b = ListNode(7)
b.next = ListNode(8)
s = Solution()
test = s.getIntersectionNode(a, b)
# Prints "11 None" (the 11 is just a debug marker).
print(11, test)
| [
"myungwoo.ko@gmail.com"
] | myungwoo.ko@gmail.com |
2b6f169bba089af91ccf40e5c920c1520be4b508 | fb0dd23271a274a9b622183b2f66bdd40c46169e | /todo/decorators.py | 3b30554e25d05f21b6f661c6b5fa71845c3b78ea | [] | no_license | adeelehsan/rest-todo-app | 826e7639353ec8e2bd4d61fa673fd75e6c6ce846 | ced2c819587b783eaeb90ba220fb46e6f796056e | refs/heads/master | 2020-12-02T07:51:08.398935 | 2017-07-13T04:41:05 | 2017-07-13T04:41:05 | 96,734,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | # from .models import Task
#
# def user_has_perm(function):
# def wrap(request):
# if request.user.has_perm('todo.can_view'):
# task = Task.objects.all()
# return function(request, task)
# else:
# task = Task.objects.filter(user=request.user)
# return function(request, task)
# return wrap | [
"adeel.ehsan@arbisoft.com"
] | adeel.ehsan@arbisoft.com |
8b3c3535ec1c8228fdf9be210a9644992f4e1033 | 90b2b50be27da77b6680f4c7b9cfea53267f2f6d | /CodingInterviews/46.把数字翻译成字符串.py | c6fe0263f14df73e786772453b4d6bc777200922 | [
"Apache-2.0"
] | permissive | xuhuasheng/algorithm-python | 22cd3007d0b63d169d8f84b9b88787d6883e9c76 | 9c47c1add8da1ccfbee8882057883d90615d27b5 | refs/heads/master | 2023-01-18T22:52:22.027636 | 2020-11-20T09:48:23 | 2020-11-20T09:48:23 | 313,158,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | # 给定一个数字,按照如下规则翻译成字符串:0翻译成“a”,1翻译成“b”…25翻译成“z”。
# 一个数字有多种翻译可能,例如12258一共有5种,分别是bccfi,bwfi,bczi,mcfi,mzi。
# 实现一个函数,用来计算一个数字有多少种不同的翻译方法。
# 递归地思考问题,自底向上地解决问题
# f(n)表示从右边数第n位的结果
# f(0) = 1
# f(1) = 1
# f(n) = f(n-1) + g(n, n-1)f(n-2)
# 当str(n)x10+str(n-1)在10-25时,g=1,否则=0
# 动态规划:实质是自底向上地计算并储存复用结果
# 时间复杂度:o(n)
# 空间复杂度:o(n)
def getTranslationCount(num):
    """Count the distinct letter translations of a non-negative integer.

    Digits map 0->'a' ... 25->'z'; an adjacent digit pair whose value lies
    in [10, 25] may also be translated as a single letter.  Computed
    bottom-up (dynamic programming) from the rightmost digit:
    f(k) = f(k-1) + g*f(k-2), where g is 1 iff the two-digit value at that
    position is in [10, 25].  O(n) time and space.  Negative input yields 0.
    """
    if num < 0:
        return 0
    digits = str(num)
    if len(digits) == 1:
        return 1
    counts = [0] * (len(digits) + 1)
    counts[0] = counts[1] = 1
    pos = 0
    for i in range(len(digits) - 2, -1, -1):
        # Two-digit value formed by positions i and i+1.
        pair = int(digits[i]) * 10 + int(digits[i + 1])
        joinable = 1 if 10 <= pair <= 25 else 0
        pos = len(digits) - i
        counts[pos] = counts[pos - 1] + joinable * counts[pos - 2]
    return counts[pos]
if __name__ == "__main__":
    print(getTranslationCount(12258))
"xu_watson@163.com"
] | xu_watson@163.com |
582600a5132f6e92437d624e360e300171afc73d | 7a28b09805e6a925bb8fff2a06258c42cf9957f7 | /parallel_wavegan/bin/compute_statistics.py | 21f1d4d10af30691ffdd7f4b0e5ae3664bed6f9e | [
"MIT"
] | permissive | tantk/ParallelWaveGAN | cf55d4533e6899b51a6a44afb3247d14b5d83196 | a204ac47357066c58a9662b8e9b619f53d19d043 | refs/heads/master | 2020-09-03T19:30:24.068755 | 2019-11-04T12:32:42 | 2019-11-04T12:32:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,287 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Calculate statistics of feature files."""
import argparse
import logging
import os
import numpy as np
import yaml
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from parallel_wavegan.datasets import MelDataset
from parallel_wavegan.utils import read_hdf5
from parallel_wavegan.utils import write_hdf5
def main():
    """Run preprocessing process."""
    parser = argparse.ArgumentParser(
        description="Compute mean and variance of dumped raw features "
                    "(See detail in parallel_wavegan/bin/compute_statistics.py).")
    parser.add_argument("--rootdir", type=str, required=True,
                        help="direcotry including feature files.")
    parser.add_argument("--config", type=str, required=True,
                        help="yaml format configuration file.")
    parser.add_argument("--dumpdir", default=None, type=str,
                        help="direcotry to save statistics. if not provided, "
                        "stats will be saved in the above root direcotry. (default=None)")
    parser.add_argument("--verbose", type=int, default=1,
                        help="logging level. higher is more logging. (default=1)")
    args = parser.parse_args()
    # set logger: verbose=0 -> WARN, 1 -> INFO, >1 -> DEBUG
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    else:
        logging.basicConfig(
            level=logging.WARN, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
        logging.warning('skip DEBUG/INFO messages')
    # load config and merge CLI args into it (CLI wins on key collisions)
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    # check direcotry existence; default dump dir is the rootdir's parent
    if args.dumpdir is None:
        args.dumpdir = os.path.dirname(args.rootdir)
    if not os.path.exists(args.dumpdir):
        os.makedirs(args.dumpdir)
    # get dataset; the on-disk feature format decides query pattern and loader
    if config["format"] == "hdf5":
        mel_query = "*.h5"
        mel_load_fn = lambda x: read_hdf5(x, "feats") # NOQA
    elif config["format"] == "npy":
        mel_query = "*-feats.npy"
        mel_load_fn = np.load
    else:
        raise ValueError("support only hdf5 or npy format.")
    dataset = MelDataset(
        args.rootdir,
        mel_query=mel_query,
        mel_load_fn=mel_load_fn)
    logging.info(f"the number of files = {len(dataset)}.")
    # calculate statistics incrementally, so the whole corpus never sits in memory
    scaler = StandardScaler()
    for mel in tqdm(dataset):
        scaler.partial_fit(mel)
    # persist mean/scale in the same format family as the input features
    if config["format"] == "hdf5":
        write_hdf5(os.path.join(args.dumpdir, "stats.h5"), "mean", scaler.mean_.astype(np.float32))
        write_hdf5(os.path.join(args.dumpdir, "stats.h5"), "scale", scaler.scale_.astype(np.float32))
    else:
        stats = np.stack([scaler.mean_, scaler.scale_], axis=0)
        np.save(os.path.join(args.dumpdir, "stats.npy"), stats.astype(np.float32), allow_pickle=False)
if __name__ == "__main__":
    main()
| [
"hayashi.tomoki@g.sp.m.is.nagoya-u.ac.jp"
] | hayashi.tomoki@g.sp.m.is.nagoya-u.ac.jp |
a7c1e6b42ee79fdf0c4da37853f738ecb2cb846d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_43/137.py | 32bf9aa7b435162a23c136394f5bfa91268e72dc | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | def print_2d(tab):
for e in tab:
print(e)
if __name__ == '__main__':
    # Each input token is written in an unknown positional system whose digit
    # alphabet is the token's distinct symbols in order of first appearance.
    # The first-seen symbol is mapped to digit 1 and the second-seen to 0
    # (presumably to obtain the minimum possible value — a lone symbol gets
    # digit 1 in base 2), then the token is evaluated in that base.
    # 'with' guarantees both files are closed/flushed on every exit path; the
    # original never closed fout, risking a truncated test.out.
    with open("./test.in", "r") as fin, open("./test.out", "w") as fout:
        N = int(fin.readline())
        for test in range(0, N):
            total = 0
            num = fin.readline().replace("\n", "")
            base = []
            val = []
            print(num)
            for q in num:
                val.append(q)
                if q not in base:
                    base.append(q)
            if len(base) > 1:
                # First-seen symbol becomes digit 1, second-seen becomes 0.
                base[0], base[1] = base[1], base[0]
            else:
                # Single distinct symbol: prepend a dummy 0 so it maps to 1 in base 2.
                base[:0] = [0]
            # 'exponent' renamed from 'pow', which shadowed the builtin.
            exponent = 0
            for i in range(len(val) - 1, -1, -1):
                total += (len(base) ** exponent) * base.index(val[i])
                exponent += 1
            sol = "Case #" + str(test + 1) + ": " + str(total) + "\n"
            print(sol)
            fout.write(sol)
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5267c5cfb4bed1233a56ac7d2f2c81af75cc887d | 9beac2738b54468e108b34e3d4f1455276b5885d | /saral_debug4.py | 891b4251273f529ae74d6b96680d6c30d491e7c8 | [] | no_license | preetising/Dictinory | f6c1d7186698a91de641fc5d899ecf8367ea0e2f | f681933d4b1dc23e8e1aff5168bd599314930af7 | refs/heads/main | 2023-08-20T19:56:59.696642 | 2021-10-10T08:21:43 | 2021-10-10T08:21:43 | 415,525,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | s={'umesh':21,'bijender':54,'amar':67,'peter':89,'sonu':56}
a={'python':20,"gaurav":300,'dev':34,"karan":43}
c={}
# Merge both source dicts into c; later dicts overwrite duplicate keys.
# NOTE(review): 's' is defined on the preceding (metadata-fused) line of this file.
for i in (s,a):
    c.update(i)
print(c)
"noreply@github.com"
] | preetising.noreply@github.com |
161947c50d199b8842683b5136d3eeaaf338567d | 3109e3a7f2f2dccc5a806695f0adbe0fed879112 | /ecommerce/Loma/migrations/0029_auto_20190205_1945.py | 335430bc000f2b635c63fc5a7519215234a82de3 | [] | no_license | Maheshwari2604/ecommercee | 9ebbf18b4fbf933a0d9641009f7f17ce836de587 | 4411e7e10eccda907711200d2c0d873db3d7f803 | refs/heads/master | 2020-04-20T18:03:49.575124 | 2019-02-12T16:02:05 | 2019-02-12T16:02:05 | 169,007,411 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-05 14:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the model MD_model to md.
    dependencies = [
        ('Loma', '0028_daily_price_list_loma_id'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='MD_model',
            new_name='md',
        ),
    ]
| [
"maheshwarishivam2604@gmail.com"
] | maheshwarishivam2604@gmail.com |
f322e0b9b3bbf0ddf6522dc18c005c0b41c7478a | a57130d75ad75520217b92d3bd977394846501f7 | /test.py | b983df123bd16dc281079046f1465292f05469d3 | [] | no_license | gittygupta/slp | 15153b9cb580ed8d35d6be4b157ed94ac41d4a4f | c43fc9a01ae67dfd28d147dc4ffc2f0a70e60199 | refs/heads/master | 2022-12-18T08:51:42.266621 | 2020-09-28T16:46:31 | 2020-09-28T16:46:31 | 293,038,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | import tensorflow as tf
import numpy as np
from model import *
from attention import *
from data_utils import *
from bert_utils import *
# Global variables
# Transformer decoder hyper-parameters.
num_decoder_blocks = 6
num_heads = 8
d_model = 256
d_ffn = 256
# Per-timestep output feature size (last axis of the generated sequence).
d_out = 154
# models
# NOTE(review): both models are built at import time; importing this module
# therefore loads BERT — confirm this is intended.
bert = Bert(max_sequence_length=80)
decoder = Decoder(num_decoder_blocks, num_heads, d_model, d_ffn, d_out)
# inference-loop
# only for a single input
def inference(model_path, sentence, net_seq_len):
    """Autoregressive inference for a single input sentence.

    Starting from an all-zero target sequence, repeatedly runs the decoder
    and writes each predicted frame into the next time step, so every
    iteration conditions on everything generated so far.

    :param model_path: checkpoint directory to restore the decoder from.
    :param sentence: sequence whose element 0 is the source sentence.
    :param net_seq_len: total number of output time steps to generate.
    :return: (1, net_seq_len, d_out) float32 array of generated frames.
    """
    # BUG FIX: the original used tf.zeros(...), but TF tensors are immutable,
    # so "tar[0][i+1] = ..." raised TypeError on the first step.  A NumPy
    # buffer is mutable and is accepted by TF layers in eager mode.
    tar = np.zeros((1, net_seq_len, d_out), dtype=np.float32)
    # ckpt
    # NOTE(review): 'optimizer' is expected to come from a star import — confirm.
    checkpoint_path = model_path
    ckpt = tf.train.Checkpoint(decoder=decoder,
                               optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Model Loaded!!')
    else:
        print('Initialised checkpoint')
    words, _, seq_lengths = bert(sentence[0])
    pad_mask = padding_mask(words.shape[1], seq_lengths)
    la_mask = look_ahead_mask(net_seq_len - 1)
    for i in range(net_seq_len - 1):
        pred = decoder(tar[:, :-1, :], words, la_mask, pad_mask)
        print("Counter : ", pred[0][i][-1])
        # Feed the newly predicted frame back in as the next decoder input.
        tar[0][i + 1] = pred[0][i]
    return tar
# simple test
def test(model_path, sentences, path, video, net_sequence_length):
    """Teacher-forced forward pass: restore the decoder checkpoint, then
    decode one video's ground-truth sequence (shifted by one step) for the
    given sentence and return the decoder output."""
    ckpt = tf.train.Checkpoint(decoder=decoder, optimizer=optimizer)
    manager = tf.train.CheckpointManager(ckpt, model_path, max_to_keep=5)
    if manager.latest_checkpoint:
        ckpt.restore(manager.latest_checkpoint)
        print('Model Loaded!!')
    else:
        print('Initialised checkpoint')
    # Ground-truth frames, with the final step dropped (decoder input).
    ground_truth = get_processed_data(path, video, 0, net_sequence_length)
    tar_inp = ground_truth[:, :-1, :]
    words, _, seq_lengths = bert(sentences[0])
    pad_mask = padding_mask(words.shape[1], seq_lengths)
    la_mask = look_ahead_mask(tar_inp.shape[1])
    return decoder(tar_inp, words, la_mask, pad_mask)
if __name__ == '__main__':
    # Placeholder paths/values — replace with a real checkpoint, sentence,
    # video directory and dataset video name before running.
    model_path = 'models/path/to/model'
    sentence = ['german sentence']
    vid_path = 'path/to/videos'
    video = 'name of video in dataset'
    net_sequence_length = 512
    test(model_path, sentence, vid_path, video, net_sequence_length)
| [
"noreply@github.com"
] | gittygupta.noreply@github.com |
aebc43e38204ef38f0a134e24421cf6b5df8c018 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03003/s239952205.py | 9e63871a10ccf805a11575b5567761e5e95c710d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | def count_cs(str1, str2, MOD):
"""文字列str1, str2の共通部分列(Common Subsequence, CS)を数え上げる。
添字が異なる場合は異なる部分列として考える。
計算量 O(|str1||str2|)
"""
dp = [[0] * (len(str2) + 1) for _ in range(len(str1) + 1)]
for i in range(len(str1)):
for j in range(len(str2)):
if str1[i] == str2[j]:
dp[i + 1][j + 1] = dp[i + 1][j] + dp[i][j + 1] + 1
else:
dp[i + 1][j + 1] = dp[i + 1][j] + dp[i][j + 1] - dp[i][j]
dp[i + 1][j + 1] %= MOD
return (dp[len(str1)][len(str2)] + 1) % MOD
# Read the two sequence lengths (unused beyond input parsing), the two
# integer sequences, and print their common-subsequence count mod 1e9+7.
n, m = map(int, input().split())
s = list(map(int, input().split()))
t = list(map(int, input().split()))
MOD = 10 ** 9 + 7
print(count_cs(s, t, MOD))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3e6fc91d43aac579360e7c6d3e082c7d2104718e | bf099f3425b590f6cbb39e8305eac5489e558e26 | /笔记/re/python-trade/trade.py | ef4f03b4ad241931859eff03cbe3c3e8247eaceb | [] | no_license | Imtinmin/Note | 399a08e185bf11e2ca9fbbe4823477e86e86ebe2 | 8f99a4c180b921baf46a838997bcce64c688fd70 | refs/heads/master | 2023-01-27T23:25:43.763941 | 2020-01-01T15:24:08 | 2020-01-01T15:24:08 | 188,774,377 | 1 | 1 | null | 2023-01-04T13:55:52 | 2019-05-27T05:07:47 | PHP | UTF-8 | Python | false | false | 651 | py | #!/usr/bin/env python
# encoding: utf-8
# 如果觉得不错,可以推荐给你的朋友!http://tool.lu/pyc
import base64
def encode(message):
    """Obfuscate *message* and return its base64 text.

    Each character is XOR-ed with 32 and shifted up by 16, then the whole
    buffer is base64-encoded.  Works on both Python 2 and Python 3 (the
    original str-based version crashed on Python 3, where b64encode
    requires bytes); always returns the native str type.
    """
    transformed = bytes(bytearray(((ord(ch) ^ 32) + 16) for ch in message))
    encoded = base64.b64encode(transformed)
    # Python 2: b64encode already returns str; Python 3: it returns bytes.
    return encoded if isinstance(encoded, str) else encoded.decode('ascii')
def decode(message):
    """Reverse encode(): base64-decode, then undo the +16 / XOR-32 transform.

    Iterates over a bytearray so the same code runs on Python 2 and
    Python 3 (the original called ord() on items that are already ints
    under Python 3, raising TypeError).
    """
    raw = base64.b64decode(message)
    return ''.join(chr((b - 16) ^ 32) for b in bytearray(raw))
'''correct = 'XlNkVmtUI1MgXWBZXCFeKY+AaXNt'
flag = ''
print 'Input flag:'
flag = raw_input()
if encode(flag) == correct:
print 'correct'
else:
print 'wrong'
'''
# Decode and print the hard-coded target string (yields the challenge flag).
# NOTE: Python 2 print statement — this file targets Python 2.
correct = 'XlNkVmtUI1MgXWBZXCFeKY+AaXNt'
print decode(correct)
| [
"954093370@qq.com"
] | 954093370@qq.com |
8987f8644e2e99713f75adc99ac002c8bbce3e86 | 22bcb68759d516eea70d18116cd434fcd0a9d842 | /scrap/infibeam_mobiles_scrap.py | f6a2586c2a736359955ad15e0a9a1b85a49fdc82 | [] | no_license | lovesh/abhiabhi-web-scrapper | 1f5da38c873fea74870d59f61c3c4f52b50f1886 | b66fcadc56377276f625530bdf8e739a01cbe16b | refs/heads/master | 2021-01-01T17:16:51.577914 | 2014-10-18T15:56:42 | 2014-10-18T15:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,656 | py | import downloader
import dom
import urllib
import re
import datetime
import math
import simplejson as json
import pymongo
from collections import defaultdict
import util
siteurl='http://www.infibeam.com'
referer='http://www.infibeam.com/Mobiles/search'
# AJAX endpoint for paginated search results; the page number is appended.
ajax_url='http://www.infibeam.com/Mobiles/Search_ajax.action?store=Mobiles&page='
debug=True
# First word of the product name is taken as the brand.
brand_pattern=re.compile('\w+',re.I)
# Matches shipping estimates of the form "<min>-<max>".
shipping_pattern=re.compile('(\d+)-(\d+)',re.I)
# NOTE(review): opened at import time and apparently never written/closed in this module.
logfile=open('infibeam_mobile_log.txt','w')
# Shared downloader with headers that mimic a browser visit from the search page.
dl=downloader.Downloader()
dl.addHeaders({'Origin':siteurl,'Referer':referer})
def getMobileUrlsOfPage(html):
    """Extract the set of absolute product-page URLs from one results page."""
    LINK_XPATH = '//ul[@class="srch_result portrait"]/li/a'
    page = dom.DOM(string=html)
    # l[1] holds the href; prefix with the site root to make it absolute.
    return {siteurl + l[1] for l in page.getLinksWithXpath(LINK_XPATH)}
def getAllMobileUrls():
    """Collect product URLs from every paginated search-results page."""
    COUNT_XPATH = '//div[@id="resultsPane"]/div/div/b[2]'
    listing = dom.DOM(url=referer)
    total = int(listing.getNodesWithXpath(COUNT_XPATH)[0].text)
    # 20 results per page; round up to cover the final partial page.
    num_pages = int(math.ceil(total / 20.0))
    dl.putUrls([ajax_url + str(n) for n in range(1, num_pages + 1)])
    pages = dl.download()
    print(len(pages))
    mobile_urls = []
    for p in pages:
        status, html = pages[p][0], pages[p][1]
        # Keep only successful (2xx/3xx) downloads.
        if status > 199 and status < 400:
            mobile_urls.extend(getMobileUrlsOfPage(html))
    print(len(mobile_urls))
    return mobile_urls
def getMobileFromPage(url=None,string=None):
    """Parse an Infibeam product page (by URL or raw HTML) into a mobile dict.

    Extracts availability, shipping window, name/brand, colors, price,
    image, description, offer text and the specification table, and seeds
    a one-entry product_history list used for change tracking.
    """
    mobile={}
    if url:
        doc=dom.DOM(url=url)
    else:
        doc=dom.DOM(string=string)
    # Presence of the "add to cart" button marks the item as in stock.
    addBox=doc.getNodesWithXpath('//input[@class="buyimg "]')
    if addBox: #availability check
        mobile['availability']=1
        details_path='//div[@id="ib_details"]'
        details=doc.getNodesWithXpath(details_path)
        if details:
            details=details[0].text_content()
            shipping=shipping_pattern.search(details)
            if shipping:
                # Shipping estimate stored as a [min, max] pair of strings.
                mobile['shipping']=[shipping.group(1),shipping.group(2)]
    else:
        mobile['availability']=0
    name_path='//div[@id="ib_details"]/h1'
    mobile['name']=doc.getNodesWithXpath(name_path)[0].text_content().strip()
    # Brand is the first word of the name, except the two-word "sony ericsson".
    brand=brand_pattern.search(mobile['name']).group().lower()
    if re.match('sony ericsson',mobile['name'],re.I):
        mobile['brand']='sony ericsson'
    else:
        mobile['brand']=brand
    color_path='//a[@class="colorlink"]'
    colors=doc.getNodesWithXpath(color_path)
    mobile['colors']=[color.get('text') for color in colors]
    price_path='//span[@class="infiPrice amount price"]'
    price=doc.getNodesWithXpath(price_path)
    if price:
        mobile['price']=int(price[0].text.replace(',',''))
    img_path="//div[@id='ib_img_viewer']/img"
    mobile['img_url']={'0':doc.getImgUrlWithXpath(img_path)}
    desc_path='//div[@class="reviews-box-cont-inner"]'
    desc=doc.getNodesWithXpath(desc_path)
    if desc:
        # BUG FIX: text_content is a method; the original accessed it without
        # calling it (desc[0].text_content.strip()), which raised
        # AttributeError whenever a description block was present.
        mobile['description']=desc[0].text_content().strip()
    mobile['last_modified_datetime']=datetime.datetime.now()
    # Snapshot of the volatile fields, appended to product_history on updates.
    product_history={}
    if 'price' in mobile:
        product_history['price']=mobile['price']
    if 'shipping' in mobile:
        product_history['shipping']=mobile['shipping']
    product_history['availability']=mobile['availability']
    product_history['datetime']=mobile['last_modified_datetime']
    mobile['product_history']=[product_history,]
    mobile['site']='infibeam'
    offer_path='//div[@class="offer"]'
    offer=doc.getNodesWithXpath(offer_path)
    if offer:
        mobile['offer']=offer[0].text_content().replace('\r\n\t\t','')
    specs_path='//div[@id="specs"]/div'
    specs=doc.getNodesWithXpath(specs_path)
    specification={}
    for spec in specs:
        text=spec.xpath('a')[0].text.strip()
        # Skip non-specification sections of the specs panel.
        if text=='Deliverable Locations' or text=='Disclaimer':
            continue
        trs=spec.xpath('.//tr')
        for tr in trs:
            tds=tr.xpath('.//td')
            if len(tds)<2:
                continue
            # Normalise "Key.: Value" table cells to lowercase key/value pairs.
            key=tds[0].text_content().strip(':\n\t ').replace('.','').lower()
            value=tds[1].text_content().strip(':\n\t ').lower()
            specification[key]=value
    # Fold the separate "android os"/"os version" rows into a single 'os' key.
    if 'android os' in specification and 'os' not in specification:
        if specification['android os'] in ['available','yes']:
            if 'os version' in specification:
                specification['os']='android'+' '+specification['os version']
                del(specification['os version'])
            else:
                specification['os']='android'
        del(specification['android os'])
    if mobile['brand']=='blackberry' and 'os version' in specification:
        util.replaceKey(specification,'os version','os')
    mobile['specification']=specification
    return mobile
def scrapAllMobiles():
    """Download every product page and parse each into a mobile dict."""
    dl.putUrls(getAllMobileUrls())
    downloads = dl.download()
    mobiles = []
    for page_url in downloads:
        print(page_url)
        status, html = downloads[page_url][0], downloads[page_url][1]
        # Parse only pages that downloaded successfully (2xx/3xx).
        if status > 199 and status < 400:
            record = getMobileFromPage(string=html)
            record['url'] = page_url
            mobiles.append(record)
    return mobiles
def insertIntoDB(log=True):
    """Persist scraped mobiles into MongoDB.

    New URLs are inserted; already-seen URLs (unique index on 'url') get
    their volatile fields refreshed and a product_history entry appended.
    When *log* is true a summary document is written to 'scrap_log'.
    """
    connection = pymongo.Connection('localhost', 27017)
    db = connection['abhiabhi']
    mobile_coll = db['scraped_mobiles']
    mobile_coll.create_index('url', unique=True)
    inserted_urls = []
    updated_urls = []
    for mobile in scrapAllMobiles():
        try:
            mobile_coll.insert(mobile, safe=True)
            inserted_urls.append(mobile['url'])
        except pymongo.errors.DuplicateKeyError:
            # Duplicate URL: refresh volatile fields and append to history.
            upd = {'last_modified_datetime': datetime.datetime.now()}
            for field in ('availability', 'price', 'shipping'):
                if field in mobile:
                    upd[field] = mobile[field]
            upd['offer'] = mobile['offer'] if 'offer' in mobile else ''
            mobile_coll.update(
                {'url': mobile['url']},
                {'$push': {'product_history': mobile['product_history'][0]},
                 '$set': upd})
            updated_urls.append(mobile['url'])
    inserted_count = len(inserted_urls)
    updated_count = len(updated_urls)
    if log:
        scrap_log = db['scrap_log']
        entry = {'siteurl': siteurl, 'datetime': datetime.datetime.now(),
                 'product': 'mobile',
                 'products_updated_count': updated_count,
                 'products_inserted_count': inserted_count,
                 'products_updated_urls': updated_urls,
                 'products_inserted_urls': inserted_urls}
        scrap_log.insert(entry)
    print("%d inserted and %d updated" % (inserted_count, updated_count))
if __name__=='__main__':
insertIntoDB()
| [
"lovesh.bond@gmail.com"
] | lovesh.bond@gmail.com |
4fdf0eea5655ddf62a21084ec572485b7d410a69 | 5f4e13201d4c5b7edc8dbbda289380682a187bec | /nlpfr/nltk/test/doctest_nose_plugin.py | 8b0dfecae97f59fa3dc62cfbe8b1662d9c2cd18f | [] | no_license | intellivoid/CoffeeHousePy | 92f4fb344de757837c3d3da05cb5513e90408039 | 57c453625239f28da88b88ddd0ae5f1ecdd4de3c | refs/heads/master | 2023-02-23T14:32:01.606630 | 2021-01-28T02:57:10 | 2021-01-28T02:57:10 | 324,419,067 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,250 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
import os
import codecs
import doctest
from nose.util import tolist, anyp
from nose.plugins.base import Plugin
from nose.suite import ContextList
from nose.plugins.doctests import Doctest, log, DocFileCase
# Custom doctest option flag: treat u'foo' and 'foo' as equal output.
ALLOW_UNICODE = doctest.register_optionflag("ALLOW_UNICODE")
class _UnicodeOutputChecker(doctest.OutputChecker):
    """Output checker that ignores u/U string-literal prefixes when the
    ALLOW_UNICODE option flag is set."""
    # Matches a u/U prefix (optionally followed by r/R) right before a quote.
    _literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
    def _remove_u_prefixes(self, txt):
        # Drop the u/U prefix, keeping the preceding character and the quote.
        return re.sub(self._literal_re, r"\1\2", txt)
    def check_output(self, want, got, optionflags):
        base_check = doctest.OutputChecker.check_output
        if base_check(self, want, got, optionflags):
            return True
        if not (optionflags & ALLOW_UNICODE):
            return False
        # Exact comparison failed but ALLOW_UNICODE is active: retry with
        # every u-prefix stripped from both sides.
        return base_check(
            self,
            self._remove_u_prefixes(want),
            self._remove_u_prefixes(got),
            optionflags,
        )
_checker = _UnicodeOutputChecker()
class DoctestPluginHelper(object):
    """
    This mixin adds print_function future import to all test cases.
    It also adds support for:
        '#doctest +ALLOW_UNICODE' option that
        makes DocTestCase think u'foo' == 'foo'.
        '#doctest doctestencoding=utf-8' option that
        changes the encoding of doctest files
    """
    # Names of key=value options (as opposed to +/- flags) accepted by configure().
    OPTION_BY_NAME = ("doctestencoding",)
    def loadTestsFromFileUnicode(self, filename):
        # Like nose's loadTestsFromFile, but reads the file via codecs.open so
        # a 'doctestencoding' option can select a non-default encoding.
        if self.extension and anyp(filename.endswith, self.extension):
            name = os.path.basename(filename)
            dh = codecs.open(filename, "r", self.options.get("doctestencoding"))
            try:
                doc = dh.read()
            finally:
                dh.close()
            fixture_context = None
            globs = {"__file__": filename}
            if self.fixtures:
                # Optional companion fixture module "<basename><fixtures-suffix>",
                # imported from the doctest file's own directory.
                base, ext = os.path.splitext(name)
                dirname = os.path.dirname(filename)
                sys.path.append(dirname)
                fixt_mod = base + self.fixtures
                try:
                    fixture_context = __import__(fixt_mod, globals(), locals(), ["nop"])
                except ImportError as e:
                    log.debug("Could not import %s: %s (%s)", fixt_mod, e, sys.path)
                log.debug("Fixture module %s resolved to %s", fixt_mod, fixture_context)
                if hasattr(fixture_context, "globs"):
                    # The fixture module may transform the doctest globals.
                    globs = fixture_context.globs(globs)
            parser = doctest.DocTestParser()
            test = parser.get_doctest(
                doc, globs=globs, name=name, filename=filename, lineno=0
            )
            if test.examples:
                case = DocFileCase(
                    test,
                    optionflags=self.optionflags,
                    setUp=getattr(fixture_context, "setup_test", None),
                    tearDown=getattr(fixture_context, "teardown_test", None),
                    result_var=self.doctest_result_var,
                )
                if fixture_context:
                    yield ContextList((case,), context=fixture_context)
                else:
                    yield case
            else:
                yield False  # no tests to load
    def loadTestsFromFile(self, filename):
        # Wrap each case produced above so it gets the unicode checker and the
        # print_function global patched in (see _patchTestCase).
        cases = self.loadTestsFromFileUnicode(filename)
        for case in cases:
            if isinstance(case, ContextList):
                yield ContextList([self._patchTestCase(c) for c in case], case.context)
            else:
                yield self._patchTestCase(case)
    def loadTestsFromModule(self, module):
        """Load doctests from the module.
        """
        for suite in super(DoctestPluginHelper, self).loadTestsFromModule(module):
            cases = [self._patchTestCase(case) for case in suite._get_tests()]
            yield self.suiteClass(cases, context=module, can_split=False)
    def _patchTestCase(self, case):
        # Inject the print_function future import into the doctest globals and
        # swap in the u-prefix-tolerant output checker.  'case' may be False
        # (no examples found), hence the truthiness guard.
        if case:
            case._dt_test.globs["print_function"] = print_function
            case._dt_checker = _checker
        return case
    def configure(self, options, config):
        # it is overriden in order to fix doctest options discovery
        Plugin.configure(self, options, config)
        self.doctest_result_var = options.doctest_result_var
        self.doctest_tests = options.doctest_tests
        self.extension = tolist(options.doctestExtension)
        self.fixtures = options.doctestFixtures
        self.finder = doctest.DocTestFinder()
        # super(DoctestPluginHelper, self).configure(options, config)
        self.optionflags = 0
        self.options = {}
        if options.doctestOptions:
            # Options arrive as comma-separated tokens: "+FLAG", "-FLAG",
            # or "key=value" where the key must be listed in OPTION_BY_NAME.
            stroptions = ",".join(options.doctestOptions).split(",")
            for stroption in stroptions:
                try:
                    if stroption.startswith("+"):
                        self.optionflags |= doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
                        continue
                    elif stroption.startswith("-"):
                        self.optionflags &= ~doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
                        continue
                    try:
                        key, value = stroption.split("=")
                    except ValueError:
                        pass
                    else:
                        if not key in self.OPTION_BY_NAME:
                            raise ValueError()
                        self.options[key] = value
                        continue
                except (AttributeError, ValueError, KeyError):
                    raise ValueError("Unknown doctest option {}".format(stroption))
                else:
                    # Token matched none of the recognised shapes.
                    raise ValueError(
                        "Doctest option is not a flag or a key/value pair: {} ".format(
                            stroption
                        )
                    )
class DoctestFix(DoctestPluginHelper, Doctest):
    """Concrete plugin: combines DoctestPluginHelper's fixes with the
    stock nose Doctest plugin via cooperative multiple inheritance."""
    pass
| [
"netkas@intellivoid.info"
] | netkas@intellivoid.info |
db70a141cfb4f03bf7d9c154bdb978495ede765f | e5d5fa28999bcc6c642bb42dda93afd38e272b81 | /LeetCode/1020. Number of Enclaves/solve1.py | 65d26252539eea884af9a4350e4badff1630d7a4 | [] | no_license | chiahsun/problem_solving | cd3105969983d16d3d5d416d4a0d5797d4b58e91 | 559fafa92dd5516058bdcea82a438eadf5aa1ede | refs/heads/master | 2023-02-05T06:11:27.536617 | 2023-01-26T10:51:23 | 2023-01-26T10:51:23 | 30,732,382 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | class Solution:
def numEnclaves(self, grid: List[List[int]]) -> int:
M, N = len(grid), len(grid[0])
def fill(grid, x, y):
grid[x][y] = 0
for dx, dy in [[1, 0], [-1, 0], [0, 1], [0, -1]]:
nx, ny = x+dx, y+dy
if nx >= 0 and nx < M and ny >= 0 and ny < N and grid[nx][ny] == 1:
fill(grid, nx, ny)
for i in range(M):
if grid[i][0] == 1:
fill(grid, i, 0)
if grid[i][N-1] == 1:
fill(grid, i, N-1)
for i in range(N):
if grid[0][i] == 1:
fill(grid, 0, i)
if grid[M-1][i] == 1:
fill(grid, M-1, i)
res = 0
for i in range(M):
for k in range(N):
if grid[i][k] == 1:
res += 1
return res
| [
"chiahsun0814@gmail.com"
] | chiahsun0814@gmail.com |
f6752c0fb6fb382f2304473c74d1b6030f7c9ae9 | 092dd56a1bf9357466c05d0f5aedf240cec1a27b | /tests/pytests/topology/TestMeshGenerator.py | ea7d044b789593815e26b703c24303b498c72894 | [
"MIT"
] | permissive | rwalkerlewis/pylith | cef02d5543e99a3e778a1c530967e6b5f1d5dcba | c5f872c6afff004a06311d36ac078133a30abd99 | refs/heads/main | 2023-08-24T18:27:30.877550 | 2023-06-21T22:03:01 | 2023-06-21T22:03:01 | 154,047,591 | 0 | 0 | MIT | 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null | UTF-8 | Python | false | false | 1,078 | py | #!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2022 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
#
# @file tests/pytests/topology/TestMeshGenerator.py
#
# @brief Unit testing of Python MeshGenerator object.
import unittest
from pylith.testing.UnitTestApp import TestAbstractComponent
from pylith.topology.MeshGenerator import MeshGenerator
class TestMeshGenerator(TestAbstractComponent):
    """Unit testing of MeshGenerator object.

    TestAbstractComponent supplies the generic component checks; it reads
    the class under test from the ``_class`` attribute below.
    """
    # Component class exercised by the inherited test cases.
    _class = MeshGenerator
if __name__ == "__main__":
    # unittest.makeSuite() was deprecated and removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase() is the supported equivalent.
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMeshGenerator))
    unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
346ffe2563d7b5009ca8d5426353249ca86ced67 | b252d1f8ec5f68bf5f935c000e0bb011718ea691 | /virtualenvs/ninetyseven/src/savoy/contrib/events/.svn/text-base/admin.py.svn-base | 0a3e21da84e64ebedc6f1ae3b91e7b3694c532de | [] | no_license | syncopated/97bottles | 2ceace7ed6a852bef61796733a08eb878b045152 | 08f4210e3de77c4564fcc8c1a2e9b47a0088249f | refs/heads/master | 2016-08-05T07:48:51.109089 | 2012-12-02T17:38:35 | 2012-12-02T17:38:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | from django.contrib import admin
from savoy.contrib.events.models import *
class OneOffEventTimeInline(admin.TabularInline):
    """Inline editor for one-off occurrences on the Event admin page."""
    model = OneOffEventTime
    extra = 3  # blank rows shown by default
class AllDayEventTimeInline(admin.TabularInline):
    """Inline editor for all-day occurrences on the Event admin page."""
    model = AllDayEventTime
    extra = 3  # blank rows shown by default
class WeeklyEventTimeInline(admin.TabularInline):
    """Inline editor for weekly recurring occurrences on the Event admin page."""
    model = WeeklyEventTime
    extra = 3  # blank rows shown by default
class MonthlyEventTimeInline(admin.TabularInline):
    """Inline editor for monthly recurring occurrences on the Event admin page."""
    model = MonthlyEventTime
    extra = 3  # blank rows shown by default
class EventAdmin(admin.ModelAdmin):
    """Admin configuration for Event; all four recurrence styles are
    edited inline on the same change page."""
    inlines = [OneOffEventTimeInline, AllDayEventTimeInline, WeeklyEventTimeInline, MonthlyEventTimeInline]
    # Slug is filled in from the title as the user types.
    prepopulated_fields = {'slug': ("title",)}
    list_display = ('title','short_description', 'added_by', 'date_created','start_time',)
    search_fields = ('title','short_description','description','tags',)
    date_hierarchy = 'date_published'
    list_filter=('date_created','date_published',)
    # Group the edit form into logical sections.
    fieldsets = (
        ('Basics:', {'fields': ('title', 'slug', 'date_published','added_by', 'short_description', 'description', 'event_url')}),
        ('People and places:', {'fields': ('places', 'organizers', 'sponsors', 'individual_organizers', 'individual_sponsors',)}),
        ('Categorization:', {'fields': ('tags',)}),
        ('Cost and tickets:', {'fields': ('cost_high','cost_low','ticket_url',)}),
    )
admin.site.register(Event, EventAdmin) | [
"keith@dkeithrobinson.com"
] | keith@dkeithrobinson.com | |
55fad1ea837d7346b46547d70e497c4f672c2024 | a86ca34e23afaf67fdf858df9e47847606b23e0c | /lib/temboo/Library/Klout/ListTopics.py | 306595bee299af4f646bec8c2a32e475e9fa0bbc | [] | no_license | miriammelnick/dont-get-mugged | 6026ad93c910baaecbc3f5477629b0322e116fa8 | 1613ee636c027ccc49c3f84a5f186e27de7f0f9d | refs/heads/master | 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py |
###############################################################################
#
# ListTopics
# Retrieves a list of the top three topics for Twitter users you specify.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class ListTopics(Choreography):
    """
    Create a new instance of the ListTopics Choreography. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    def __init__(self, temboo_session):
        # Bind this choreo to its fixed path in the Temboo library.
        Choreography.__init__(self, temboo_session, '/Library/Klout/ListTopics')
    def new_input_set(self):
        # Factory for the input container used when executing this choreo.
        return ListTopicsInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return ListTopicsResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Handle for an asynchronous execution of this choreo.
        return ListTopicsChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the ListTopics
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class ListTopicsInputSet(InputSet):
    """Input container for ListTopics; each setter registers one choreo
    input.  NOTE(review): the code generator emitted each setter's
    description as a bare string *above* the method rather than inside it;
    those strings are kept in place below."""
    """
    Set the value of the APIKey input for this choreography. ((required, string) The API key provided by Klout.)
    """
    def set_APIKey(self, value):
        InputSet._set_input(self, 'APIKey', value)
    """
    Set the value of the ReturnType input for this choreography. ((optional, string) The desired format for the retrieved topics: xml or json. Defaults to xml.)
    """
    def set_ReturnType(self, value):
        InputSet._set_input(self, 'ReturnType', value)
    """
    Set the value of the Usernames input for this choreography. ((required, string) A comma-delimited string of Twitter usernames whose topics you want to retrieve.)
    """
    def set_Usernames(self, value):
        InputSet._set_input(self, 'Usernames', value)
"""
A ResultSet with methods tailored to the values returned by the ListTopics choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class ListTopicsResultSet(ResultSet):
    """
    Retrieve the value for the "Response" output from this choreography execution. ((xml) The retrieved Klout scores for the specified users. The response format depends on what is specified in the ReturnType input. Defaults to xml.)
    """
    def get_Response(self):
        # Raw response body, or None when the execution produced no output.
        return self._output.get('Response', None)
class ListTopicsChoreographyExecution(ChoreographyExecution):
    """Asynchronous execution handle for ListTopics."""
    def _make_result_set(self, response, path):
        return ListTopicsResultSet(response, path)
| [
"miriam@famulus"
] | miriam@famulus |
9221dbcaffe38d0448eee87f5f3ed8162b467596 | 8a69c886a8bde80c0b8f1f0c03b0de6f31b784dd | /tests/functional/parsing/test_parsing_insert.py | f593edf1130db39e74abbcec79f249ed2e9401ad | [
"BSD-3-Clause"
] | permissive | bouthilx/kleio | c774a85b851081f264a8bbae0f45cd45ebf3fa82 | bf6291f95d9c35774194e3d9ed678db4544ca345 | refs/heads/develop | 2020-03-28T00:43:02.218807 | 2018-08-19T02:26:35 | 2018-08-19T02:26:35 | 139,289,100 | 1 | 1 | null | 2018-08-19T02:26:35 | 2018-07-01T00:18:04 | Python | UTF-8 | Python | false | false | 1,086 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Perform functional tests for the parsing of the different commands."""
import argparse
import os
import pytest
from kleio.core.cli import insert
def _create_parser(need_subparser=True):
parser = argparse.ArgumentParser()
if need_subparser:
subparsers = parser.add_subparsers()
return parser, subparsers
return parser
@pytest.mark.usefixtures("clean_db")
def test_insert_command_full_parsing(database, monkeypatch):
"""Test the parsing of all the options of insert"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
parser, subparsers = _create_parser()
args_list = ["insert", "-n", "test", "--config",
"./kleio_config_random.yaml", "./black_box.py", "-x=1"]
insert.add_subparser(subparsers)
subparsers.choices['insert'].set_defaults(func='')
args = vars(parser.parse_args(args_list))
assert args['name'] == 'test'
assert args['config'].name == './kleio_config_random.yaml'
assert args['user_args'] == ['./black_box.py', '-x=1']
| [
"xavier.bouthillier@umontreal.ca"
] | xavier.bouthillier@umontreal.ca |
56008640a4a20567e577c9b682e01646a60c0ea3 | 16c4d625ad9e945471a2a267e9992c7e9260214f | /criterium/forms.py | 4a77f9477282f7224cc15e9e7c0b8948627cbecb | [
"BSD-2-Clause"
] | permissive | andywar65/rp_repo | 8cea1c81533250b49a4036fb9b0ff6e93a0dde66 | 726c1426d738b962cabeabd8995aa35767df0c41 | refs/heads/master | 2023-05-26T13:47:48.329624 | 2021-06-05T08:35:05 | 2021-06-05T08:35:05 | 255,056,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | from django import forms
from django.forms import ModelForm
from .models import Race, Athlete
from cronache.models import Event
from users.models import User
class RaceForm(ModelForm):
    """Form for a Race; restricts the event choices to criterium-tagged
    events and requires at least a date or an event."""
    event = forms.ModelChoiceField(label="Evento", required = False,
        queryset = Event.objects.filter(tags__name__in = ['criterium',
        'Criterium'], ), )
    def clean_date(self):
        date = self.cleaned_data['date']
        # cleaned_data only contains fields that passed their own
        # validation, so 'event' may be absent here; .get() avoids the
        # KeyError the original dict lookup could raise.
        event = self.cleaned_data.get('event')
        if not date and not event:
            # Italian: "Without an event you must at least supply the date."
            msg = 'Senza evento occorre inserire almeno la data.'
            raise forms.ValidationError(msg, code='no_date')
        return date
    class Meta:
        model = Race
        fields = '__all__'
class AthleteForm(ModelForm):
    """Form for an Athlete; the selectable user must be an active,
    top-level profile in one of the youth/novice sectors."""
    user = forms.ModelChoiceField(
        label="Iscritto",
        required=True,
        queryset=(
            User.objects
            .filter(profile__parent=None,
                    profile__sector__in=['1-YC', '2-NC'],
                    is_active=True)
            .order_by('last_name', 'first_name')
        ),
    )
    class Meta:
        model = Athlete
        fields = '__all__'
| [
"andy.war1965@gmail.com"
] | andy.war1965@gmail.com |
c671f526cdc219ba9326376d219ae533bae11376 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_07_07_ideas_md3/plot_linkage_check.py | fa41cb4110e8f87b1600d492c07e5f189351bd32 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,795 | py | import numpy as np
import matplotlib.pyplot as plt
import functions as rr
from constants import const
import h5py
def plot_check(par, n_pc, n_poly, H):
    """Parity-plot predicted vs. simulated responses for property *par*.

    Loads regression results from ``regression_results_L<H>.hdf5``, selects
    the row matching (n_pc, n_poly), logs mean/max percent errors for the
    calibration and validation sets, and saves the scatter plot to PNG.
    NOTE(review): written for Python 2 (``xrange``) and an older
    matplotlib (``plt.axes()``, ``fig.canvas.set_window_title``) — confirm
    before running on a modern stack.
    """
    C = const()
    # colormat = np.random.rand(len(set_id_set), 3)
    # Fixed per-dataset colors so cal/val markers of a set match.
    colormat = np.array([[.3, .3, 1.],
                         [.3, 1., .3],
                         [1., .2, .2],
                         [0., .7, .7],
                         [.7, .0, .7],
                         [.7, .7, .0],
                         [.5, .3, .1],
                         [.3, .5, .1],
                         [.1, .3, .5]])
    f_reg = h5py.File("regression_results_L%s.hdf5" % H, 'r')
    order = f_reg.get('order_%s' % par)[...]
    """explicitly define #PCs"""
    # Boolean mask selecting the row with the requested (n_pc, n_poly).
    tmp = (order[:, 0] == n_pc)*(order[:, 1] == n_poly)
    indx = np.arange(order.shape[0])[tmp]
    # """calculate # PCs required to reach desired explained variance"""
    # f_pc = h5py.File("pca_data_L%s.hdf5" % H, 'r')
    # ratios = f_pc.get('ratios')[...]
    # f_pc.close()
    # tmp = np.cumsum(ratios)
    # tmp = np.arange(tmp.size)[tmp >= C["ev_lvl"]]
    # # max_ev = tmp.max()
    # # print "max explained variance: %s" % max_ev
    # # tmp = np.arange(tmp.size)[tmp >= max_ev]
    # n_pc = tmp[0] + 1
    # tmp = (order[:, 0] == n_pc)*(order[:, 1] == n_poly)
    # indx = np.arange(order.shape[0])[tmp]
    # """calculate # PCs to minimize LOOCV mean error"""
    # indx = np.argmin(f_reg.get('loocv_err_%s' % par))
    # n_pc = order[indx, 0]
    msg = par
    rr.WP(msg, C['wrt_file'])
    msg = "n_pc, n_poly: %s" % str(order[indx, :])
    rr.WP(msg, C['wrt_file'])
    """find the results associated with the desired n_pc, n_poly"""
    """load the simulated and predicted responses"""
    # Modulus values are scaled from MPa to GPa for plotting.
    if par == 'modulus':
        RsimC = f_reg.get('Rsim_cal_%s' % par)[...]*(1e-3)
        RpredC = f_reg.get('Rpred_cal_%s' % par)[indx, :]*(1e-3)
        RsimV = f_reg.get('Rsim_val_%s' % par)[...]*(1e-3)
        RpredV = f_reg.get('Rpred_val_%s' % par)[indx, :]*(1e-3)
    else:
        RsimC = f_reg.get('Rsim_cal_%s' % par)[...]
        RpredC = f_reg.get('Rpred_cal_%s' % par)[indx, :]
        RsimV = f_reg.get('Rsim_val_%s' % par)[...]
        RpredV = f_reg.get('Rpred_val_%s' % par)[indx, :]
    """write out the associated error"""
    # Percent error normalized by the mean simulated response.
    errC = 100.*np.abs(RpredC-RsimC)/RsimC.mean()
    msg = "mean %% error for cal: %s" % errC.mean()
    rr.WP(msg, C['wrt_file'])
    msg = "max %% error for cal: %s" % errC.max()
    rr.WP(msg, C['wrt_file'])
    errV = 100.*np.abs(RpredV-RsimV)/RsimV.mean()
    msg = "mean %% error for val: %s" % errV.mean()
    rr.WP(msg, C['wrt_file'])
    msg = "max %% error for val: %s" % errV.max()
    rr.WP(msg, C['wrt_file'])
    """plot the prediction equal to simulation line"""
    fig = plt.figure(figsize=[8, 5])
    minval = np.min([np.min([RsimC, RpredC]), np.min([RsimV, RpredV])])
    maxval = np.max([np.max([RsimC, RpredC]), np.max([RsimV, RpredV])])
    valrange = maxval-minval
    # Extend the y=x reference line well past the data range.
    minln = minval - 0.5*valrange
    maxln = maxval + 0.5*valrange
    line = np.array([minln, maxln])
    plt.plot(line, line, 'k-')
    # Calibration sets: circles, one color per dataset; c/c_ walk the
    # concatenated response arrays in dataset-sized slices.
    c = 0
    for ii in xrange(len(C['ns_cal'])):
        c_ = c + C['ns_cal'][ii]
        name = C['names_cal'][ii]
        Rsim_tmp = RsimC[c:c_]
        Rpred_tmp = RpredC[c:c_]
        c = c_
        plt.plot(Rsim_tmp, Rpred_tmp,
                 marker='o', markersize=7, color=colormat[ii, :], alpha=0.4,
                 linestyle='', label="%s (calibration)" % name)
    # Validation sets: same scheme with square markers.
    c = 0
    for ii in xrange(len(C['ns_val'])):
        c_ = c + C['ns_val'][ii]
        name = C['names_val'][ii]
        Rsim_tmp = RsimV[c:c_]
        Rpred_tmp = RpredV[c:c_]
        c = c_
        plt.plot(Rsim_tmp, Rpred_tmp,
                 marker='s', markersize=7, color=colormat[ii, :], alpha=0.4,
                 linestyle='', label="%s (validation)" % name)
    minbnd = minval - 0.1*valrange
    maxbnd = maxval + 0.1*valrange
    plt.axis([minbnd, maxbnd, minbnd, maxbnd])
    plt.axes().set_aspect('equal')
    if par == 'modulus':
        plt.xlabel("simulation (GPa)")
        plt.ylabel("prediction (GPa)")
    else:
        plt.xlabel("simulation (MPa)")
        plt.ylabel("prediction (MPa)")
    # plt.xticks(rotation=20)
    # plt.yticks(rotation=20)
    # plt.legend(loc='upper left', shadow=True, fontsize='medium')
    plt.legend(bbox_to_anchor=(1.02, 1), loc=2, shadow=True, fontsize='medium')
    fig.tight_layout(rect=(0, 0, .75, 1))
    fig_name = 'prediction_%s_npc%s_npoly%s_L%s.png' % (par, n_pc, n_poly, H)
    fig.canvas.set_window_title(fig_name)
    plt.savefig(fig_name)
    f_reg.close()
if __name__ == '__main__':
    C = const()
    ns_set = C['ns_val']
    names_set = C['names_val']
    par = "c0"
    # FIXME(review): plot_check is defined as plot_check(par, n_pc, n_poly, H)
    # but is called here with (ns_set, names_set, par) -- three positional
    # arguments in a different order, which raises TypeError as written.
    # The intended n_pc/n_poly/H values cannot be inferred from this file.
    plot_check(ns_set, names_set, par)
    plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
fe007eb975a9d674a4485a4870ad52f056034e72 | d63b1b36634b68070f6f3c017c0250a7ea646e6f | /SMC/GEM5/gem5/src/dev/x86/Pc.py | 1f1f3ca89ba3a345f277583ed85d6800e4ecccc7 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later"
] | permissive | jiwon-choe/Brown-SMCSim | ccf506d34d85fb3d085bf50ed47de8b4eeaee474 | ff3d9334c1d5c8d6a00421848c0d51e50e6b67f8 | refs/heads/master | 2021-06-30T00:15:57.128209 | 2020-11-24T03:11:41 | 2020-11-24T03:11:41 | 192,596,189 | 15 | 8 | MIT | 2019-06-20T15:43:00 | 2019-06-18T18:53:40 | C++ | UTF-8 | Python | false | false | 3,388 | py | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import IsaFake
from Pci import PciConfigAll
from Platform import Platform
from SouthBridge import SouthBridge
from Terminal import Terminal
from Uart import Uart8250
def x86IOAddress(port):
    """Translate a legacy x86 I/O port number into gem5's global address
    map, where the I/O port space lives at a fixed high base offset."""
    IO_ADDRESS_SPACE_BASE = 0x8000000000000000
    return IO_ADDRESS_SPACE_BASE + port
class Pc(Platform):
    """Standard PC platform: south bridge, PCI config space, one real
    UART, and fake ISA devices for ports the Linux kernel probes."""
    type = 'Pc'
    cxx_header = "dev/x86/pc.hh"
    system = Param.System(Parent.any, "system")

    pciconfig = PciConfigAll()

    south_bridge = SouthBridge()

    # "Non-existent" port used for timing purposes by the linux kernel
    i_dont_exist = IsaFake(pio_addr=x86IOAddress(0x80), pio_size=1)

    # Ports behind the pci config and data registers. These don't do anything,
    # but the linux kernel fiddles with them anyway.
    behind_pci = IsaFake(pio_addr=x86IOAddress(0xcf8), pio_size=8)

    # Serial port and terminal (COM1)
    com_1 = Uart8250()
    com_1.pio_addr = x86IOAddress(0x3f8)
    com_1.terminal = Terminal()

    # Devices to catch access to non-existent serial ports.
    fake_com_2 = IsaFake(pio_addr=x86IOAddress(0x2f8), pio_size=8)
    fake_com_3 = IsaFake(pio_addr=x86IOAddress(0x3e8), pio_size=8)
    fake_com_4 = IsaFake(pio_addr=x86IOAddress(0x2e8), pio_size=8)

    # A device to catch accesses to the non-existent floppy controller.
    fake_floppy = IsaFake(pio_addr=x86IOAddress(0x3f2), pio_size=2)

    def attachIO(self, bus, dma_ports=None):
        """Connect every platform device's pio port to *bus*.

        The original signature used a shared mutable default
        (``dma_ports = []``); the None sentinel gives each call a fresh
        list instead.
        """
        if dma_ports is None:
            dma_ports = []
        self.south_bridge.attachIO(bus, dma_ports)
        self.i_dont_exist.pio = bus.master
        self.behind_pci.pio = bus.master
        self.com_1.pio = bus.master
        self.fake_com_2.pio = bus.master
        self.fake_com_3.pio = bus.master
        self.fake_com_4.pio = bus.master
        self.fake_floppy.pio = bus.master
        self.pciconfig.pio = bus.default
| [
"brandnew7th@gmail.com"
] | brandnew7th@gmail.com |
aa873476e46a8e79f0c40ca8c00123eeb3510f1e | 4297f48daaa2aa0f5e4058bbeee111bf9236790e | /cv/hyper-network/residual_network_on_cifar10.py | cc01d12854cfb994273a508ce822eadd70d0987e | [] | no_license | HQ01/fast-weight | 21d9c379c8bccc29aced911d35f3b0784a072c8f | e0d9afe15c05fca966bb03a5b571a5486d6629c6 | refs/heads/master | 2021-01-21T11:54:54.423127 | 2017-05-15T13:08:42 | 2017-05-15T13:08:42 | 91,762,993 | 0 | 1 | null | 2017-05-19T03:46:09 | 2017-05-19T03:46:09 | null | UTF-8 | Python | false | false | 3,661 | py | import mx_layers as layers
def _convolution(**kwargs):
  """3x3, stride-1, pad-1, bias-free convolution unless overridden."""
  settings = dict(kernel_shape=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True)
  settings.update(kwargs)
  return layers.convolution(**settings)
def _normalized_convolution(network, **kwargs):
  """Pre-activation unit: batch-norm -> ReLU -> convolution."""
  activated = layers.ReLU(layers.batch_normalization(network, fix_gamma=False))
  return _convolution(X=activated, **kwargs)
def _module(network, n_filters, n_layers):
  """Stack n_layers pre-activation residual blocks at constant width."""
  for _ in range(n_layers):
    shortcut = network
    branch = _normalized_convolution(network, n_filters=n_filters)
    branch = _normalized_convolution(branch, n_filters=n_filters)
    network = branch + shortcut
  return network
def _transit(network, n_filters):
  """Downsampling residual block: halves spatial size, widens channels.

  Shortcut path: 2x2 max-pool (stride 2) followed by a 1x1 convolution to
  reach n_filters.  Residual path: two pre-activation convolutions, the
  first strided.  (A strided 1x1-convolution shortcut variant was kept
  here as commented-out dead code; it has been removed.)
  """
  shortcut = layers.pooling(X=network, mode='maximum', kernel_shape=(2, 2), stride=(2, 2), pad=(0, 0))
  shortcut = _convolution(X=shortcut, n_filters=n_filters, kernel_shape=(1, 1), pad=(0, 0))
  network = _normalized_convolution(network, n_filters=n_filters, stride=(2, 2))
  network = _normalized_convolution(network, n_filters=n_filters)
  return shortcut + network
def build_network(n_layers):
  """Assemble a pre-activation residual network for 10-class CIFAR.

  Three stages of n_layers residual blocks at widths 16/32/64, joined by
  downsampling transitions, then global average pooling and a softmax
  classifier.
  """
  network = layers.variable('data')
  network = _convolution(X=network, n_filters=16)
  for width in (16, 32):
    network = _module(network, width, n_layers)
    network = _transit(network, width * 2)
  network = _module(network, 64, n_layers)
  network = layers.ReLU(layers.batch_normalization(network, fix_gamma=False))
  network = layers.pooling(X=network, mode='average', kernel_shape=(8, 8), stride=(1, 1), pad=(0, 0))
  network = layers.flatten(network)
  network = layers.batch_normalization(network, fix_gamma=False)
  network = layers.fully_connected(X=network, n_hidden_units=10)
  return layers.softmax_loss(prediction=network, normalization='batch', id='softmax')
if __name__ == '__main__':
  from argparse import ArgumentParser
  parser = ArgumentParser()
  parser.add_argument('--batch_size', type=int, default=128)
  parser.add_argument('--initial_lr', type=float, default=0.1)
  parser.add_argument('--n_layers', type=int, required=True)
  parser.add_argument('--postfix', type=str, default='')
  args = parser.parse_args()
  network = build_network(n_layers=args.n_layers)
  # Step the learning rate down 10x at 32k and 48k iterations.
  from lr_scheduler import AtIterationScheduler
  lr_table = {32000 : args.initial_lr * 0.1, 48000 : args.initial_lr * 0.01}
  lr_scheduler = AtIterationScheduler(args.initial_lr, lr_table)
  optimizer_settings = {
    'args' : {'momentum' : 0.9},
    'initial_lr' : args.initial_lr,
    'lr_scheduler' : lr_scheduler,
    'optimizer' : 'SGD',
    'weight_decay' : 0.0001,
  }
  from mx_initializers import PReLUInitializer
  initializer = PReLUInitializer()
  from mx_solver import MXSolver
  solver = MXSolver(
    batch_size = args.batch_size,
    devices = (0, 1, 2, 3),
    epochs = 150,
    initializer = initializer,
    optimizer_settings = optimizer_settings,
    symbol = network,
    verbose = True,
  )
  from data_utilities import load_cifar10_record
  data = load_cifar10_record(args.batch_size)
  info = solver.train(data)
  postfix = '-' + args.postfix if args.postfix else ''
  identifier = 'residual-network-on-cifar-10-%d%s' % (args.n_layers, postfix)
  import cPickle as pickle
  # Close the output files deterministically; the original passed bare
  # open(...) handles to pickle.dump and leaked them.
  with open('info/%s' % identifier, 'wb') as f:
    pickle.dump(info, f)
  parameters = solver.export_parameters()
  with open('parameters/%s' % identifier, 'wb') as f:
    pickle.dump(parameters, f)
| [
"yg1246@nyu.edu"
] | yg1246@nyu.edu |
796bdbddda266ec0bbdc64183fe18db01383609b | ceb620c4be8b34f4aa08156226187db081fc3b55 | /loca_13/ext_personalizacion_lanta/model/models.py | b02f0c3f8c6077ee5b65dd0c562cef2f423c005a | [] | no_license | hjrhjr/entrenamiento_13_odoo_ref | f73e292b91d085473283f63a88ccd2363a03d9bf | 9a492c006d9c0aab68d0b095281dafda97ebdfda | refs/heads/main | 2023-08-25T06:46:39.075724 | 2021-10-19T14:51:27 | 2021-10-19T14:51:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,563 | py | # -*- coding: utf-8 -*-
import logging
import json
from odoo.tools import float_is_zero, float_compare, safe_eval, date_utils, email_split, email_escape_char, email_re
from odoo import fields, models, api, exceptions, _
from odoo.exceptions import UserError, ValidationError
_logger = logging.getLogger('__name__')
class AccountMove(models.Model):
    """Extends account.move with the fields needed for invoicing and accounting in Venezuela."""
    _inherit = 'account.move'
    # JSON blob consumed by the web client's payment widget; kept up to
    # date by _compute_payments_widget_reconciled_info.
    invoice_payments_widget = fields.Text(groups="account.group_account_invoice",
                                          compute='_compute_payments_widget_reconciled_info')
    @api.depends('type', 'line_ids.amount_residual')
    def _compute_payments_widget_reconciled_info(self):
        """Build the JSON payload for the invoice's payments widget.

        Non-posted moves and non-invoice documents get a disabled widget
        (JSON false); otherwise the reconciled payment lines are serialized
        under the "Aplicado" (applied) title.
        """
        for move in self:
            if move.state != 'posted' or not move.is_invoice(include_receipts=True):
                move.invoice_payments_widget = json.dumps(False)
                continue
            reconciled_vals = move._get_reconciled_info_JSON_values()
            if reconciled_vals:
                info = {
                    'title': _('Aplicado'),
                    'outstanding': False,
                    'content': reconciled_vals,
                }
                # date_utils.json_default serializes date/datetime values.
                move.invoice_payments_widget = json.dumps(info, default=date_utils.json_default)
            else:
                move.invoice_payments_widget = json.dumps(False)
                #raise UserError(_(' valor=%s')%move.invoice_payments_widget)
    def funcion_numeracion_fac(self):
        """Assign invoice_number / invoice_ctrl_number per document type.

        Supplier documents (in_*) copy the manually entered *_pro values
        after checking they were not already used for the same partner.
        Customer documents (out_*) keep the stored *_cli values when
        numbering is manual, otherwise draw fresh numbers from the
        sequences whenever the control number is missing or duplicated.
        """
        if self.type=="in_invoice":
            # Reject a supplier invoice number already used for this partner.
            busca_correlativos = self.env['account.move'].search([('invoice_number','=',self.invoice_number_pro),('id','!=',self.id),('partner_id','=',self.partner_id.id)])
            for det_corr in busca_correlativos:
                if det_corr.invoice_number:
                    raise UserError(_(' El valor :%s ya se uso en otro documento de este proveedor')%det_corr.invoice_number)
            """busca_correlativos2 = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number_pro),('id','!=',self.id)])
            for det_corr2 in busca_correlativos2:
                if det_corr2.invoice_ctrl_number:
                    raise UserError(_(' El nro de control :%s ya se uso en otro documento')%det_corr2.invoice_ctrl_number)"""
            self.invoice_number=self.invoice_number_pro
            self.invoice_ctrl_number=self.invoice_ctrl_number_pro
            partners='pro' # here it is a supplier document
        if self.type=="in_refund" or self.type=="in_receipt":
            # Same duplicate checks for supplier refunds/receipts, plus a
            # uniqueness check on the control number.
            busca_correlativos = self.env['account.move'].search([('invoice_number','=',self.refuld_number_pro),('id','!=',self.id),('partner_id','=',self.partner_id.id)])
            for det_corr in busca_correlativos:
                if det_corr.invoice_number:
                    raise UserError(_(' El valor :%s ya se uso en otro documento de este proveedor')%det_corr.invoice_number)
            busca_correlativos2 = self.env['account.move'].search([('invoice_ctrl_number','=',self.refund_ctrl_number_pro),('id','!=',self.id)])
            for det_corr2 in busca_correlativos2:
                if det_corr2.invoice_ctrl_number:
                    raise UserError(_(' El nro de control :%s ya se uso en otro documento')%det_corr2.invoice_ctrl_number)
            self.invoice_number=self.refuld_number_pro
            self.invoice_ctrl_number=self.refund_ctrl_number_pro
            partners='cli' # NOTE(review): original said "customer" here, but this branch matches supplier (in_*) refunds/receipts -- verify
        if self.type=="out_invoice":
            if self.nr_manual==False:
                # Automatic numbering: regenerate when the control number is
                # missing or already used by another document.
                busca_correlativos = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number),('id','!=',self.id)])
                #if self.invoice_number_cli:
                if busca_correlativos or not self.invoice_ctrl_number:
                    self.invoice_number_cli=self.get_invoice_number_cli()
                    self.invoice_number=self.invoice_number_cli #self.get_invoice_number_cli()
                    self.invoice_ctrl_number_cli=self.get_invoice_ctrl_number_unico()
                    self.invoice_ctrl_number=self.invoice_ctrl_number_cli #self.get_invoice_ctrl_number_cli()
                else:
                    self.invoice_number=self.invoice_number_cli
                    self.invoice_ctrl_number=self.invoice_ctrl_number_cli
            else:
                # Manual numbering: trust the stored customer values.
                self.invoice_number=self.invoice_number_cli
                self.invoice_ctrl_number=self.invoice_ctrl_number_cli
        if self.type=="out_refund":
            if self.nr_manual==False:
                busca_correlativos = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number),('id','!=',self.id)])
                if busca_correlativos or not self.invoice_ctrl_number:
                    self.refuld_number_cli=self.get_refuld_number_cli()
                    self.invoice_number=self.refuld_number_cli #self.get_refuld_number_cli()
                    self.refund_ctrl_number_cli=self.get_invoice_ctrl_number_unico()
                    self.invoice_ctrl_number=self.refund_ctrl_number_cli #self.get_refuld_ctrl_number_cli()
                else:
                    self.invoice_number=self.refuld_number_cli
                    self.invoice_ctrl_number=self.refund_ctrl_number_cli
            else:
                self.invoice_number=self.refuld_number_cli
                self.invoice_ctrl_number=self.refund_ctrl_number_cli
        if self.type=="out_receipt":
            if self.nr_manual==False:
                busca_correlativos = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number),('id','!=',self.id)])
                if busca_correlativos or not self.invoice_ctrl_number:
                    # NOTE(review): receipts draw from get_refuld_number_pro()
                    # while refunds use get_refuld_number_cli() -- confirm
                    # this asymmetry is intentional.
                    self.refuld_number_cli=self.get_refuld_number_pro()
                    self.invoice_number=self.refuld_number_cli #self.get_refuld_number_cli()
                    self.refund_ctrl_number_cli=self.get_invoice_ctrl_number_unico()
                    self.invoice_ctrl_number=self.refund_ctrl_number_cli #self.get_refuld_ctrl_number_cli()
                else:
                    self.invoice_number=self.refuld_number_cli
                    self.invoice_ctrl_number=self.refund_ctrl_number_cli
            else:
                self.invoice_number=self.refuld_number_cli
                self.invoice_ctrl_number=self.refund_ctrl_number_cli
    def get_invoice_ctrl_number_unico(self):
        '''Return the next unique control number for free-format invoices.

        Draws from the per-company ir.sequence coded
        'l10n_ve_nro_control_unico_formato_libre<company_id>'; if that
        sequence does not exist yet for this company, it is created
        (prefix '00-', no-gap, 4-digit padding) and then consumed.
        '''
        self.ensure_one()
        SEQUENCE_CODE = 'l10n_ve_nro_control_unico_formato_libre'+str(self.company_id.id) #loca 14
        company_id = self.company_id.id #loca 14
        IrSequence = self.env['ir.sequence'].with_context(force_company=company_id) #loca 14
        name = IrSequence.next_by_code(SEQUENCE_CODE)
        # if no sequence exists yet for this company, create one
        if not name:
            IrSequence.sudo().create({
                'prefix': '00-',
                'name': 'Localización Venezolana nro control Unico Factura Forma Libre %s' % 1,
                'code': SEQUENCE_CODE,
                'implementation': 'no_gap',
                'padding': 4,
                'number_increment': 1,
                'company_id': company_id, #loca 14
            })
            name = IrSequence.next_by_code(SEQUENCE_CODE)
        #self.invoice_number_cli=name
return name | [
"inmldrsolucionestecnologicas@gmail.com"
] | inmldrsolucionestecnologicas@gmail.com |
c696330ec9c2433c9e5367b330c460090a719861 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-vas/huaweicloudsdkvas/v2/model/start_task_request.py | fc412068b562f5923f2f59232c150d5fd5cb53c2 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,825 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class StartTaskRequest:
    """Generated request model for starting a VAS service task.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes masked as "****" in to_dict(); none for this model.
    sensitive_list = []
    openapi_types = {
        'service_name': 'str',
        'task_id': 'str'
    }
    attribute_map = {
        'service_name': 'service_name',
        'task_id': 'task_id'
    }
    def __init__(self, service_name=None, task_id=None):
        """StartTaskRequest
        The model defined in huaweicloud sdk
        :param service_name: Service name
        :type service_name: str
        :param task_id: ID of the target service task
        :type task_id: str
        """
        self._service_name = None
        self._task_id = None
        self.discriminator = None
        self.service_name = service_name
        self.task_id = task_id
    @property
    def service_name(self):
        """Gets the service_name of this StartTaskRequest.
        Service name
        :return: The service_name of this StartTaskRequest.
        :rtype: str
        """
        return self._service_name
    @service_name.setter
    def service_name(self, service_name):
        """Sets the service_name of this StartTaskRequest.
        Service name
        :param service_name: The service_name of this StartTaskRequest.
        :type service_name: str
        """
        self._service_name = service_name
    @property
    def task_id(self):
        """Gets the task_id of this StartTaskRequest.
        ID of the target service task
        :return: The task_id of this StartTaskRequest.
        :rtype: str
        """
        return self._task_id
    @task_id.setter
    def task_id(self, task_id):
        """Sets the task_id of this StartTaskRequest.
        ID of the target service task
        :param task_id: The task_id of this StartTaskRequest.
        :type task_id: str
        """
        self._task_id = task_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Scalar attribute; mask values declared sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 compatibility shim: force UTF-8 default encoding.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, StartTaskRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
6cc38a6ccf3295ef06d5d0013c3c4c686aec7aa5 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /110_concurrency_parallelism/001_asynchronous/_exercises/templates/Async Techniques and Examples in Python/09-built-on-asyncio/the_unsync/nosync.py | c927d59cbe969ccdd7bff7d4f230fce878fb7974 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,112 | py | # ______ da..
# ______ ma..
# ______ ti..
# ______ re..
#
#
# ___ main
# t0 _ d_t_.d_t_.n..
#
# c..
# c..
# c..
# d..
# d..
# d_s_m..
# d_s_m..
# w..
# w..
# w..
# w..
#
# dt _ d_t_.d_t_.n.. - t0
# print("Synchronous version done in |:,.2_ seconds.".f.. ?.t_s..
#
#
# ___ compute_some
# print("Computing...")
# ___ _ __ ra.. 1 10_000_000
# ma__.sq..(25 ** 25 + .01)
#
#
# ___ download_some
# print("Downloading...")
# url _ 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2'
# resp _ re__.g.. ?
# ?.r_f_s..
#
# text _ ?.t..
#
# print("Downloaded (more) |;, characters.".f.. le. ?
#
#
# ___ download_some_more
# print("Downloading more ...")
# url _ 'https://pythonbytes.fm/episodes/show/92/will-your-python-be-compiled'
# resp _ re__.g.. ?
# ?.r_f_s..
#
# text _ ?.t..
#
# print Downloaded |:, characters. .f.. le.. ?
#
#
# ___ wait_some
# print("Waiting...")
# ___ _ __ ra.. 1 1000
# t__.s.. .001
#
#
# __ _________ __ ________
# ?
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
9478c584e8720ca8e131eae84299c0383e0e907d | ad08ee023345fcc42fdac6fab527809b2d502fa5 | /peek_plugin_diagram/_private/tuples/location_index/LocationIndexUpdateDateTuple.py | 3ed190106e964d14ae31c9201f88e18f70586ab8 | [] | no_license | Synerty/peek-plugin-diagram | fcaefc414334e4584976d0b0567712bb47a3082a | 8efffa2bb3877b7fcd3736b80df53adc784ca69c | refs/heads/master | 2021-07-03T17:42:17.129328 | 2020-10-27T04:55:52 | 2020-10-27T04:55:52 | 191,874,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | from typing import Dict
from peek_abstract_chunked_index.private.tuples.ACIUpdateDateTupleABC import \
ACIUpdateDateTupleABC
from peek_plugin_diagram._private.PluginNames import diagramTuplePrefix
from vortex.Tuple import addTupleType, TupleField, Tuple
#: This the type of the data that we get when the clients observe new locationIndexs.
DeviceLocationIndexT = Dict[str, str]
@addTupleType
class LocationIndexUpdateDateTuple(Tuple, ACIUpdateDateTupleABC):
    """Maps location-index chunk keys to their last-update dates.

    This is the payload type clients receive when observing new
    location indexes (see ``DeviceLocationIndexT`` above).
    """
    __tupleType__ = diagramTuplePrefix + "LocationIndexUpdateDateTuple"
    # Improve performance of the JSON serialisation
    __rawJonableFields__ = ('initialLoadComplete', 'updateDateByChunkKey')
    # True once the initial full load of chunk dates has completed.
    initialLoadComplete: bool = TupleField()
    # chunk key -> update date.  NOTE(review): the default is a shared {}
    # literal -- confirm TupleField copies defaults per instance.
    updateDateByChunkKey: DeviceLocationIndexT = TupleField({})
    @property
    def ckiUpdateDateByChunkKey(self):
        # Accessor required by the ACIUpdateDateTupleABC interface.
        return self.updateDateByChunkKey
| [
"jarrod.chesney@synerty.com"
] | jarrod.chesney@synerty.com |
3109118228c8de62a74220aa01d858cbd0adb4b1 | 4b660991e5c9c93c83dccccdd3ea91531201e8a3 | /DSA/string/practise/find_left_most_char_that_repeat.py | 07b8bae3a720ad130348c7581e41db275e14f38b | [
"MIT"
] | permissive | RohanMiraje/DSAwithPython | 2a1515fa5f9e5cc76b08a3e6f0ce34e451fb6f4b | ea4884afcac9d6cc2817a93e918c829dd10cef5d | refs/heads/master | 2022-09-24T08:57:04.695470 | 2021-10-21T01:06:06 | 2021-10-21T01:06:06 | 238,381,770 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,613 | py | import sys
"""
Find index of leftmost char that repeats
Naive approach-TC:O(n^2) SC:O(1)
start string travers from left:
check if curr left index value is repeating
at its right remaining string by traversing:
if found repeated then result curr left index
return -1 if no repeated found
better approach:TC:O(n) SC:O(256)
use count_array of len 256 with default values to -1
init res = MAX value # to store final result index
traverse string from start :
check if value in count_arr at ascii value of curr char as index is == -1:
store curr index of curr char at this position
else:
res = min(res, count_arr[ord(char)])
return -1 if res is MAX else res
faster approach than above approach is to traverse string from its end:
so, that res will be updated directly with repeated char index every time
"""
def find_left_most_char_that_repeat(string):
    """Return the index of the first occurrence of the leftmost repeating
    character in ``string``, or -1 if no character repeats.

    O(n) time, O(1) extra space: a 256-slot table of first-seen positions,
    indexed by character code.
    """
    first_seen = [-1] * 256
    best = sys.maxsize
    for position, char in enumerate(string):
        code = ord(char)
        if first_seen[code] == -1:
            first_seen[code] = position          # remember first occurrence
        else:
            best = min(best, first_seen[code])   # char repeats: keep leftmost start
    return -1 if best == sys.maxsize else best
def find_left_most_char_that_repeat_method2(string):
    """Return the index of the first occurrence of the leftmost repeating
    character in ``string``, or -1 if no character repeats.

    Traverses from the right so ``res`` ends holding the leftmost repeated
    position directly.  Fix: the original assigned ``count_array[...] = index``
    identically in both branches; the slot is now updated unconditionally.
    """
    seen_at = [-1] * 256          # nearest index (to the right) per char code
    res = sys.maxsize
    for index in range(len(string) - 1, -1, -1):
        code = ord(string[index])
        if seen_at[code] != -1:   # char occurs again further right -> repeats
            res = index
        seen_at[code] = index
    return -1 if res == sys.maxsize else res
def find_left_most_char_that_not_repeat_method(string):
    """Return the index of the leftmost non-repeating character in ``string``,
    or -1 if every character repeats.

    Fixes: the original fell off the end (returning None) when no
    non-repeating character exists, and carried an unused ``res`` variable.
    Two passes: count occurrences, then find the first count-1 character.
    """
    counts = [0] * 256
    for char in string:
        counts[ord(char)] += 1
    for index, char in enumerate(string):
        if counts[ord(char)] == 1:
            return index
    return -1   # consistent with the other helpers in this module
def find_left_most_char_that_not_repeat_method2(string):
    """Return the index of the leftmost non-repeating character in ``string``,
    or -1 if every character repeats.

    Single pass over the string: remember the first-seen index per character,
    mark repeats with -2; then scan the constant-size table for the smallest
    non-negative index.  Fix: the original returned sys.maxsize (instead of
    -1) when no non-repeating character exists.
    """
    first_index = [-1] * 256
    for index, char in enumerate(string):
        code = ord(char)
        if first_index[code] == -1:
            first_index[code] = index   # first occurrence: remember position
        else:
            first_index[code] = -2      # repeated: disqualify this character
    res = sys.maxsize
    for val in first_index:             # constant 256-entry scan
        if val >= 0:
            res = min(res, val)         # val is the index of a non-repeater
    return -1 if res == sys.maxsize else res
if __name__ == '__main__':
    # Demo: in 'geeksforgeeks' the leftmost non-repeating char is 'f' at index 5.
    print(find_left_most_char_that_not_repeat_method2('geeksforgeeks'))
| [
"rohanmiraje19@gmail.com"
] | rohanmiraje19@gmail.com |
8791d022fce2f7641b74d13eb5342d85e53225e5 | 12db1c76e5411708fefd207c6a819e8e95551fe7 | /User/migrations/0013_auto_20201102_0842.py | d63358a6842a9e951c6e82abd5b55e74766b429b | [] | no_license | Jyonn/Saying | 5bbfffa06895211f44ae6c424223a400cd9d9223 | a32848560bc524011c1da47f509d94343f63ee59 | refs/heads/master | 2023-01-06T15:59:33.394067 | 2020-11-04T12:15:51 | 2020-11-04T12:15:51 | 123,360,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # Generated by Django 3.0.6 on 2020-11-02 08:42
import SmartDjango.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` (2020-11-02): adds User.inviter and
    # retypes several User fields with SmartDjango field classes.  Applied
    # migrations must not be hand-edited; create a new migration instead.
    dependencies = [
        ('User', '0012_auto_20180301_0040'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'default_manager_name': 'objects'},
        ),
        # Self-referential FK: the user who invited this user (nullable).
        migrations.AddField(
            model_name='user',
            name='inviter',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='User.User'),
        ),
        migrations.AlterField(
            model_name='user',
            name='password',
            field=SmartDjango.models.fields.CharField(max_length=32, verbose_name='密码'),
        ),
        migrations.AlterField(
            model_name='user',
            name='pwd_change_time',
            field=SmartDjango.models.fields.FloatField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='salt',
            field=SmartDjango.models.fields.CharField(default=None, max_length=10),
        ),
        migrations.AlterField(
            model_name='user',
            name='username',
            field=SmartDjango.models.fields.CharField(blank=True, default=None, max_length=32, null=True, unique=True, verbose_name='用户名'),
        ),
    ]
| [
"lqj679ssn@qq.com"
] | lqj679ssn@qq.com |
962e2ee557ca80df05cdb6c53d37554fb019dc11 | 93e058780c3fd4d7f40dbcac263fb58f63b51b6f | /integration_test/test_logging.py | 0d6fdb368d86e31734f05f185e19f07bea7a1d96 | [
"Apache-2.0"
] | permissive | multiscale/muscle3 | 2b6ffc34240b92bb2ade3e28e4dde1b6d3f8e3e7 | be8b21cfe97218d2a941b63d5762387716a9b3f8 | refs/heads/develop | 2023-07-12T06:12:03.510684 | 2023-07-06T20:11:41 | 2023-07-06T20:11:41 | 122,876,985 | 24 | 15 | Apache-2.0 | 2023-09-01T19:47:16 | 2018-02-25T21:07:17 | Fortran | UTF-8 | Python | false | false | 1,717 | py | import ymmsl
from ymmsl import Reference
from libmuscle.logging import LogLevel, LogMessage, Timestamp
from libmuscle.manager.manager import Manager
from libmuscle.mmp_client import MMPClient
def test_logging(log_file_in_tmpdir, caplog):
    """Round-trip test: submit a log record through MMPClient and verify the
    manager re-emits it under the 'instances.<name>' logger."""
    # Minimal macro/micro model plus settings covering each value type.
    # NOTE: this literal is a byte-sensitive YAML fixture -- do not reformat.
    ymmsl_text = (
        'ymmsl_version: v0.1\n'
        'model:\n'
        ' name: test_model\n'
        ' components:\n'
        ' macro: macro_implementation\n'
        ' micro:\n'
        ' implementation: micro_implementation\n'
        ' multiplicity: [10]\n'
        ' conduits:\n'
        ' macro.out: micro.in\n'
        ' micro.out: macro.in\n'
        'settings:\n'
        ' test1: 13\n'
        ' test2: 13.3\n'
        ' test3: testing\n'
        ' test4: True\n'
        ' test5: [2.3, 5.6]\n'
        ' test6:\n'
        ' - [1.0, 2.0]\n'
        ' - [3.0, 1.0]\n'
    )
    # create server
    ymmsl_doc = ymmsl.load(ymmsl_text)
    manager = Manager(ymmsl_doc)
    # create client (connects to the manager just started above)
    instance_id = Reference('test_logging')
    client = MMPClient(instance_id, manager.get_server_location())
    message = LogMessage(
        instance_id=str(instance_id),
        timestamp=Timestamp(2.0),
        level=LogLevel.DEBUG,
        text='Integration testing')
    # log and check: find the first record re-emitted for this instance
    client.submit_log_message(message)
    for rec in caplog.records:
        if rec.name == 'instances.test_logging':
            # timestamp 2.0 -> two seconds past the epoch, rendered in UTC
            assert rec.time_stamp == '1970-01-01T00:00:02Z'
            assert rec.levelname == 'DEBUG'
            assert rec.message == 'Integration testing'
            break
    # Tear down in reverse order of construction.
    client.close()
    manager.stop()
| [
"l.veen@esciencecenter.nl"
] | l.veen@esciencecenter.nl |
116dcc95d5b9a8bebd5e1bde8ab774eb1b84fd88 | 03c00aa07607c1f206c0fb3cf00fc5c510d7a4bf | /Infoplus/models/order_load_program.py | 000ac0c33ad65779c78e6b4df5d3c844be7667a4 | [] | no_license | infopluscommerce/infoplus-python-client | 748cc9af739615036c52adce70aa7f4303601b97 | bde657057fedb5396ecf6c42e8ba668456bd1c43 | refs/heads/master | 2023-08-23T11:32:01.160320 | 2017-03-17T14:43:15 | 2017-03-17T14:43:15 | 58,404,638 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class OrderLoadProgram(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        OrderLoadProgram - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # attribute name -> Swagger type
        self.swagger_types = {
            'id': 'int',
            'label': 'str'
        }
        # attribute name -> JSON key on the wire
        self.attribute_map = {
            'id': 'id',
            'label': 'label'
        }
        self._id = None
        self._label = None
    @property
    def id(self):
        """
        Gets the id of this OrderLoadProgram.
        :return: The id of this OrderLoadProgram.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """
        Sets the id of this OrderLoadProgram.
        :param id: The id of this OrderLoadProgram.
        :type: int
        """
        self._id = id
    @property
    def label(self):
        """
        Gets the label of this OrderLoadProgram.
        :return: The label of this OrderLoadProgram.
        :rtype: str
        """
        return self._label
    @label.setter
    def label(self, label):
        """
        Sets the label of this OrderLoadProgram.
        :param label: The label of this OrderLoadProgram.
        :type: str
        """
        self._label = label
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): no isinstance check -- any object with an equal
        # __dict__ compares equal.  Generator quirk; left as generated.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"kdonnelly@infopluscommerce.com"
] | kdonnelly@infopluscommerce.com |
4f08dc744434feff1d32cee783de86333bb13fb9 | 76787cd4117d71377bd27d251b6d30b41922ff67 | /tests/integration/response/schema/test_countries.py | 34f80377ee2c79759a4d9a4a17217fb723874f4e | [
"MIT"
] | permissive | jaebradley/draftkings_client | 50ba0f25e38b78f75d92a57660bfb110e3a27e69 | 2184e2e3cf66bfe9e4cc6f6d577c80602ab7121a | refs/heads/v3 | 2022-12-09T14:35:50.263181 | 2022-01-19T06:36:24 | 2022-01-19T06:36:24 | 73,451,976 | 138 | 47 | MIT | 2022-12-08T01:23:13 | 2016-11-11T06:29:44 | Python | UTF-8 | Python | false | false | 2,712 | py | import os
from unittest import TestCase
from draft_kings.response.objects.countries import Country, Countries
from draft_kings.response.schema.countries import CountriesSchema
from tests.config import ROOT_DIRECTORY
class TestCountries(TestCase):
    """Verify that the countries.json fixture deserializes into the expected
    Countries object via CountriesSchema."""
    def setUp(self) -> None:
        fixture_path = os.path.join(ROOT_DIRECTORY, 'tests/files/countries.json')
        with open(fixture_path, encoding="utf-8") as data_file:
            self.schema = CountriesSchema()
            self.data = self.schema.loads(data_file.read())
    def test_deserialization(self) -> None:
        self.assertIsNotNone(self.data)
    def test_countries_data(self) -> None:
        # (country_id, country_code, name) for every country in the fixture,
        # in fixture order; all are licensed.
        expected_rows = [
            (1, "US", "United States"),
            (14, "AU", "Australia"),
            (15, "AT", "Austria"),
            (2, "CA", "Canada"),
            (4, "DE", "Germany"),
            (89, "IE", "Ireland"),
            (117, "MT", "Malta"),
            (132, "NL", "Netherlands"),
            (3, "GB", "United Kingdom"),
        ]
        expected = Countries(
            countries=[
                Country(
                    country_id=cid,
                    country_code=code,
                    name=name,
                    is_licensed=True
                )
                for cid, code, name in expected_rows
            ]
        )
        self.assertEqual(
            expected,
            self.data
        )
| [
"noreply@github.com"
] | jaebradley.noreply@github.com |
92f0d23f55c9015d1f365d9e204979a391d1b0e4 | 5a8214b3a452c574e6c883bf5d90ba58ba87c461 | /leetcode/48.rotate-image.py | db4b09988b60fd7639cb9e4e1bf241fac3a2f0ad | [] | no_license | phlalx/algorithms | 69a3c8519687816e3c6333ec12b40659d3e3167f | f4da5a5dbda640b9bcbe14cb60a72c422b5d6240 | refs/heads/master | 2023-02-03T10:30:30.181735 | 2020-12-26T09:47:38 | 2020-12-26T09:47:38 | 129,254,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # TAGS implem
# make a drawing
#
# @lc app=leetcode id=48 lang=python3
#
# [48] Rotate Image
#
# https://leetcode.com/problems/rotate-image/description/
#
# algorithms
# Medium (50.40%)
# Likes: 2053
# Dislikes: 179
# Total Accepted: 307.7K
# Total Submissions: 596.5K
# Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]'
#
# You are given an n x n 2D matrix representing an image.
#
# Rotate the image by 90 degrees (clockwise).
#
# Note:
#
# You have to rotate the image in-place, which means you have to modify the
# input 2D matrix directly. DO NOT allocate another 2D matrix and do the
# rotation.
#
# Example 1:
#
#
# Given input matrix =
# [
# [1,2,3],
# [4,5,6],
# [7,8,9]
# ],
#
# rotate the input matrix in-place such that it becomes:
# [
# [7,4,1],
# [8,5,2],
# [9,6,3]
# ]
#
#
# Example 2:
#
#
# Given input matrix =
# [
# [ 5, 1, 9,11],
# [ 2, 4, 8,10],
# [13, 3, 6, 7],
# [15,14,12,16]
# ],
#
# rotate the input matrix in-place such that it becomes:
# [
# [15,13, 2, 5],
# [14, 3, 4, 1],
# [12, 6, 8, 9],
# [16, 7,10,11]
# ]
#
#
#
# @lc code=start
class Solution:
    def rotate(self, t: List[List[int]]) -> None:
        """Rotate the n x n matrix ``t`` 90 degrees clockwise, in place.

        A clockwise rotation is a transpose followed by reversing each row.
        """
        if not t or not t[0]:
            return t
        n = len(t[0])
        # Transpose: mirror across the main diagonal.
        for r in range(n):
            for c in range(r + 1, n):
                t[r][c], t[c][r] = t[c][r], t[r][c]
        # Reverse every row to complete the clockwise rotation.
        for row in t:
            row.reverse()
# @lc code=end
| [
"phlalx@users.noreply.github.com"
] | phlalx@users.noreply.github.com |
e2611d1cd34427015fc98b5c6c870673d634990c | b2cfcacbd898f758a56d095f2140681934205d89 | /GeekShop_mentor/src_lesson_7/step_1(own_admin_start)/geekshop/adminapp/views.py | e33e89d0dc71607f5a2ff803da6531ffe1ed0eda | [] | no_license | AndreySperansky/Django_1 | 7d3be3ea2ede8e46d932fdae146ce4a7c4e300b4 | 0fec0a9a02b887fd8b45a5b763b7da5dc6377208 | refs/heads/master | 2022-12-15T19:56:23.611288 | 2020-09-21T17:40:40 | 2020-09-21T17:40:40 | 284,131,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | from django.shortcuts import render, get_object_or_404
from authapp.models import ShopUser
from mainapp.models import ProductCategory, Product
def users(request):
    """Admin view: list all shop users, active/staff accounts first."""
    title = 'админка/пользователи'
    users_list = ShopUser.objects.all().order_by('-is_active', '-is_superuser', '-is_staff', 'username')
    context = {
        'title': title,
        'objects': users_list,
    }
    return render(request, 'adminapp/users.html', context)
def user_create(request):
    # TODO: implement admin user creation (form rendering + POST handling).
    pass
def user_update(request, pk):
    # TODO: implement editing of the ShopUser identified by pk.
    pass
def user_delete(request, pk):
    # TODO: implement deletion/deactivation of the ShopUser identified by pk.
    pass
def categories(request):
    """Admin view: list every product category."""
    title = 'админка/категории'
    context = {
        'title': title,
        'objects': ProductCategory.objects.all(),
    }
    return render(request, 'adminapp/categories.html', context)
def category_create(request):
    # TODO: implement category creation.
    pass
def category_update(request, pk):
    # TODO: implement editing of the ProductCategory identified by pk.
    pass
def category_delete(request, pk):
    # TODO: implement deletion of the ProductCategory identified by pk.
    pass
def products(request, pk):
    """Admin view: list products of category ``pk``, sorted by name.

    404s when the category does not exist.
    """
    title = 'админка/продукт'
    category = get_object_or_404(ProductCategory, pk=pk)
    context = {
        'title': title,
        'category': category,
        'objects': Product.objects.filter(category__pk=pk).order_by('name'),
    }
    return render(request, 'adminapp/products.html', context)
def product_create(request, pk):
    # TODO: implement product creation inside category pk.
    pass
def product_read(request, pk):
    # TODO: implement the product detail page for product pk.
    pass
def product_update(request, pk):
    # TODO: implement editing of the Product identified by pk.
    pass
def product_delete(request, pk):
    # TODO: implement deletion of the Product identified by pk.
    pass
"andrey.speransky@gmail.com"
] | andrey.speransky@gmail.com |
91af10910e07ae524e190b9ebf9a91f72007e792 | 32150af04590afe11f5c1229faf840e2e8c2a9ab | /Assignments/AS03/Week 5 HMEQ CART.py | b21ea8103f32726716f239ee33861966908b788a | [] | no_license | nsbgit/IIT-S21-CS-484 | f595f67dd72e0c0f65f9cbaafe581ab41ea4cab3 | 1850f0e571d0c72d875baedf87aae3d6943af382 | refs/heads/main | 2023-05-06T23:26:30.687201 | 2021-05-31T23:40:22 | 2021-05-31T23:40:22 | 332,584,584 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | # Load the necessary libraries
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy
import pandas
# Define a function to visualize the percent of a particular target category by a nominal predictor
def TargetPercentByNominal (
        targetVar,       # target variable
        targetCat,       # target category
        predictor,       # nominal predictor
        val4na):         # imputed value for NaN
    """Print and bar-plot the percent of ``targetCat`` within ``targetVar``
    for each level of a nominal predictor (NaN imputed with ``val4na``);
    return the cross-tabulation including the 'Percent' column."""
    xtab = pandas.crosstab(index=predictor.fillna(val4na), columns=targetVar,
                           margins=True, dropna=True)
    xtab['Percent'] = 100 * (xtab[targetCat] / xtab['All'])
    print(xtab)
    bars = xtab[xtab.index != 'All']          # drop the margin row from the plot
    plt.bar(bars.index, bars['Percent'])
    plt.xlabel(predictor.name)
    plt.ylabel('Percent of ' + targetVar.name + ' = ' + str(targetCat))
    plt.gca().xaxis.set_major_locator(mticker.MultipleLocator(1))
    plt.grid(True, axis='y')
    plt.show()
    return xtab
# Define a function to visualize the percent of a particular target category by an interval predictor
def TargetPercentByInterval (
        targetVar,       # target variable
        targetCat,       # target category
        predictor,       # interval predictor
        val4na):         # imputed value for NaN
    """Print and scatter-plot the percent of ``targetCat`` within ``targetVar``
    across the values of an interval predictor (NaN imputed with ``val4na``);
    return the cross-tabulation including the 'Percent' column."""
    xtab = pandas.crosstab(index=predictor.fillna(val4na), columns=targetVar,
                           margins=True, dropna=True)
    xtab['Percent'] = 100 * (xtab[targetCat] / xtab['All'])
    print(xtab)
    points = xtab[xtab.index != 'All']        # drop the margin row from the plot
    plt.scatter(points.index, points['Percent'])
    plt.xlabel(predictor.name)
    plt.ylabel('Percent of ' + targetVar.name + ' = ' + str(targetCat))
    plt.grid(True, axis='both')
    plt.show()
    return xtab
# Sequential analysis script: load HMEQ, explore BAD vs. each predictor,
# then fit and render a depth-2 decision tree on DEBTINC + DELINQ.
hmeq = pandas.read_csv('hmeq.csv',
                       delimiter=',')
nTotal = len(hmeq)
# Generate the frequency table and the bar chart for the BAD target variable
crossTable = pandas.crosstab(index = hmeq['BAD'], columns = ["Count"], margins = True, dropna = False)
crossTable['Percent'] = 100 * (crossTable['Count'] / nTotal)
crossTable = crossTable.drop(columns = ['All'])
print(crossTable)
plotTable = crossTable[crossTable.index != 'All']
plt.bar(plotTable.index, plotTable['Percent'])
plt.xticks([[0], [1]])
plt.xlabel('BAD')
plt.ylabel('Percent')
plt.grid(True, axis='y')
plt.show()
# Cross-tabulate BAD by DELINQ
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['DELINQ'], val4na = -1)
# Cross-tabulate BAD by DEROG
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['DEROG'], val4na = -1)
# Cross-tabulate BAD by JOB
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['JOB'], val4na = 'Unknown')
# Cross-tabulate BAD by NINQ
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['NINQ'], val4na = -1)
# Cross-tabulate BAD by REASON
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['REASON'], val4na = 'Unknown')
# Cross-tabulate BAD by DEBTINC
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['DEBTINC'], val4na = -10)
# Cross-tabulate BAD by CLAGE
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['CLAGE'], val4na = -10)
# Cross-tabulate BAD by CLNO
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['CLNO'], val4na = -10)
# Cross-tabulate BAD by LOAN
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['LOAN'], val4na = -10)
# Cross-tabulate BAD by MORTDUE
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['MORTDUE'], val4na = -10)
# Cross-tabulate BAD by VALUE
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['VALUE'], val4na = -10)
# Cross-tabulate BAD by YOJ
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['YOJ'], val4na = -10)
# Specify the target and the predictor variables
X_name = ['DEBTINC', 'DELINQ']
Y_name = 'BAD'
# Rows with NaN in any modeling column are dropped (complete-case analysis).
trainData = hmeq[['DEBTINC', 'DELINQ', 'BAD']].dropna()
X_inputs = trainData[X_name]
Y_target = trainData[Y_name]
# How many missing values are there?  (Should be zero after dropna.)
print('Number of Missing Observations:')
print(X_inputs.isnull().sum())
print(Y_target.isnull().sum())
# Load the TREE library from SKLEARN
from sklearn import tree
classTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=2, random_state=60616)
hmeq_DT = classTree.fit(X_inputs, Y_target)
print('Accuracy of Decision Tree classifier on training set: {:.6f}' .format(classTree.score(X_inputs, Y_target)))
import graphviz
# Render the fitted tree as Graphviz DOT for inspection.
dot_data = tree.export_graphviz(hmeq_DT,
                                out_file=None,
                                impurity = True, filled = True,
                                feature_names = X_name,
                                class_names = ['0', '1'])
graph = graphviz.Source(dot_data)
print(graph)
| [
"nsbgit@users.noreply.github.com"
] | nsbgit@users.noreply.github.com |
3676f283e395d049f59998b475e78308256c0321 | db0fe327ae984b7a461c02be74c439fe7d41c220 | /Question_repo/libs/repo_data.py | 47c31cbbfeb20ee14e0a507f5b9489b3cc15dd93 | [] | no_license | xingzhe1998/T_QUE | 4da80366bc721cc0cd222f7f7fde8331c6df85ee | 4c0d39402659b7c8fc448165c784ab125c700b41 | refs/heads/master | 2020-06-24T16:29:22.652765 | 2019-07-26T12:55:45 | 2019-07-26T12:55:45 | 199,016,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | from apps.repo.models import Answers, User, Questions
from django.db.models import Count
def check_rank(data):
    """Sort key: number of answers in an aggregated per-user row."""
    answer_count = data["id__count"]
    return answer_count
def user_answer_data(user):
    """Build the answer-statistics context for ``user``.

    Returns a dict with the user's answered count, the total number of
    questions, the total number of users, and the user's ranking row
    (rank 0 when the user has not answered anything).
    """
    # Use QuerySet.count() so the database aggregates instead of fetching
    # every row (the original called __len__ on full querysets).
    answer_num = Answers.objects.filter(user=user).count()
    question_all = Questions.objects.count()
    # 用户总量
    user_sum = User.objects.count()
    # Answers per user: one aggregated row per user.
    # <QuerySet [{'user': 1, 'id__count': 1}, ...]>
    rank = Answers.objects.values('user').annotate(Count('id'))
    # Sort by answer count, descending.
    rank = sorted(rank, key=check_rank, reverse=True)
    # Competition ranking: users with equal counts share a rank.
    # (为提升效率,可写入memcache)
    rank_dict = {}
    cur_rank = 0
    cur_count = 0
    for index, item in enumerate(rank, start=1):
        if cur_count != item["id__count"]:
            cur_rank = index
            cur_count = item["id__count"]
        rank_dict[item["user"]] = dict(item, rank=cur_rank)
    kwgs = {
        "answer_num": answer_num,
        "question_all": question_all,
        "user_sum": user_sum,
        "rank": rank_dict[user.id] if answer_num else {"rank": 0},
    }
    return kwgs
| [
"apple@appledeMacBook-Air.local"
] | apple@appledeMacBook-Air.local |
8560eab3f2fc20fb2784b79adb1f3d5c9ff9d7c8 | d1742451b25705fc128acc245524659628ab3e7d | /Data Structure & Algorithm/Segment Tree/Segment Tree Query.py | 2c6201ceabaf2ce509db9d2239a70e8e6527bab1 | [] | no_license | Shovon588/Programming | ebab793a3c97aedddfcad5ea06e7e22f5c54a86e | e4922c9138998358eed09a1be7598f9b060c685f | refs/heads/master | 2022-12-23T18:29:10.141117 | 2020-10-04T17:29:32 | 2020-10-04T17:29:32 | 256,915,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | def func(node,low,high):
if low==high:
tree[node]=a[low-1]
return
left=2*node
right=(2*node)+1
mid=(low+high)//2
func(left,low,mid)
func(right,mid+1,high)
tree[node]=tree[left]+tree[right]
def query(node, b, e, i, j):
    """Return the sum over the query range [i, j], where ``node`` covers [b, e]."""
    if i > e or j < b:            # disjoint: contributes nothing
        return 0
    if i <= b and e <= j:         # node range fully inside the query: stored sum
        return tree[node]
    mid = (b + e) // 2
    left_sum = query(2 * node, b, mid, i, j)
    right_sum = query(2 * node + 1, mid + 1, e, i, j)
    return left_sum + right_sum
# Driver: read the array, build the tree, then answer range-sum queries.
n = int(input())
a = list(map(int, input().split()))
# 4*n is the standard safe upper bound on segment-tree storage; the original
# 3*n allocation is not guaranteed to fit the node indices for every n.
tree = [0] * (4 * n)
func(1, 1, n)
for _ in range(int(input())):
    l, r = map(int, input().split())   # 1-based inclusive query range
    print(query(1, 1, n, l, r))
| [
"mainulislam588@gmail.com"
] | mainulislam588@gmail.com |
af2538c02b261ca582200a1d3a1e2fe9f4d58da2 | 8b54951abdc4a8c119b057c5231adf65fdd5a915 | /lock_signal.py | fef29effd8931853c5369ee4e80d6ecf89fc05d7 | [] | no_license | larago/gevent | 0a645fbc97ec1f7f85c6a3a961b82739d1e64c7a | b41360ca8ebf6fbad8463d5128bb7b4bf837a7b9 | refs/heads/master | 2021-01-11T14:58:55.860636 | 2017-01-28T09:34:38 | 2017-01-28T09:34:38 | 80,270,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # encoding=utf8
from gevent import sleep
from gevent.pool import Pool
from gevent.lock import BoundedSemaphore
sem = BoundedSemaphore(2)
def worker1(n):
sem.acquire()
print 'Worker %s acquired semaphore' % n
sleep(0)
sem.release()
print 'Worker %s releasedd semaphore' % n
def worker2(n):
    # Context-manager style: the BoundedSemaphore is released automatically
    # when the with-block exits, even on exception.
    with sem:
        print 'Worker %i acquired semaphore' % n
        sleep(0)
    print 'Worker %i released semaphore' % n
# Greenlets 0-1 use the explicit acquire/release worker; 3-5 use the
# context-manager worker.  At most 2 hold the semaphore at once.
pool = Pool()
pool.map(worker1, xrange(0, 2))
pool.map(worker2, xrange(3, 6)) | [
"bingeye@foxmail.com"
] | bingeye@foxmail.com |
20ec50dbb59c877aabc53633286aa894e4a4907c | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-codecraft/huaweicloudsdkcodecraft/v5/model/register_competition_info_response.py | c351c82d2c877b2af1bf58f524e51d1749af18c2 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,282 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RegisterCompetitionInfoResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'is_permitted': 'bool',
'team_id': 'str'
}
attribute_map = {
'is_permitted': 'is_permitted',
'team_id': 'team_id'
}
def __init__(self, is_permitted=None, team_id=None):
"""RegisterCompetitionInfoResponse
The model defined in huaweicloud sdk
:param is_permitted: 是否允许提交作品,true-允许,false-不允许
:type is_permitted: bool
:param team_id: 团队ID
:type team_id: str
"""
super(RegisterCompetitionInfoResponse, self).__init__()
self._is_permitted = None
self._team_id = None
self.discriminator = None
if is_permitted is not None:
self.is_permitted = is_permitted
if team_id is not None:
self.team_id = team_id
@property
def is_permitted(self):
"""Gets the is_permitted of this RegisterCompetitionInfoResponse.
是否允许提交作品,true-允许,false-不允许
:return: The is_permitted of this RegisterCompetitionInfoResponse.
:rtype: bool
"""
return self._is_permitted
@is_permitted.setter
def is_permitted(self, is_permitted):
"""Sets the is_permitted of this RegisterCompetitionInfoResponse.
是否允许提交作品,true-允许,false-不允许
:param is_permitted: The is_permitted of this RegisterCompetitionInfoResponse.
:type is_permitted: bool
"""
self._is_permitted = is_permitted
@property
def team_id(self):
"""Gets the team_id of this RegisterCompetitionInfoResponse.
团队ID
:return: The team_id of this RegisterCompetitionInfoResponse.
:rtype: str
"""
return self._team_id
@team_id.setter
def team_id(self, team_id):
"""Sets the team_id of this RegisterCompetitionInfoResponse.
团队ID
:param team_id: The team_id of this RegisterCompetitionInfoResponse.
:type team_id: str
"""
self._team_id = team_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RegisterCompetitionInfoResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    equal = self == other
    return not equal
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3043c42d97729db02e9e2dfc1c55597e1cf93c21 | 80afa26ba73b53f38e3fc21bf395030762fe8981 | /200. Number of Islands.py | ae34d2da127bdf85debe74d5795951cefd3b567d | [] | no_license | iamshivamgoswami/Random-DSA-Questions | 45b402063dbd2e31da2eee7590b6991aa624637d | e36250d08cf0de59cd0a59b4f3293e55793b1a6f | refs/heads/main | 2023-07-15T15:48:36.363321 | 2021-08-26T03:40:47 | 2021-08-26T03:40:47 | 392,702,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
def dfs(i, j):
grid[i][j] = "0"
for x, y in [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)]:
if 0 <= x < len(grid) and 0 <= y < len(grid[0]) and grid[x][y] == "1":
dfs(x, y)
count = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == "1":
count += 1
dfs(i, j)
return count
| [
"shivamgoswami12@gmail.com"
] | shivamgoswami12@gmail.com |
5e8b05b53016ffca5bed18a5013345269a00b769 | 7549c5f2e2b8ecc6d8f7ccc8030e005ffdf15018 | /modules/python_base/09/9.1.3/finally.py | 6c930a0a22b024c92df588cea78ced2a5f4298f0 | [] | no_license | gao634209276/myPython | d16bbf53645531e03cd2da4f211e783d0b93f703 | 40b4e8dcd329c34a73808a51743131d554832ab6 | refs/heads/master | 2020-06-20T22:21:55.521914 | 2017-09-01T14:27:31 | 2017-09-01T14:27:31 | 74,817,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# finally错误的用法
# try:
# f = file("hello2.txt", "r")
# print "读文件"
# except IOError: # 捕获IOError异常
# print "文件不存在"
# finally: # 其他异常情况
# f.close()
# try...except...finally
try:
f = open("hello.txt", "r")
try:
print f.read(5)
except:
print "读取文件错误"
finally: # finally子句一般用于释放资源
print "释放资源"
f.close()
except IOError:
print "文件不存在"
| [
"634209276@qq.com"
] | 634209276@qq.com |
535ad5ea0cc874c6d7e51ccadc8482f2f6343596 | 2309fbe9f9b86685f533706e6877ac3cfae99632 | /tests/src/Regression_Testing/Test_Scripts/Click_on_TAR.py | 099c02daa09fc1944c2154963b58662a39772477 | [
"MIT"
] | permissive | komathi1607/cQube | 909a8834608ce19989347863be538022bfaacd84 | 6cc629a600075a1e5332f84f8ffa940a3eebfcd0 | refs/heads/master | 2022-11-15T07:46:19.314371 | 2020-06-05T00:55:17 | 2020-06-05T00:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | import time
import unittest
from selenium import webdriver
from Data.parameters import Data
from TS.reuse_func import cqube
from get_dir import pwd
class test_TAR(unittest.TestCase):
    """UI regression test: from the cQube dashboard, open the TAR page."""

    def setUp(self):
        # Launch Chrome with the project-configured driver, open the cQube
        # application and log in before every test.
        driver_path = pwd()
        self.driver = webdriver.Chrome(executable_path=driver_path.get_driver_path())
        driver = cqube(self.driver)
        driver.open_cqube_appln()
        driver = cqube(self.driver)
        driver.login_cqube()

    def test_TAR_Page(self):
        # Click the dashboard menu, then the TAR entry; the fixed sleeps
        # wait for page transitions (no explicit waits are used here).
        self.driver.find_element_by_xpath(Data.Dashboard).click()
        time.sleep(2)
        self.driver.find_element_by_xpath(Data.TAR).click()
        time.sleep(2)

    def tearDown(self):
        # Let the final page settle, then close the browser window.
        time.sleep(5)
        self.driver.close()
if __name__ == "__main__":
unittest.main() | [
"laxmikanth.vattamvar@tibilsolutions.com"
] | laxmikanth.vattamvar@tibilsolutions.com |
81ed140b023ae1c2e0ee981bb65a3c175e059052 | 3356ecffb180dd617a8ded3bca89a067122e9d65 | /lab1/task_7.py | 0c781a6c6f9d656ecab036074ee523467f7bbfbe | [] | no_license | python-practice-b02-927/TodorovRV | bae93783b15e6e0397c7dfae018dfac58b719a03 | d6765f8b65ae88b2d0ca021340ff1848d4b3605a | refs/heads/master | 2020-07-22T20:18:45.296515 | 2019-11-19T11:46:03 | 2019-11-19T11:46:03 | 207,315,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | #!/usr/bin/python3
from pyrob.api import *
@task
def task_5_4():
while not wall_is_beneath():
move_down()
while wall_is_beneath():
move_right()
move_down()
move_left()
while wall_is_above() and (not wall_is_on_the_left()):
move_left()
if __name__ == '__main__':
run_tasks()
| [
"noreply@github.com"
] | python-practice-b02-927.noreply@github.com |
38613ed23f71373e77774283f60636d3ef9b8b70 | e87c04d6c2bbba383f9c75620b16f02358039ab5 | /200826프로/Re최소비용구하기_G5.py | d8fd01ea1428cd215e57f2eb2c491840f6e2f32f | [] | no_license | yoonwoo123/Algorithm | 2bf6e103009572cbcf3abfd783f6c28762529340 | 5d1e76f1bf6c2fc6acb25dc5296d62b2ca453ec6 | refs/heads/master | 2022-06-18T14:06:06.248906 | 2022-05-29T10:41:15 | 2022-05-29T10:41:15 | 221,483,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | # 0628백준 못풀었던 최소비용구하기 재도전
import sys, heapq
sys.stdin = open("최소비용_input.txt")  # redirect stdin to the local sample-input file
input = sys.stdin.readline  # rebind input() to fast buffered line reads
def dijkstra(graph, start, end):
    """Cheapest-path cost from *start* to *end* with a lazy-deletion heap.

    *graph* maps node -> {neighbor: edge_cost}. Returns float('inf') when
    *end* is unreachable from *start*.
    """
    dist = {vertex: float('inf') for vertex in graph}
    dist[start] = 0
    frontier = [(0, start)]
    while frontier:
        cost, vertex = heapq.heappop(frontier)
        # Stale heap entry: a shorter path to this vertex was already found.
        if cost > dist[vertex]:
            continue
        for neighbor, weight in graph[vertex].items():
            candidate = cost + weight
            if candidate < dist[neighbor]:
                dist[neighbor] = candidate
                heapq.heappush(frontier, (candidate, neighbor))
    return dist[end]
# ---- input parsing and answer (Baekjoon-style format) ----
N = int(input())  # number of nodes
M = int(input())  # number of directed edges (bus routes)
graph = {i : {} for i in range(1, N+1)}
for _ in range(M):
    start, end, toll = map(int, input().split())
    # Important: the same (start, end) pair can appear more than once; a
    # later, more expensive duplicate must be ignored, otherwise it would
    # overwrite the cheaper edge and the minimum cost would come out wrong.
    if end in graph[start] and toll >= graph[start][end]: continue
    graph[start][end] = toll
start, end = map(int, input().split())
print(dijkstra(graph, start, end))
| [
"lkkjasd@korea.ac.kr"
] | lkkjasd@korea.ac.kr |
39c80ed7609de255a1d4095a62be36b57429a380 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /XjgoXNmnz59txiQp3_20.py | 5a5eb8ffec5c428b019e2cbaa644d2f512144599 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py |
def split(number):
if (number < 5):
return number
rem = number % 3
div3 = number//3
if (rem == 2):
return 3 ** (div3) * 2
return 3 ** (div3-1) * (3 + rem)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
86e9fa6a8c3ed0a7e6035870f5d3efbab277cffc | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_35/202.py | 85cf40d5f4d6c4e8f2519c5092793cfc5a85d792 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py | #!/usr/bin/python
MAX_ALT = 11000
def is_valid_index(i, j, alts):
    """True when (i, j) addresses an existing cell of the grid *alts*."""
    return 0 <= i < len(alts) and 0 <= j < len(alts[i])
def find_flow_dir(i, j, alts):
    """Return the cell that water at (i, j) flows to (itself for a sink).

    Candidates are considered in the order centre, North, West, East,
    South, so min() keeps the original tie-breaking; cells outside the
    grid count as MAX_ALT and are never chosen.
    """
    candidates = []
    for ci, cj in ((i, j), (i - 1, j), (i, j - 1), (i, j + 1), (i + 1, j)):
        altitude = alts[ci][cj] if is_valid_index(ci, cj, alts) else MAX_ALT
        candidates.append((altitude, (ci, cj)))
    return min(candidates, key=lambda entry: entry[0])[1]
def flow(i, j, n, ans, alts, eq):
    """Trace water from (i, j) downhill, labelling unlabelled cells with *n*.

    Walks cell to cell via find_flow_dir, writing label *n* into *ans*
    (-1 means unlabelled) until it hits a sink or a cell that already has a
    label. Finally records in *eq* that label *n* belongs to the same basin
    as the label of the cell where the walk stopped (n itself for a sink).
    Mutates *ans* and *eq* in place.
    """
    while ans[i][j] == -1:
        ans[i][j] = n
        ni, nj = find_flow_dir(i, j, alts)
        if (ni, nj) == (i, j): # found a sink
            break
        i, j = ni, nj
    eq[n] = ans[i][j]
def print_basin(basin, f):
    """Write one space-separated line per basin row to file object *f*."""
    for labels in basin:
        f.write(" ".join(str(label) for label in labels) + "\n")
def parent(num, eq):
    """Union-find root lookup with path compression; *eq* maps label -> parent."""
    root = eq[num]
    if root != num:
        root = parent(root, eq)
        eq[num] = root
    return root
def to_alph(basin, eq):
    """Rewrite numeric basin labels as letters 'a', 'b', ... in reading order.

    Uses parent() to collapse each cell's label to its basin representative,
    then assigns letters in the order basins are first encountered row-major.
    NOTE: Python 2 code (xrange) and relies on the module-level globals
    H and W set by the driver script below.
    """
    nums_seen = {}
    count = 0
    for i in xrange(H):
        for j in xrange(W):
            p = parent(basin[i][j], eq)
            if p not in nums_seen:
                # First appearance of this basin: give it the next letter.
                nums_seen[p] = count
                count += 1
            basin[i][j] = chr(ord('a') + nums_seen[p])
# Driver (Python 2): read all Code Jam cases from B-large.in, label the
# drainage basins of each altitude grid, and write answers to B-large.out.
f = open('B-large.in', 'r')
lines = [line.strip() for line in f.readlines()]
f.close()
N = int(lines[0])  # number of test cases
f = open('B-large.out', 'w')
line_num = 1  # cursor into `lines`; each case consumes 1 header + H rows
for case in xrange(1, N + 1):
    H, W = map(int, lines[line_num].split(' '))
    alts = [0]*H
    for i in xrange(H):
        alts[i] = map(int, lines[line_num + i + 1].split(' '))
        assert len(alts[i]) == W
    # ans holds the basin label per cell; -1 means not yet labelled.
    ans = [0] * H
    for i in xrange(H):
        ans[i] = [-1] * W
    equivalencies = {}
    count = 0
    # Flood every unlabelled cell; flow() records which labels merge.
    for i in xrange(H):
        for j in xrange(W):
            if ans[i][j] == -1:
                flow(i, j, count, ans, alts, equivalencies)
                count += 1
    to_alph(ans, equivalencies)
    f.write('Case #%d:\n' % case)
    print_basin(ans, f)
    line_num += 1 + H
f.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c891b8067ded9d4dbe6bf8eef18d752dd94634f4 | 78137d5e4e688749399bbb386b26536e4ac6d9fa | /pytorch3d/renderer/camera_conversions.py | 9fb73e5fcc8140e159b9f0ae21645212077e7ee4 | [
"MIT",
"BSD-3-Clause"
] | permissive | bruinxiong/pytorch3d | 4235681c6356f7e69fa506d8474a3c7cf83d9fe6 | 18a3c5cbb9055bcda44590d39db65bb0c74db799 | refs/heads/master | 2022-06-18T16:28:39.589229 | 2022-05-18T20:11:36 | 2022-05-18T20:11:36 | 238,892,798 | 0 | 0 | NOASSERTION | 2022-05-18T20:11:37 | 2020-02-07T10:04:39 | Python | UTF-8 | Python | false | false | 6,893 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
from ..transforms import matrix_to_rotation_6d
from .cameras import PerspectiveCameras
LOGGER = logging.getLogger(__name__)
def _cameras_from_opencv_projection(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
) -> PerspectiveCameras:
    """Convert a batch of OpenCV-convention cameras to PyTorch3D cameras.

    Args:
        R: (N, 3, 3) batched rotation matrices (OpenCV convention).
        tvec: batched translation vectors, first dim N.
        camera_matrix: (N, 3, 3) intrinsic matrices K.
        image_size: (N, 2) image sizes given as (height, width); flipped to
            (width, height) below.

    Returns:
        PerspectiveCameras whose focal lengths / principal points are
        expressed in PyTorch3D's NDC convention.
    """
    # fx, fy from K's diagonal; (cx, cy) from its last column.
    focal_length = torch.stack([camera_matrix[:, 0, 0], camera_matrix[:, 1, 1]], dim=-1)
    principal_point = camera_matrix[:, :2, 2]
    # Retype the image_size correctly and flip to width, height.
    image_size_wh = image_size.to(R).flip(dims=(1,))
    # Screen to NDC conversion:
    # For non square images, we scale the points such that smallest side
    # has range [-1, 1] and the largest side has range [-u, u], with u > 1.
    # This convention is consistent with the PyTorch3D renderer, as well as
    # the transformation function `get_ndc_to_screen_transform`.
    scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0
    scale = scale.expand(-1, 2)
    c0 = image_size_wh / 2.0
    # Get the PyTorch3D focal length and principal point.
    focal_pytorch3d = focal_length / scale
    p0_pytorch3d = -(principal_point - c0) / scale
    # For R, T we flip x, y axes (opencv screen space has an opposite
    # orientation of screen axes).
    # We also transpose R (opencv multiplies points from the opposite=left side).
    R_pytorch3d = R.clone().permute(0, 2, 1)
    T_pytorch3d = tvec.clone()
    R_pytorch3d[:, :, :2] *= -1
    T_pytorch3d[:, :2] *= -1
    return PerspectiveCameras(
        R=R_pytorch3d,
        T=T_pytorch3d,
        focal_length=focal_pytorch3d,
        principal_point=p0_pytorch3d,
        image_size=image_size,
        device=R.device,
    )
def _opencv_from_cameras_projection(
    cameras: PerspectiveCameras,
    image_size: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Inverse of `_cameras_from_opencv_projection`.

    Args:
        cameras: batched PyTorch3D PerspectiveCameras (NDC convention).
        image_size: (N, 2) image sizes as (height, width).

    Returns:
        Tuple (R, tvec, camera_matrix) in OpenCV convention: rotations are
        transposed back, x/y axes un-flipped, and intrinsics rebuilt in
        screen-space pixels.
    """
    R_pytorch3d = cameras.R.clone()  # pyre-ignore
    T_pytorch3d = cameras.T.clone()  # pyre-ignore
    focal_pytorch3d = cameras.focal_length
    p0_pytorch3d = cameras.principal_point
    # Undo the axis flips applied in the forward conversion.
    T_pytorch3d[:, :2] *= -1
    R_pytorch3d[:, :, :2] *= -1
    tvec = T_pytorch3d
    R = R_pytorch3d.permute(0, 2, 1)
    # Retype the image_size correctly and flip to width, height.
    image_size_wh = image_size.to(R).flip(dims=(1,))
    # NDC to screen conversion.
    scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0
    scale = scale.expand(-1, 2)
    c0 = image_size_wh / 2.0
    # pyre-fixme[29]: `Union[BoundMethod[typing.Callable(torch.Tensor.__neg__)[[Named...
    principal_point = -p0_pytorch3d * scale + c0
    focal_length = focal_pytorch3d * scale
    # Assemble K: diagonal fx, fy; last column (cx, cy, 1).
    camera_matrix = torch.zeros_like(R)
    camera_matrix[:, :2, 2] = principal_point
    camera_matrix[:, 2, 2] = 1.0
    camera_matrix[:, 0, 0] = focal_length[:, 0]
    camera_matrix[:, 1, 1] = focal_length[:, 1]
    return R, tvec, camera_matrix
def _pulsar_from_opencv_projection(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
    znear: float = 0.1,
) -> torch.Tensor:
    """Pack batched OpenCV cameras into Pulsar per-camera parameter rows.

    Each output row is [cam_pos (3), 6D rotation (6), focal_length,
    sensor_width, cx, cy] — 13 values. All cameras in the batch must share
    one image size, and the x/y focal lengths are averaged (with a warning
    when they differ by more than 1%, since Pulsar supports only one).

    Args:
        R: (N, 3, 3) rotation matrices.
        tvec: (N, 3) or (N, 3, 1) translation vectors.
        camera_matrix: (N, 3, 3) intrinsic matrices.
        image_size: (N, 2) image sizes as (height, width).
        znear: the Pulsar focal length is placed just inside this value
            (znear - 1e-5).

    Returns:
        (N, 13) tensor of Pulsar camera parameters.
    """
    assert len(camera_matrix.size()) == 3, "This function requires batched inputs!"
    assert len(R.size()) == 3, "This function requires batched inputs!"
    assert len(tvec.size()) in (2, 3), "This function reuqires batched inputs!"
    # Validate parameters.
    image_size_wh = image_size.to(R).flip(dims=(1,))
    assert torch.all(
        image_size_wh > 0
    ), "height and width must be positive but min is: %s" % (
        str(image_size_wh.min().item())
    )
    assert (
        camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3
    ), "Incorrect camera matrix shape: expected 3x3 but got %dx%d" % (
        camera_matrix.size(1),
        camera_matrix.size(2),
    )
    assert (
        R.size(1) == 3 and R.size(2) == 3
    ), "Incorrect R shape: expected 3x3 but got %dx%d" % (
        R.size(1),
        R.size(2),
    )
    if len(tvec.size()) == 2:
        # Normalise (N, 3) translations to column vectors (N, 3, 1).
        tvec = tvec.unsqueeze(2)
    assert (
        tvec.size(1) == 3 and tvec.size(2) == 1
    ), "Incorrect tvec shape: expected 3x1 but got %dx%d" % (
        tvec.size(1),
        tvec.size(2),
    )
    # Check batch size.
    batch_size = camera_matrix.size(0)
    assert R.size(0) == batch_size, "Expected R to have batch size %d. Has size %d." % (
        batch_size,
        R.size(0),
    )
    assert (
        tvec.size(0) == batch_size
    ), "Expected tvec to have batch size %d. Has size %d." % (
        batch_size,
        tvec.size(0),
    )
    # Check image sizes.
    image_w = image_size_wh[0, 0]
    image_h = image_size_wh[0, 1]
    assert torch.all(
        image_size_wh[:, 0] == image_w
    ), "All images in a batch must have the same width!"
    assert torch.all(
        image_size_wh[:, 1] == image_h
    ), "All images in a batch must have the same height!"
    # Focal length.
    fx = camera_matrix[:, 0, 0].unsqueeze(1)
    fy = camera_matrix[:, 1, 1].unsqueeze(1)
    # Check that we introduce less than 1% error by averaging the focal lengths.
    fx_y = fx / fy
    if torch.any(fx_y > 1.01) or torch.any(fx_y < 0.99):
        LOGGER.warning(
            "Pulsar only supports a single focal lengths. For converting OpenCV "
            "focal lengths, we average them for x and y directions. "
            "The focal lengths for x and y you provided differ by more than 1%, "
            "which means this could introduce a noticeable error."
        )
    f = (fx + fy) / 2
    # Normalize f into normalized device coordinates.
    focal_length_px = f / image_w
    # Transfer into focal_length and sensor_width.
    focal_length = torch.tensor([znear - 1e-5], dtype=torch.float32, device=R.device)
    focal_length = focal_length[None, :].repeat(batch_size, 1)
    sensor_width = focal_length / focal_length_px
    # Principal point.
    cx = camera_matrix[:, 0, 2].unsqueeze(1)
    cy = camera_matrix[:, 1, 2].unsqueeze(1)
    # Transfer principal point offset into centered offset.
    cx = -(cx - image_w / 2)
    cy = cy - image_h / 2
    # Concatenate to final vector.
    param = torch.cat([focal_length, sensor_width, cx, cy], dim=1)
    # Camera position in world coordinates: -R^T @ t; rotation as 6D rep.
    R_trans = R.permute(0, 2, 1)
    cam_pos = -torch.bmm(R_trans, tvec).squeeze(2)
    cam_rot = matrix_to_rotation_6d(R_trans)
    cam_params = torch.cat([cam_pos, cam_rot, param], dim=1)
    return cam_params
def _pulsar_from_cameras_projection(
    cameras: PerspectiveCameras,
    image_size: torch.Tensor,
) -> torch.Tensor:
    """Convert PyTorch3D cameras to Pulsar parameters via the OpenCV convention."""
    R, tvec, camera_matrix = _opencv_from_cameras_projection(cameras, image_size)
    return _pulsar_from_opencv_projection(R, tvec, camera_matrix, image_size)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
24a344875f69d03d0e0a3a6a855fce30a6b147d7 | 6b0f007ca1d3426c71b2298adac853ddce996b49 | /Schoolwebsite/schoolapp/migrations/0030_auto_20201223_1852.py | ff5d81f44974039c5d2f45fb26b161bd10c597f6 | [] | no_license | AbdurRahman111/Full_School_Management_System | 49b37e8615b94bc20aeabc3ef41e468cf2dd9b47 | 0508fa6ba7b529429c4dae2feeb19a991547457e | refs/heads/master | 2023-02-27T04:24:13.272126 | 2021-02-05T16:17:44 | 2021-02-05T16:17:44 | 336,323,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | # Generated by Django 3.1.4 on 2020-12-23 12:52
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django makemigrations output).

    Alters several date/datetime defaults and creates the
    Dean_login_information model.
    NOTE(review): the datetime.datetime(...) defaults below are literal
    timestamps frozen at generation time, not callables like timezone.now.
    """
    dependencies = [
        ('schoolapp', '0029_auto_20201223_1031'),
    ]
    operations = [
        migrations.AlterField(
            model_name='assignment_comments_all',
            name='time_comment',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 873142)),
        ),
        migrations.AlterField(
            model_name='teacher_assignment_upload_file',
            name='date',
            field=models.DateField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 872143)),
        ),
        migrations.AlterField(
            model_name='teacher_assignment_upload_file',
            name='due_date',
            field=models.DateField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 872143)),
        ),
        migrations.AlterField(
            model_name='timeoff_staffs_teachers',
            name='date',
            field=models.DateField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 874141)),
        ),
        migrations.CreateModel(
            name='Dean_login_information',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Dean_ID', models.CharField(max_length=50)),
                ('Dean_Name', models.CharField(max_length=255)),
                ('Dean_pass', models.CharField(max_length=255)),
                ('phone', models.CharField(max_length=255)),
                ('address', models.CharField(max_length=255)),
                ('dob', models.CharField(max_length=255)),
                ('major', models.CharField(max_length=255)),
                ('IT_Service_info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schoolapp.it_service_login_information')),
            ],
        ),
    ]
| [
"mdabdurrahmanchowdhury1122@gmail.com"
] | mdabdurrahmanchowdhury1122@gmail.com |
62fdac76001172e852e91825b8e6245b3d0c843a | 69d2627942a554d6914ba05de097a290fed66bad | /vb2py/targets/pythoncard/vbcontrols/vbtextfield.py | c37d1f300778de7e1e6b1ae23f4e6f2c677da7be | [
"BSD-3-Clause"
] | permissive | rayzamgh/sumurProjection | 0fcef39cc75e620057b012f1bd35cae1c49a5554 | 847ce71e85093ea5ee668ec61dbfba760ffa6bbd | refs/heads/master | 2020-07-23T23:33:26.621550 | 2019-12-22T05:31:24 | 2019-12-22T05:31:24 | 207,738,494 | 1 | 0 | null | 2019-10-28T16:00:07 | 2019-09-11T06:23:43 | Python | UTF-8 | Python | false | false | 831 | py | from vb2py.targets.pythoncard.controlclasses import VBWrapped, VBWidget
from vb2py.targets.pythoncard import Register
import vb2py.logger
log = vb2py.logger.getLogger("VBTextField")
from PythonCard.components import textfield
import wx
import sys
from PythonCard import event, registry, widget
class VBTextField(VBWidget):
    """Wrapper exposing a PythonCard TextField under VB-style property names."""
    __metaclass__ = VBWrapped  # Python 2 metaclass hook
    # VB property name -> PythonCard attribute name.
    _translations = {
        "Text" : "text",
        "Enabled" : "enabled",
        "Visible" : "visible",
    }
    # VB property name -> (PythonCard tuple attribute, index within it).
    _indexed_translations = {
        "Left" : ("position", 0),
        "Top" : ("position", 1),
        "Width" : ("size", 0),
        "Height" : ("size", 1),
    }
    # The PythonCard component class this wrapper proxies.
    _proxy_for = textfield.TextField
log.debug("Registering VBTextField as '%s'" % sys.modules[__name__].VBTextField)
Register(VBTextField)
| [
"rayzaganteng@gmail.com"
] | rayzaganteng@gmail.com |
1951975481074fd5d822438ba56fe73946a2f7b6 | c0baa78917da5bf81cd04758b127a8d3c5d27da6 | /vize/160401025/client/client.py | c993aad9b32dc6e608e41e9816a5ed83282d47a3 | [
"Unlicense"
] | permissive | nyucel/blm304 | 9e3049e2743e2e9055e8e067724a966e82579d07 | e23f28674229470b5f110ea37428f9c1ca13ac51 | refs/heads/master | 2022-11-07T12:36:12.620417 | 2020-06-27T11:09:59 | 2020-06-27T11:09:59 | 259,326,143 | 14 | 207 | Unlicense | 2020-06-27T11:10:00 | 2020-04-27T13:14:48 | Python | UTF-8 | Python | false | false | 4,818 | py | # -*- coding: utf-8 -*-
"""
@author: Halil İbrahim Koç
"""
import socket
import time
import os
import sys
port=42
buffer=4096
if len(sys.argv) != 2:
print("Host bilgisi girilmelidir.")
sys.exit()
try:
socket.gethostbyname(sys.argv[1])
except socket.error:
print("Host bilgisi gereklidir. Kontrol edip tekrar deneyiniz.")
sys.exit()
host = sys.argv[1]
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Bağlantı başlatılıyor")
s.setblocking(0)
s.settimeout(15)
except socket.error:
print("Başarısız bağlantı")
sys.exit()
while True:
command = input(
"Aşağıdaki komutlardan birini giriniz: \n1. get [dosya ismi]\n2. put [dosya ismi]\n3. list\n4. exit\n ")
"""o get [dosya ismi]
o put [dosya ismi]
o list
o exit"""
clientCommand = command.encode('utf-8')
try:
s.sendto(clientCommand, (host, port))
except ConnectionResetError:
print(
"Port bilgisi yanlış.")
sys.exit()
clientArguments = command.split()
if clientArguments[0] == "get":
try:
ClientData, clientAddr = s.recvfrom(51200)
except:
print("Program zaman aşımına uğradı.")
sys.exit()
text = ClientData.decode('utf8')
print(text)
try:
ClientData2, clientAddr2 = s.recvfrom(buffer)
except:
print("Program zaman aşımına uğradı.")
sys.exit()
text2 = ClientData2.decode('utf8')
print(text2)
if len(text2) < 50:
if clientArguments[0] == "get":
ComingFile = open(clientArguments[1], "wb")
d = 0
try:
# number of paclets
CountC, countaddress = s.recvfrom(buffer)
except:
print("Bağlantı zaman aşımına uğradı.")
sys.exit()
packet1 = CountC.decode('utf8')
packet12 = int(packet1)
while packet12 != 0:
ClientBData, clientbAddr = s.recvfrom(4096)
dataS = ComingFile.write(ClientBData)
d += 1
print("Paket Adedi:" + str(d))
packet12 = packet12 - 1
ComingFile.close()
print(
"Dosya indirildi.")
elif clientArguments[0] == "put":
try:
ClientData, clientAddr = s.recvfrom(buffer)
except:
print("Bağlantı zaman aşımına uğradı.")
sys.exit()
text = ClientData.decode('utf8')
print(text)
if text == "Put fonksiyonu başlatıldı.":
if os.path.isfile(clientArguments[1]):
c = 0
#Length = len(CL1[1])
size = os.stat(clientArguments[1])
sizeS = size.st_size # number of packets
#sizeS = sizeS[:-1]
print("Dosya boyutu(bayt): " + str(sizeS))
Num = int(sizeS / buffer)
Num = Num + 1
print("Gönderilen Paket Sayısı: " + str(Num))
s.sendto(str(Num).encode('utf8'), clientAddr)
packet_num = int(Num)
SendingFile = open(clientArguments[1], "rb")
while packet_num != 0:
Run = SendingFile.read(buffer)
s.sendto(Run, clientAddr)
c += 1
packet_num -= 1
print("Paket Sayısı:" + str(c))
SendingFile.close()
print("İstemciden sunucuya put işlemi sona erdi.")
# s.sendto(str(sizeS).encode('utf8'),clientAddr)
else:
print("İstemcinin bulunduğu dizinde dosya bulunamadı.")
else:
print("Geçersiz.")
elif clientArguments[0] == "list":
try:
ClientData, clientAddr = s.recvfrom(buffer)
ClientData2, clientAddr2 = s.recvfrom(buffer)
except:
print("Bağlantı zaman aşımına uğradı.")
sys.exit()
text = ClientData.decode('utf8')
print(text)
lists=ClientData2.decode('utf8')
print("Server'daki dosyalar:")
lists=lists.split(',')
for i in lists:
print(i)
elif clientArguments[0] == "exit":
print(
"Client ve Server kapatılıyor.")
quit()
print("Client kapatıldı.")
| [
"noreply@github.com"
] | nyucel.noreply@github.com |
eac3e5d6c2b6049b58d419a252bf24a4d5f309b5 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/opcodes/cases/test_set_size_149.py | 2290405a5f8b27eb8d84dfab529207a21fb3edd7 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 829 | py | from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestset_size_149(TestCase):
def setUp(self):
self.maxDiff = None
self.i = Interpreter(debug=True)
def test_opcode_set_size_149(self):
res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/set_size.tz")}"')
self.assertTrue(res['success'])
res = self.i.execute('RUN {} 111')
self.assertTrue(res['success'])
exp_val_expr = michelson_to_micheline('0')
exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
self.assertEqual(exp_val, res['result']['storage']._val)
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
46ae0f16489d49d88a5abf519a6add085e10a25d | 36959b56e506dbbe2d3c381cdccfe16965c14d24 | /Django/alms/leave/models.py | 020e39c03be6ec8eadabbc804613a01e862b778e | [] | no_license | Sathishkumar-M/Django | e2935fe0c69acb4cb39be2bc0504fd3d5619d002 | e54038ef70295274639b6207efe8e7e3939cbe36 | refs/heads/master | 2020-03-21T20:22:48.684770 | 2018-06-28T10:42:51 | 2018-06-28T10:42:51 | 139,003,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
import datetime
# Create your models here.
class LeaveRules(models.Model):
    """A leave type together with its free-text rules."""
    leave_type = models.CharField(max_length=256,blank=False)
    leave_rules = models.TextField(blank=True,default='')
    created_date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        # Identify the record by its leave type in admin/shell output.
        return self.leave_type
    # @property
    # def owner(self):
    #     return self.user
class LeaveApply(models.Model):
    """A single leave application filed by a user, with its approval status."""
    # Requesting user; reverse accessor: user.leaves
    user = models.ForeignKey(User,related_name='leaves')
    # Optional reference to the leave category; reverse accessor: rules.leaves
    leave_type = models.ForeignKey(LeaveRules,related_name='leaves',null=True,blank=True)
    start_date = models.DateField(default=datetime.date.today, blank=True)
    end_date = models.DateField(default='', blank=True)
    no_of_days = models.DecimalField(max_digits=2,decimal_places=0,null=True)
    notes = models.TextField(blank=True,default='')
    # Presumably the person/role this request is tagged to for approval — confirm.
    tag_to = models.CharField(max_length=256,blank=False)
    # Approval state; new requests start as 'Awaiting'.
    status = models.CharField(max_length=256,default='Awaiting')
    # Presumably who changed the status — confirm against the views.
    status_by = models.CharField(max_length=256,default='')
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True,null=True)
    def __str__(self):
        # Shown as the applicant's username in admin/shell output.
        return self.user.username
| [
"sathishkumar.appiness@gmail.com"
] | sathishkumar.appiness@gmail.com |
c95ef593fefb765b7876720245730eeb8f614d53 | 89155ebee895cbd04e4eb7a9d079a820d90ffd7e | /viewset_modelviewset_application/app/migrations/0004_auto_20210122_0531.py | 7d4d27a8ea437a57f34a859481d6f08daaf624ca | [] | no_license | mahmudgithub/Rest-api-playground | 822c0671b534fc057461703711ef980d9d31ce56 | a452a329d60c9104afdeadde13f7493741e4914a | refs/heads/master | 2023-03-31T17:23:13.605754 | 2021-04-11T14:10:31 | 2021-04-11T14:10:31 | 331,842,045 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # Generated by Django 3.1.4 on 2021-01-22 13:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: shrink One.city to a CharField(max_length=100)."""
    dependencies = [
        ('app', '0003_one_city'),
    ]
    operations = [
        migrations.AlterField(
            model_name='one',
            name='city',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"mahmudhossain838@gmail.com"
] | mahmudhossain838@gmail.com |
28cce25feb029f8c2e58daa7e26b3c7c31e89446 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_165/run_cfg.py | 4b626943fbef786d0d255b19f5fe1f78243bbfcc | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1739.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_174.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1740.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1741.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1742.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
849e4f1ba87609eb8511f0ba7b7db5d349078f29 | 8b44c7f5f9c2264fd5bfe91ea324fdbd53813413 | /algorithms/DQN/train_DQN.py | 0d2c4954ac20e0ad3bed569e5108a376d106f5d8 | [
"MIT"
] | permissive | syd951186545/reinforce_py | ee33a63d6c8c94c3318877460a49470ef7788036 | 46769da50aea65346cd3a300b55306d25f1f2683 | refs/heads/master | 2020-05-14T13:54:32.067888 | 2018-06-08T14:43:45 | 2018-06-08T14:43:45 | 181,823,264 | 1 | 0 | MIT | 2019-04-17T05:31:04 | 2019-04-17T05:31:00 | Python | UTF-8 | Python | false | false | 5,083 | py | from __future__ import print_function
from __future__ import division
import os
import argparse
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from agent import DQN
def main(args):
    """Train a DQN agent on CartPole-v0, checkpointing the best evaluation.

    Restores a pretrained model when args.model_path is given (episode base
    and best score are parsed from the path); otherwise initialises fresh
    variables. Every args.log_every episodes the greedy policy is evaluated
    over args.test_ep episodes and the model is saved when it matches or
    beats the best mean reward so far. Finally plots the running-average
    reward curve against training steps.
    """
    set_random_seed(args.seed)
    env = gym.make('CartPole-v0')
    agent = DQN(env, args)
    agent.construct_model(args.gpu)

    # load pretrained models or init a new model.
    saver = tf.train.Saver(max_to_keep=1)
    if args.model_path is not None:
        # Checkpoint names are "<best_mean_reward>_<episode>"; recover both.
        saver.restore(agent.sess, args.model_path)
        ep_base = int(args.model_path.split('_')[-1])
        best_mean_rewards = float(args.model_path.split('/')[-1].split('_')[0])
    else:
        agent.sess.run(tf.global_variables_initializer())
        ep_base = 0
        best_mean_rewards = None

    rewards_history, steps_history = [], []
    train_steps = 0
    # Training
    for ep in range(args.max_ep):
        state = env.reset()
        ep_rewards = 0
        for step in range(env.spec.timestep_limit):
            # pick action (epsilon-greedy during training)
            action = agent.sample_action(state, policy='egreedy')
            # Execute action.
            next_state, reward, done, debug = env.step(action)
            train_steps += 1
            ep_rewards += reward
            # modified (shaped) reward to speed up learning
            reward = 0.1 if not done else -1
            # Learn and update net parameters
            agent.learn(state, action, reward, next_state, done)
            state = next_state
            if done:
                break
        steps_history.append(train_steps)
        # Exponential running average of episode rewards for the plot.
        if not rewards_history:
            rewards_history.append(ep_rewards)
        else:
            rewards_history.append(
                rewards_history[-1] * 0.9 + ep_rewards * 0.1)
        # Decay epsilon linearly from init_epsilon towards final_epsilon.
        if agent.epsilon > args.final_epsilon:
            agent.epsilon -= (args.init_epsilon - args.final_epsilon) / args.max_ep
        # Evaluate during training
        if ep % args.log_every == args.log_every-1:
            total_reward = 0
            for i in range(args.test_ep):
                state = env.reset()
                for j in range(env.spec.timestep_limit):
                    # Greedy (no-exploration) policy for evaluation.
                    action = agent.sample_action(state, policy='greedy')
                    state, reward, done, _ = env.step(action)
                    total_reward += reward
                    if done:
                        break
            current_mean_rewards = total_reward / args.test_ep
            print('Episode: %d Average Reward: %.2f' %
                  (ep + 1, current_mean_rewards))
            # save model if the current model outperforms the old one
            if best_mean_rewards is None or (current_mean_rewards >= best_mean_rewards):
                best_mean_rewards = current_mean_rewards
                if not os.path.isdir(args.save_path):
                    os.makedirs(args.save_path)
                save_name = args.save_path + str(round(best_mean_rewards, 2)) \
                    + '_' + str(ep_base + ep + 1)
                saver.save(agent.sess, save_name)
                print('Model saved %s' % save_name)

    # plot training rewards
    plt.plot(steps_history, rewards_history)
    plt.xlabel('steps')
    plt.ylabel('running avg rewards')
    plt.show()
def _str2bool(value):
    """Convert a command-line flag string to a real bool.

    argparse's `type=bool` treats any non-empty string (including "False")
    as true, so parse the common false spellings explicitly.
    """
    if isinstance(value, bool):
        return value
    return value.lower() not in ('false', '0', 'no', 'n', 'f')


def args_parse():
    """Build and parse the command-line arguments for DQN training.

    Returns an `argparse.Namespace`. Fixes over the previous version:
    `--log_every`, `--gpu` and `--seed` now carry `type=int` (they
    previously arrived as strings when given on the command line, which
    would break e.g. `ep % args.log_every`), and `--double_q` parses
    "False"/"0"/"no" as False instead of treating any string as truthy.
    Defaults are unchanged, so existing invocations behave the same.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path', default=None,
        help='Whether to use a saved model. (*None|model path)')
    parser.add_argument(
        '--save_path', default='./models/',
        help='Path to save a model during training.')
    parser.add_argument(
        '--double_q', type=_str2bool, default=True,
        help='enable or disable double dqn')
    parser.add_argument(
        '--log_every', type=int, default=500,
        help='Log and save model every x episodes')
    parser.add_argument(
        '--gpu', type=int, default=-1,
        help='running on a specify gpu, -1 indicates using cpu')
    parser.add_argument(
        '--seed', type=int, default=31, help='random seed')
    parser.add_argument(
        '--max_ep', type=int, default=2000, help='Number of training episodes')
    parser.add_argument(
        '--test_ep', type=int, default=50, help='Number of test episodes')
    parser.add_argument(
        '--init_epsilon', type=float, default=0.75, help='initial epsilon')
    parser.add_argument(
        '--final_epsilon', type=float, default=0.2, help='final epsilon')
    parser.add_argument(
        '--buffer_size', type=int, default=50000, help='Size of memory buffer')
    parser.add_argument(
        '--lr', type=float, default=1e-4, help='Learning rate')
    parser.add_argument(
        '--batch_size', type=int, default=128, help='Size of training batch')
    parser.add_argument(
        '--gamma', type=float, default=0.99, help='Discounted factor')
    parser.add_argument(
        '--target_network_update', type=int, default=1000,
        help='update frequency of target network.')
    return parser.parse_args()
def set_random_seed(seed):
    """Seed both TensorFlow's and NumPy's RNGs so runs are reproducible."""
    tf.set_random_seed(seed)
    np.random.seed(seed)
# Script entry point: parse the CLI arguments and start training.
if __name__ == '__main__':
    main(args_parse())
| [
"badbobobo@gmail.com"
] | badbobobo@gmail.com |
26dd0cdf3db785f86670a75ed04fad31e9b09252 | 8202512dc4fef386dc927fa60c22596e149fb6f0 | /venv/bin/gunicorn | 0092995224d9586509475559b4ecc8a8608582ec | [] | no_license | dimoka777/tv | 6e8d6e754383a897588ddd203e0670650c22245e | 8159cb76c695802efb4a30ecc2cc5a71960a23c0 | refs/heads/master | 2020-06-19T05:18:59.333318 | 2019-07-13T15:19:50 | 2019-07-13T15:19:50 | 196,577,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/dimoka/Django/tv/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script entry point that launches gunicorn.
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    # Strip a trailing '-script.py' / '.exe' from argv[0] (standard
    # boilerplate for setuptools-style wrapper scripts) and hand control
    # to gunicorn's WSGI application runner, propagating its exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"dimokavip777@gmail.com"
] | dimokavip777@gmail.com | |
e278ef7ff6ed2f42a6e7361f4d93862a94eec361 | 6c96601d64a02c4050410c5e45efa6e1bd8253f6 | /wikked/web.py | c300577283de746aa268b31b14b5208e90308176 | [
"Apache-2.0"
] | permissive | ludovicchabant/Wikked | 7c0627f513f5ccbe052484dcb1ae89336e552cf4 | 02ec3c0361ac90b0366e7a90f8928a54d40616b5 | refs/heads/master | 2022-12-03T08:12:08.621371 | 2019-03-09T07:41:40 | 2019-03-09T07:41:40 | 15,740,703 | 17 | 0 | Apache-2.0 | 2022-12-03T01:25:58 | 2014-01-08T16:17:33 | Python | UTF-8 | Python | false | false | 7,577 | py | import os
import os.path
import logging
import urllib.parse
from werkzeug import SharedDataMiddleware
from flask import Flask, abort, g
from wikked.wiki import Wiki, WikiParameters
# Create the main app.
static_folder = os.path.join(os.path.dirname(__file__), 'static')
app = Flask(
'wikked',
static_folder=static_folder,
static_url_path='/static')
app.config.from_object('wikked.settings')
app.config.from_envvar('WIKKED_SETTINGS', silent=True)
# Setup some config defaults.
app.config.setdefault('SQL_DEBUG', False)
app.config.setdefault('SQL_COMMIT_ON_TEARDOWN', False)
app.config.setdefault('WIKI_ROOT', None)
app.config.setdefault('WIKI_UPDATE_ON_START', True)
app.config.setdefault('WIKI_AUTO_RELOAD', False)
app.config.setdefault('WIKI_ASYNC_UPDATE', False)
app.config.setdefault('WIKI_SERVE_FILES', False)
app.config.setdefault('WIKI_BROKER_URL',
'sqla+sqlite:///%(root)s/.wiki/broker.db')
app.config.setdefault('WIKI_NO_FLASK_LOGGER', False)
app.config.setdefault('WIKI_STYLESHEET', None)
app.config.setdefault('PROFILE', False)
app.config.setdefault('PROFILE_DIR', None)
app.config.setdefault('INFLUXDB_HOST', None)
app.config.setdefault('INFLUXDB_PORT', 8086)
app.config.setdefault('INFLUXDB_USERNAME', 'root')
app.config.setdefault('INFLUXDB_PASSWORD', 'root')
app.config.setdefault('INFLUXDB_DATABASE', 'database')
if app.config['WIKI_NO_FLASK_LOGGER']:
app.logger.handlers = []
# Find the wiki root, and further configure the app if there's a
# config file in there.
wiki_root = app.config['WIKI_ROOT']
if wiki_root is None:
from wikked.utils import find_wiki_root
wiki_root = find_wiki_root()
if wiki_root is None:
raise Exception("Can't find the wiki root to use.")
config_path = os.path.join(wiki_root, '.wiki', 'app.cfg')
if os.path.isfile(config_path):
app.config.from_pyfile(config_path)
# Make the app serve static content and wiki assets in DEBUG mode.
app.config['WIKI_ROOT'] = wiki_root
app.config['WIKI_FILES_DIR'] = os.path.join(wiki_root, '_files')
if app.config['WIKI_SERVE_FILES']:
app.wsgi_app = SharedDataMiddleware(
app.wsgi_app,
{'/files': app.config['WIKI_FILES_DIR']})
# Add a special route for the `.well-known` directory.
app.wsgi_app = SharedDataMiddleware(
app.wsgi_app,
{'/.well-known': os.path.join(wiki_root, '.well-known')})
# Profiling
if app.config['PROFILE']:
profile_dir = app.config['PROFILE_DIR']
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, profile_dir=profile_dir)
# Customize logging.
if app.config['DEBUG']:
lg = logging.getLogger('wikked')
lg.setLevel(logging.DEBUG)
if app.config['SQL_DEBUG']:
lg = logging.getLogger('sqlalchemy')
lg.setLevel(logging.DEBUG)
app.logger.debug("Creating Flask application...")
# This lets components further modify the wiki that's created for
# each request.
app.wikked_post_init = []
# When requested, set the wiki as a request global.
def get_wiki():
    """Return the per-request `Wiki` instance, creating it on first use.

    The instance is memoized on Flask's request-global `g`, so repeated
    calls within one request share a single, already-started wiki.
    """
    wiki = getattr(g, '_wiki', None)
    if wiki is None:
        wiki = Wiki(app.wiki_params)
        for i in app.wikked_post_init:
            i(wiki)
        wiki.start()
        # Bug fix: the cache is *read* from `g._wiki` above, but the old
        # code only stored to `g.wiki`, so the memoization never hit and a
        # fresh Wiki was built and started on every call. Store under both
        # names: `_wiki` fixes the cache, `wiki` stays available for any
        # existing readers of `g.wiki`.
        g._wiki = g.wiki = wiki
    return wiki
# Set the default wiki parameters, unless the config supplies a factory.
app.wiki_params = app.config.get('WIKI_FACTORY_PARAMETERS', None)
if app.wiki_params is None:
    app.wiki_params = WikiParameters(wiki_root)
# Just uncache pages when the user has edited one.
def uncaching_wiki_updater(wiki, url):
    """Synchronous updater: invalidate cached pages after `url` is edited."""
    app.logger.debug("Uncaching all pages because %s was edited." % url)
    wiki.db.uncachePages(except_url=url, only_required=True)
app.wiki_params.wiki_updater = uncaching_wiki_updater
# Login extension.
def user_loader(username):
    """Flask-Login callback: look up a user by name in the wiki's auth db."""
    wiki = get_wiki()
    return wiki.auth.getUser(username)
# Setup the Jinja environment.
def get_read_url(url):
    """Return the page-read endpoint for *url* (leading slashes dropped)."""
    page_path = url.lstrip('/')
    return '/read/%s' % page_path
def get_edit_url(url):
    """Return the page-edit endpoint for *url* (leading slashes dropped)."""
    return ''.join(('/edit/', url.lstrip('/')))
def get_rev_url(url, rev):
    """Return the endpoint showing *url* at a specific revision *rev*."""
    query = urllib.parse.urlencode({'rev': rev})
    return '/rev/' + url.lstrip('/') + '?' + query
def get_diff_url(url, rev1=None, rev2=None):
    """Return the diff endpoint for *url*.

    Revisions that are given (either or both) are appended as `rev1`/`rev2`
    query arguments; with neither, the bare diff URL is returned.
    """
    args = {}
    if rev1 is not None:
        args['rev1'] = rev1
    if rev2 is not None:
        args['rev2'] = rev2
    path = url.lstrip('/')
    # Idiom fix: truthiness test instead of the old `len(args) > 0`.
    if args:
        return '/diff/%s?%s' % (path, urllib.parse.urlencode(args))
    return '/diff/%s' % path
# Expose the URL helpers to Jinja templates.
app.jinja_env.globals.update({
    'get_read_url': get_read_url,
    'get_edit_url': get_edit_url,
    'get_rev_url': get_rev_url,
    'get_diff_url': get_diff_url
})
# Flask-Login wiring; unauthenticated requests get a plain 401 rather
# than a login-page redirect.
from flask_login import LoginManager  # NOQA
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.user_loader(user_loader)
login_manager.unauthorized_handler(lambda: abort(401))
# Bcrypt extension.
from wikked.bcryptfallback import Bcrypt  # NOQA
app.bcrypt = Bcrypt(app)
# Import the views.
import wikked.commonroutes # NOQA
import wikked.api.admin # NOQA
import wikked.api.edit # NOQA
import wikked.api.history # NOQA
import wikked.api.read # NOQA
import wikked.api.special # NOQA
import wikked.api.user # NOQA
import wikked.views.admin # NOQA
import wikked.views.edit # NOQA
import wikked.views.error # NOQA
import wikked.views.history # NOQA
import wikked.views.read # NOQA
import wikked.views.special # NOQA
import wikked.views.user # NOQA
# Async wiki update.
if app.config['WIKI_ASYNC_UPDATE']:
    app.logger.debug("Will use Celery tasks to update the wiki...")
    from wikked.tasks import celery_app, update_wiki
    # Configure Celery, filling the %(root)s placeholder in the broker URL.
    app.config['WIKI_BROKER_URL'] = app.config['WIKI_BROKER_URL'] % (
        {'root': wiki_root})
    celery_app.conf.update(app.config)
    app.logger.debug("Using Celery broker: %s" % app.config['WIKI_BROKER_URL'])
    # Make the wiki use the background update task. This replaces the
    # synchronous `uncaching_wiki_updater` assigned earlier.
    def async_updater(wiki):
        """Updater hook that queues a Celery task instead of blocking."""
        app.logger.debug("Running update task on Celery.")
        update_wiki.delay(wiki.root)
    app.wiki_params.wiki_updater = async_updater
# InfluxDB metrics: only active when an InfluxDB host is configured.
if app.config['INFLUXDB_HOST']:
    try:
        import influxdb
    except ImportError:
        raise Exception("Please install the `influxdb` package if you need "
                        "analytics for your Wikked app.")
    host = app.config['INFLUXDB_HOST']
    port = app.config['INFLUXDB_PORT']
    username = app.config['INFLUXDB_USERNAME']
    password = app.config['INFLUXDB_PASSWORD']
    database = app.config['INFLUXDB_DATABASE']
    metrics_db = influxdb.InfluxDBClient(host, port, username, password,
                                         database)
    app.logger.info("Opening InfluxDB %s on %s:%s as %s." % (
        database, host, port, username))
    import time
    from flask import request, request_started, request_tearing_down
def on_request_started(sender, **extra):
g.metrics_start_time = time.clock()
def on_request_tearing_down(sender, **extra):
duration = time.clock() - g.metrics_start_time
data = [
{
"name": "requests",
"columns": ["request_path", "compute_time"],
"points": [
[str(request.path), duration]
]
}
]
metrics_db.write_points(data)
request_started.connect(on_request_started, app)
request_tearing_down.connect(on_request_tearing_down, app)
| [
"ludovic@chabant.com"
] | ludovic@chabant.com |
656e3ca9877621760222278ef164bbd08c56b93f | a870e1db82fbf8f57b9d5fb4ebdc5f205df1a063 | /web/settings_local.py | 5c9f3553952426e2191ef7bf823ec68d679890c2 | [] | no_license | mireq/django-frontend-template | 36692d705dd84513e1389379219bdb619c9d1f8a | 9f3bd4e3e9374deb77586374a6ed62f6e6ccb316 | refs/heads/master | 2023-08-16T19:23:09.761808 | 2023-08-13T04:12:38 | 2023-08-13T04:12:38 | 50,784,205 | 0 | 1 | null | 2023-08-13T04:12:40 | 2016-01-31T16:44:42 | Python | UTF-8 | Python | false | false | 167 | py | # pylint: disable=wildcard-import,unused-wildcard-import
# Local development overrides on top of the shared settings module.
from .settings import *
COMPRESS_POSTCSS_BINARY = 'postcss' # path to postcss binary
# Accept any Host header — convenient locally, not suitable for production.
ALLOWED_HOSTS = ['*']
| [
"miroslav.bendik@gmail.com"
] | miroslav.bendik@gmail.com |
1375c8329253029ffd84e0e0fcc00fa5367fdf5d | 480e33f95eec2e471c563d4c0661784c92396368 | /GeneratorInterface/PomwigInterface/python/POMWIG_SingleDiffractiveMinusWmunu_10TeV_cff.py | b86f148ed4e8c4c5f1a7b01056222a8797b3ed6e | [
"Apache-2.0"
] | permissive | cms-nanoAOD/cmssw | 4d836e5b76ae5075c232de5e062d286e2026e8bd | 4eccb8a758b605875003124dd55ea58552b86af1 | refs/heads/master-cmsswmaster | 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 | Apache-2.0 | 2022-05-23T07:58:09 | 2017-09-08T14:03:57 | C++ | UTF-8 | Python | false | false | 1,323 | py | import FWCore.ParameterSet.Config as cms
# Common HERWIG 6 generator settings shared by the POMWIG filter below.
herwig6Parameters = cms.PSet(
    comEnergy = cms.double(10000.0),
    useJimmy = cms.bool(False),
    doMPInteraction = cms.bool(False),
    herwigHepMCVerbosity = cms.untracked.bool(False),
    herwigVerbosity = cms.untracked.int32(1),
    printCards = cms.untracked.bool(True),
    maxEventsToPrint = cms.untracked.int32(2),
    crossSection = cms.untracked.double(-1.0),
    filterEfficiency = cms.untracked.double(1.0),
)
source = cms.Source("EmptySource")
# Single-diffractive W -> mu nu production via the POMWIG generator.
generator = cms.EDFilter("PomwigGeneratorFilter",
    herwig6Parameters,
    HerwigParameters = cms.PSet(
        parameterSets = cms.vstring('SDInclusiveWmunu'),
        SDInclusiveWmunu = cms.vstring('NSTRU = 14 ! H1 Pomeron Fit B',
            'Q2WWMN = 1E-6 ! Minimum |t|',
            'Q2WWMX = 4.0 ! Maximum |t|',
            'YWWMIN = 1E-6 ! Minimum xi',
            'YWWMAX = 0.2 ! Maximum xi',
            'IPROC = 11452 ! Process PomP -> W -> munu',
            'MODPDF(1) = 10150 ! Set MODPDF CTEQ61',
            'MODPDF(2) = -1 ! Set MODPDF')
    ),
    diffTopology = cms.int32(2),
    survivalProbability = cms.double(0.05),
    h1fit = cms.int32(2),
    doPDGConvert = cms.bool(False)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
f27b89cf6bda2b7d42a99756ab87727eb21adda5 | 76c40ae54b4985cdb0b4692106795ded8115da42 | /train_cross_domain.py | 78924d5ad3718d7470998843a2f82cac444aec26 | [] | no_license | leosampaio/keras-generative | 1b23ea6b18af8c09bdf08c30dc6a0428c06eb385 | ffafbae19d24c0ce7e812f610c4754a343400a9e | refs/heads/master | 2021-05-09T09:04:43.636608 | 2018-11-21T17:26:28 | 2018-11-21T17:26:28 | 119,418,618 | 1 | 1 | null | 2018-01-29T17:48:49 | 2018-01-29T17:48:49 | null | UTF-8 | Python | false | false | 4,264 | py | import os
import sys
import math
import argparse
from keras import backend as K
import numpy as np
from sklearn.preprocessing import LabelBinarizer
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import matplotlib
matplotlib.use('Agg')
from models import models
from datasets import load_dataset
def main():
    """Parse CLI arguments, build the requested GAN/VAE model and train it."""
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Training GANs or VAEs')
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--batchsize', type=int, default=50)
    parser.add_argument('--output', default='output')
    parser.add_argument('--zdims', type=int, default=256)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--testmode', action='store_true')
    parser.add_argument('--conditional', action='store_true')
    parser.add_argument('--aux-classifier', action='store_true')
    parser.add_argument('--label-smoothing', default=0.0, type=float)
    parser.add_argument('--input-noise', default=0.0, type=float)
    parser.add_argument('--run-id', '-r', default=1, type=int)
    parser.add_argument('--checkpoint-every', default=1, type=int)
    parser.add_argument('--notify-every', default=1, type=int)
    parser.add_argument('--triplet-margin', default=1., type=float)
    parser.add_argument('--triplet-weight', default=1., type=float)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--n-layers-to-share', default=0, type=int)
    parser.add_argument('--submodels', nargs=2,
                        help="Submodels used to build the bigger one",
                        required=True)
    parser.add_argument('--resume-submodels', nargs=2,
                        help="Submodels pretrained weights")
    parser.add_argument('--dis-loss-control', default=1., type=float)
    args = parser.parse_args()

    # select gpu and limit resources if applicable
    if 'tensorflow' == K.backend():
        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(args.gpu)
        set_session(tf.Session(config=config))

    # Make the output directory. `makedirs(..., exist_ok=True)` replaces
    # the old isdir()+mkdir() pair: it avoids the check-then-create race
    # and also works when args.output is a nested path.
    os.makedirs(args.output, exist_ok=True)

    # load datasets
    dataset = load_dataset(args.dataset)

    # Construct model
    if args.model not in models:
        raise Exception('Unknown model:', args.model)
    model = models[args.model](
        input_shape=dataset.shape[1:],
        z_dims=args.zdims,
        output=args.output,
        label_smoothing=args.label_smoothing,
        input_noise=args.input_noise,
        run_id=args.run_id,
        test_mode=args.testmode,
        checkpoint_every=args.checkpoint_every,
        notify_every=args.notify_every,
        aux_classifier=args.aux_classifier,
        is_conditional=args.conditional,
        conditional_dims=len(dataset.attr_names),
        triplet_margin=args.triplet_margin,
        triplet_weight=args.triplet_weight,
        lr=args.lr,
        submodels=args.submodels,
        dis_loss_control=args.dis_loss_control,
        submodels_weights=args.resume_submodels,
        permutation_matrix_shape=(len(dataset), dataset.mirror_len)
    )

    if args.resume or args.resume_submodels:
        model.load_model(args.resume)

    # generate random samples to evaluate generated results over time
    # use the same samples for all trainings - useful when resuming training
    np.random.seed(14)
    samples = np.random.normal(size=(100, args.zdims)).astype(np.float32)
    # one-hot conditional label per sample, cycling over the attributes
    conditionals_for_samples = np.array(
        [LabelBinarizer().fit_transform(
            range(0, len(dataset.attr_names)))
            [i % len(dataset.attr_names)] for i in range(100)])
    # re-randomize the global RNG after the fixed-seed sample generation
    np.random.seed()

    model.main_loop(dataset, samples,
                    samples_conditionals=conditionals_for_samples,
                    epochs=args.epoch,
                    batchsize=args.batchsize)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"leo.sampaio.ferraz.ribeiro@gmail.com"
] | leo.sampaio.ferraz.ribeiro@gmail.com |
04b1d77dab59666059f65cc7262e758d96a4570f | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/PYTHON_PRAC/python-mega-algo/conversions/hexadecimal_to_decimal.py | beb1c2c3ded67ee5fca164a33d6462fa7c495ebb | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,509 | py | hex_table = {hex(i)[2:]: i for i in range(16)} # Use [:2] to strip off the leading '0x'
def hex_to_decimal(hex_string: str) -> int:
"""
Convert a hexadecimal value to its decimal equivalent
#https://www.programiz.com/python-programming/methods/built-in/hex
>>> hex_to_decimal("a")
10
>>> hex_to_decimal("12f")
303
>>> hex_to_decimal(" 12f ")
303
>>> hex_to_decimal("FfFf")
65535
>>> hex_to_decimal("-Ff")
-255
>>> hex_to_decimal("F-f")
Traceback (most recent call last):
...
ValueError: Non-hexadecimal value was passed to the function
>>> hex_to_decimal("")
Traceback (most recent call last):
...
ValueError: Empty string was passed to the function
>>> hex_to_decimal("12m")
Traceback (most recent call last):
...
ValueError: Non-hexadecimal value was passed to the function
"""
hex_string = hex_string.strip().lower()
if not hex_string:
raise ValueError("Empty string was passed to the function")
is_negative = hex_string[0] == "-"
if is_negative:
hex_string = hex_string[1:]
if not all(char in hex_table for char in hex_string):
raise ValueError("Non-hexadecimal value was passed to the function")
decimal_number = 0
for char in hex_string:
decimal_number = 16 * decimal_number + hex_table[char]
return -decimal_number if is_negative else decimal_number
if __name__ == "__main__":
from doctest import testmod
testmod()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
f69c7a91af50182b1bf902e53170333407c5b9b3 | d145eb9a03200855caddbf53da53ee092386f93f | /python/etcd/test4.py | 40b212bbfbc7814103a5054b2c71eebf234395df | [] | no_license | oberstet/scratchbox | 0ecb44df40664526c4eab6dae69837735e8cf7fe | 87ac59b98782c70888df24b633f890e3305e7c8c | refs/heads/master | 2022-11-08T03:36:36.108804 | 2022-10-22T09:52:20 | 2022-10-22T09:52:20 | 3,698,059 | 26 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py | from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
import etcd
import txaio
@inlineCallbacks
def main(reactor):
    """Demo of the Twisted etcd client: get/set/delete, ranges, watches.

    Runs as an inlineCallbacks coroutine under `react`; assumes an etcd
    server is reachable on localhost:2379.
    """
    # a Twisted etcd client
    client = etcd.Client(reactor, u'http://localhost:2379')
    # get etcd status
    status = yield client.status()
    print(status)
    # get value for a key (the client raises IndexError for a missing key)
    try:
        value = yield client.get(b'/cf/foo')
        print('value={}'.format(value))
    except IndexError:
        print('no such key =(')
    # set a value for some keys; each set returns a new revision
    for i in range(3):
        rev = yield client.set('/cf/foo0{}'.format(i).encode(), b'woa;)')
        print('value set, revision={}'.format(rev))
    # delete key
    key = u'/cf/foo02'.encode()
    rev = yield client.delete(key)
    print(rev)
    # iterate over key range (maybe an async iter in the future?)
    pairs = yield client.get(b'/cf/foo01', b'/cf/foo05')
    for key, value in pairs.items():
        print('key={}: {}'.format(key, value))
    # iterate over keys with given prefix
    pairs = yield client.get(b'/cf/foo0', prefix=True)
    for key, value in pairs.items():
        print('key={}: {}'.format(key, value))
    # watch keys for change events
    prefixes = [b'/cf/', b'/foo/']
    # our callback that will be invoked for every change event
    def on_watch(key, value):
        print('watch callback fired for key {}: {}'.format(key, value))
    # start watching on given key prefixes
    d = client.watch(prefixes, on_watch)
    # sleep for n seconds ..
    delay = 10
    print('watching {} for {} seconds ..'.format(prefixes, delay))
    yield sleep(delay)
    # .. and stop watching
    yield d.cancel()
    # TODO: submit transaction
    # TODO: create lease
# Entry point: enable txaio logging and drive `main` with Twisted's reactor.
if __name__ == '__main__':
    txaio.start_logging(level='info')
    react(main)
| [
"tobias.oberstein@tavendo.de"
] | tobias.oberstein@tavendo.de |
769f7cb542a82ef942a57683d24a36f3ba41cbc1 | 0be644ba0208b1f9f93018f74580fd5047618a3b | /src/industries/power_plant.py | aa022bf8d0e6cd3ab277a822db436cfdd9905568 | [] | no_license | ebla71/XIS | 1ba48f019c063132b561d8c2e469634349b96aae | 9f96b8f210b91da29ce0d3b34e896f629fc7dede | refs/heads/main | 2023-08-22T11:45:22.723296 | 2021-10-05T09:11:11 | 2021-10-05T09:11:11 | 355,645,289 | 0 | 0 | null | 2021-09-25T21:51:01 | 2021-04-07T18:29:02 | Python | UTF-8 | Python | false | false | 4,892 | py | from industry import IndustryTertiary, TileLocationChecks
# Black-hole tertiary industry: consumes fuel cargo, produces nothing.
industry = IndustryTertiary(id='power_plant',
                            accept_cargo_types=['COAL', 'PETR', 'PEAT'],
                            prod_cargo_types=[],
                            prob_in_game='3',
                            prob_random='5',
                            prod_multiplier='[0, 0]',
                            map_colour='168',
                            life_type='IND_LIFE_TYPE_BLACK_HOLE',
                            prospect_chance='0.75',
                            name='string(STR_IND_POWER_PLANT)',
                            nearby_station_name='string(STR_STATION_POWERHUNGRY)',
                            fund_cost_multiplier='15',
                            intro_year=1900)
# Per-economy availability; the arctic economy burns peat only.
industry.economy_variations['FIRS'].enabled = True
industry.economy_variations['BASIC_ARCTIC'].enabled = True
industry.economy_variations['BASIC_ARCTIC'].accept_cargo_types = ['PEAT']
# Single tile type shared by all layouts below.
industry.add_tile(id='power_plant_tile_1',
                  animation_length=7,
                  animation_looping=True,
                  animation_speed=3,
                  custom_animation_control={'macro': 'random_first_frame',
                                            'animation_triggers': 'bitmask(ANIM_TRIGGER_INDTILE_CONSTRUCTION_STATE)'},
                  location_checks=TileLocationChecks(require_effectively_flat=True,
                                                     disallow_industry_adjacent=True))
# Ground and building sprites (numeric ids are base-set sprite numbers).
sprite_ground = industry.add_sprite(
    sprite_number='GROUNDTILE_MUD_TRACKS'
)
sprite_ground_overlay = industry.add_sprite(
    sprite_number='GROUNDTILE_MUD_TRACKS'
)
sprite_1 = industry.add_sprite(
    sprite_number='2047'
)
sprite_2 = industry.add_sprite(
    sprite_number='2050'
)
sprite_3 = industry.add_sprite(
    sprite_number='2053'
)
sprite_4 = industry.add_sprite(
    sprite_number='2054'
)
sprite_smoke_1 = industry.add_smoke_sprite(
    smoke_type='white_smoke_big',
    xoffset=3,
    yoffset=0,
    zoffset=36
)
# Sprite layouts: one per visual building variant.
industry.add_spritelayout(
    id='power_plant_spritelayout_cooling_tower',
    ground_sprite=sprite_ground,
    ground_overlay=sprite_ground_overlay,
    building_sprites=[sprite_1],
)
industry.add_spritelayout(
    id='power_plant_spritelayout_large_building',
    ground_sprite=sprite_ground,
    ground_overlay=sprite_ground_overlay,
    building_sprites=[sprite_2],
    smoke_sprites=[sprite_smoke_1],
)
industry.add_spritelayout(
    id='power_plant_spritelayout_small_building',
    ground_sprite=sprite_ground,
    ground_overlay=sprite_ground_overlay,
    building_sprites=[sprite_3],
)
industry.add_spritelayout(
    id='power_plant_spritelayout_substation',
    ground_sprite=sprite_ground,
    ground_overlay=sprite_ground_overlay,
    building_sprites=[sprite_4],
)
# Industry layouts: (x, y, tile id, spritelayout id) per occupied tile.
industry.add_industry_layout(
    id='power_plant_industry_layout_1',
    layout=[(0, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (0, 1, 'power_plant_tile_1', 'power_plant_spritelayout_small_building'),
            (1, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (1, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
            (2, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (2, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
            (3, 0, 'power_plant_tile_1', 'power_plant_spritelayout_substation'),
            (3, 1, 'power_plant_tile_1', 'power_plant_spritelayout_substation')]
)
industry.add_industry_layout(
    id='power_plant_industry_layout_2',
    layout=[(0, 1, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (0, 2, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (1, 0, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
            (1, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
            (1, 2, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (2, 0, 'power_plant_tile_1', 'power_plant_spritelayout_small_building'),
            (2, 1, 'power_plant_tile_1', 'power_plant_spritelayout_substation'),
            (2, 2, 'power_plant_tile_1', 'power_plant_spritelayout_small_building')]
)
industry.add_industry_layout(
    id='power_plant_industry_layout_3',
    layout=[(0, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (0, 1, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
            (1, 0, 'power_plant_tile_1', 'power_plant_spritelayout_small_building'),
            (1, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
            (2, 0, 'power_plant_tile_1', 'power_plant_spritelayout_substation'),
            (2, 1, 'power_plant_tile_1', 'power_plant_spritelayout_small_building')]
)
| [
"jake.dander@gmail.com"
] | jake.dander@gmail.com |
13d0d57dd8b47b5a6e7bcc2d381ee1431205e156 | 84e13b07d2c1c2ee9bc670bbc78a677358f4713d | /0x07-python-test_driven_development/4-print_square.py | 6541eb37b96a7ee8c04013627e37407eb14cc943 | [] | no_license | thegermanblob/holbertonschool-higher_level_programming | b3ad5da5e120df1bced24313af50e2399f43a75c | f1b91a6cc1b9c3dd51dbcf83e61f0a084253c0be | refs/heads/main | 2023-09-05T09:00:56.464406 | 2021-11-22T14:58:37 | 2021-11-22T14:58:37 | 361,759,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | #!/usr/bin/python3
""" Module that contains square """
def print_square(size):
    """ Print a size-by-size square of '#' characters.

    Raises TypeError if size is not an int, ValueError if it is negative.
    A size of 0 prints a single empty line (unchanged behaviour).
    """
    if type(size) is not int:
        raise TypeError("size must be an integer")
    if size < 0:
        raise ValueError("size must be >= 0")
    if size == 0:
        print("")
        return
    for _ in range(size):
        print("#" * size)
| [
"thegermanblob@gmail.com"
] | thegermanblob@gmail.com |
0264140fed967a1644148682d47d81f3fd7b0d4b | 89a015d681c109801be843492dbbc76349fc8d24 | /setup.py | 4c5c6668172d3ad63eafbd7fcb7660ae259bb009 | [
"MIT"
] | permissive | chmouel/tekton-neat | 2d4eba1116f98aed1334bb9768514200ce6c71fd | 8714a0e673a7eb4c5670f4acbb61701719423cdd | refs/heads/main | 2023-01-06T03:46:37.275733 | 2020-11-06T10:43:09 | 2020-11-06T10:43:09 | 310,533,512 | 4 | 2 | MIT | 2020-11-06T10:37:46 | 2020-11-06T08:15:15 | Python | UTF-8 | Python | false | false | 1,515 | py | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
# Read the long-description sources. Explicit encoding makes the build
# independent of the platform's locale default (open() without encoding
# would use e.g. cp1252 on Windows and break on non-ASCII content).
with open('README.rst', encoding='utf-8') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst', encoding='utf-8') as history_file:
    history = history_file.read()
# Runtime / build / test dependency lists.
requirements = []
setup_requirements = ['pyyaml']
test_requirements = [
    'pytest>=3',
]
setup(
    author="Chmouel Boudjnah",
    author_email='chmouel@chmouel.com',
    python_requires='>=3.5',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Export your tekton templates neatly",
    entry_points={
        'console_scripts': [
            'tekton-neat=tekton_neat.cli:main',
        ],
    },
    install_requires=requirements,
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='tekton-neat',
    name='tekton-neat',
    packages=find_packages(include=['tekton_neat', 'tekton_neat.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/chmouel/tekton-neat',
    version='0.3.0',
    zip_safe=False,
)
| [
"chmouel@chmouel.com"
] | chmouel@chmouel.com |
fe4d5bc55a87020fa99d3ab6ac248746bcac93f7 | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/baekjoon/all_problem/1967.py | 7869d00865f64f47645925bb25b3fd2d27997977 | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 1,243 | py | import sys
sys.stdin = open('input_1967.txt', 'r')
from collections import deque
def find_farthest_vertex(start_node):
    """BFS over the weighted tree from `start_node`.

    Returns (max_distance, node) for the vertex farthest from the start.
    Reads the module globals `n` (vertex count) and `tree_info`
    (adjacency dict of dicts: node -> {neighbour: edge length}).
    """
    best_dist, best_node = 0, start_node
    seen = [False] * (n + 1)
    seen[start_node] = True
    queue = deque([(start_node, 0)])
    while queue:
        cur, dist_so_far = queue.popleft()
        for nxt in tree_info[cur]:
            if seen[nxt]:
                continue
            seen[nxt] = True
            total = dist_so_far + tree_info[cur][nxt]
            queue.append((nxt, total))
            if total > best_dist:
                best_dist, best_node = total, nxt
    return best_dist, best_node
# Read the tree and print its diameter using the classic double-BFS trick:
# the farthest vertex from any node is an endpoint of the diameter, and a
# second search from that endpoint yields the diameter length.
n = int(input())
if n == 1:
    # A single-vertex tree has diameter 0.
    print(0)
else:
    # Build a symmetric weighted adjacency map: node -> {neighbour: dist}.
    tree_info = dict()
    for _ in range(n - 1):
        node1, node2, dist = map(int, input().split())
        if node1 not in tree_info:
            tree_info[node1] = dict()
        tree_info[node1][node2] = dist
        if node2 not in tree_info:
            tree_info[node2] = dict()
        tree_info[node2][node1] = dist
    temp_vertex = find_farthest_vertex(1)[1]
    distance = find_farthest_vertex(temp_vertex)[0]
    print(distance)
print(distance) | [
"wallys0213@gmail.com"
] | wallys0213@gmail.com |
e28df1d439e040246ab819554cc779df88495db5 | b13b603bf8f07da1100b7fcb2e505f9c389e5764 | /level2/구명보트.py | b2937d51318cf687b8b938b9224531f55a5bf9f3 | [] | no_license | 123qpq/programers | 3a499646f65bed9f15b0db3e66d7445536579942 | 970b62210f6c29ea0d13bd381fb1e0a9b997143f | refs/heads/main | 2023-06-20T12:00:10.681082 | 2021-07-20T04:36:46 | 2021-07-20T04:36:46 | 322,791,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from collections import deque
def solution(people, limit):
    """Return the minimum number of 2-person boats needed to carry everyone.

    Greedy strategy: after sorting, pair the lightest remaining person with
    the heaviest one whenever their combined weight fits under `limit`;
    otherwise the heaviest person takes a boat alone. Each boat holds at
    most two people.

    :param people: list of individual weights
    :param limit: maximum combined weight per boat
    :return: minimum number of boats
    """
    # Sort a copy instead of mutating the caller's list (the old code
    # sorted in place and also copied everything into a deque).
    weights = sorted(people)
    boats = 0
    light, heavy = 0, len(weights) - 1
    # Two-pointer sweep replaces the deque popleft()/pop() loop: same
    # greedy pairing, without the extra container.
    while light <= heavy:
        boats += 1
        if weights[light] + weights[heavy] <= limit:
            light += 1
        heavy -= 1
    return boats
"45002168+123qpq@users.noreply.github.com"
] | 45002168+123qpq@users.noreply.github.com |
594ba8fc903636d753d54513ba19fe91419f506c | 6502929152acc82097c6bb9fa9b211a30b23b6c0 | /2_anomaly_detection_nyc.py | 446aea1c4757dddf1487d6ef315105b8176e12b4 | [
"Apache-2.0"
] | permissive | lulzzz/RNN-Time-series-Anomaly-Detection | ceec98a902b3504224834c03a22e7e7673f11470 | 194a20100749bae0f18b7a9681055b92f892c7e9 | refs/heads/master | 2020-03-10T03:37:45.860990 | 2018-04-11T01:38:42 | 2018-04-11T01:38:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,349 | py | import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
import preprocess_data
from model import model
from torch import optim
from matplotlib import pyplot as plt
import numpy as np
from anomalyDetector import fit_norm_distribution_param
from anomalyDetector import anomalyScore
from sklearn.svm import SVR
# ---------------------------------------------------------------------------
# Command line: only the prediction window size is configurable here; every
# other hyper-parameter is restored from the training checkpoint below.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch RNN Anomaly Detection Model on nyc_taxi Dataset')
parser.add_argument('--prediction_window_size', type=int, default=10,
                    help='prediction_window_size')
args_ = parser.parse_args()
print("=> loading checkpoint ")
checkpoint = torch.load('./save/nyc_taxi/checkpoint.pth.tar')
print("=> loaded checkpoint")
# Reuse the training-time arguments, overriding only the window size.
args = checkpoint['args']
args.prediction_window_size= args_.prediction_window_size
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
TimeseriesData = preprocess_data.DataLoad(args.data)
# batchify with batch size 1; the training data is truncated to 10k points.
train_dataset = preprocess_data.batchify(args,TimeseriesData.trainData, 1)[:10000]
test_dataset = preprocess_data.batchify(args,TimeseriesData.testData, 1)
###############################################################################
# Build the model
###############################################################################
model = model.RNNPredictor(rnn_type = args.model, enc_inp_size=3, rnn_inp_size = args.emsize, rnn_hid_size = args.nhid,
                           dec_out_size=3,
                           nlayers = args.nlayers,)
model.load_state_dict(checkpoint['state_dict'])
del checkpoint  # free the checkpoint once the weights are loaded
if args.cuda:
    model.cuda()
# At any point you can hit Ctrl + C to abort the scoring step early
# (handled by the except clause below).
endPoint=3500
try:
    # Fit normal-distribution parameters on the training data (channel 0).
    mean, cov = fit_norm_distribution_param(args, model, train_dataset, endPoint,channel_idx=0)
    train_scores, _, _, hiddens,_ = anomalyScore(args, model, train_dataset, mean, cov, 3000)
    # Train an SVR that predicts anomaly scores from the RNN hidden states.
    score_predictor = SVR(C=1.0,epsilon=0.2)
    score_predictor.fit(torch.cat(hiddens,dim=0).numpy(),train_scores)
    # Score the test set with both the fitted distribution and the SVR.
    scores, sorted_predictions,sorted_errors, _, predicted_scores = anomalyScore(args, model, test_dataset, mean, cov, endPoint,score_predictor=score_predictor)
    sorted_predictions = torch.cat(sorted_predictions, dim=0)
    sorted_errors = torch.cat(sorted_errors,dim=0)
    scores = np.array(scores)
except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
# Map series and predictions back to the original scale using the stored
# training mean/std.
target= preprocess_data.reconstruct(test_dataset.cpu()[:, 0, 0].numpy(),
                                    TimeseriesData.trainData['seqData_mean'],
                                    TimeseriesData.trainData['seqData_std'])
sorted_predictions_mean = preprocess_data.reconstruct(sorted_predictions.mean(dim=1).numpy(),
                                                      TimeseriesData.trainData['seqData_mean'],
                                                      TimeseriesData.trainData['seqData_std'])
sorted_predictions_1step = preprocess_data.reconstruct(sorted_predictions[:,-1].numpy(),
                                                       TimeseriesData.trainData['seqData_mean'],
                                                       TimeseriesData.trainData['seqData_std'])
sorted_predictions_Nstep = preprocess_data.reconstruct(sorted_predictions[:,0].numpy(),
                                                       TimeseriesData.trainData['seqData_mean'],
                                                       TimeseriesData.trainData['seqData_std'])
#sorted_errors_mean = sorted_errors.mean(dim=1).abs().cpu().numpy()
sorted_errors_mean = sorted_errors.abs().mean(dim=1).cpu().numpy()
sorted_errors_mean *=TimeseriesData.trainData['seqData_std']  # rescale errors only (no mean shift)
# Left axis: target series, predictions and absolute mean prediction errors.
fig, ax1 = plt.subplots(figsize=(15,5))
ax1.plot(target,label='Target', color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_predictions_mean,label='Mean predictions', color='purple', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_predictions_1step,label='1-step predictions', color='green', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_predictions_Nstep,label=str(args.prediction_window_size)+'-step predictions', color='blue', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_errors_mean,label='Absolute mean prediction errors', color='orange', marker='.', linestyle='--', markersize=1, linewidth=1)
ax1.legend(loc='upper left')
ax1.set_ylabel('Value',fontsize=15)
ax1.set_xlabel('Index',fontsize=15)
# Right axis: anomaly scores (distribution-based and SVR-predicted).
ax2 = ax1.twinx()
ax2.plot(scores,label='Anomaly scores from \nmultivariate normal distribution', color='red', marker='.', linestyle='--', markersize=1, linewidth=1)
ax2.plot(predicted_scores,label='Predicted anomaly scores from SVR', color='cyan', marker='.', linestyle='--', markersize=1, linewidth=1)
ax2.legend(loc='upper right')
ax2.set_ylabel('anomaly score',fontsize=15)
# Highlight indices 3024-3040 — presumably the known anomaly window; confirm.
plt.axvspan(3024,3040 , color='yellow', alpha=0.3)
plt.xlim([0, endPoint])
plt.title('Anomaly Detection on ' + args.data + ' Dataset', fontsize=18, fontweight='bold')
plt.tight_layout()
plt.xlim([1500,endPoint])  # final view zooms to the 1500..endPoint range
plt.savefig('result/'+args.data+'/fig_scores.png')
plt.show()
| [
"jmpark@rit.kaist.ac.kr"
] | jmpark@rit.kaist.ac.kr |
252b8ee31d1bd50da52453b3ffc3d15607759ff3 | e8bf00dba3e81081adb37f53a0192bb0ea2ca309 | /domains/nav/problems/auto/problem1083_SD.py | 44761016946ddef7cbb4c415a70a66f745d3cab7 | [
"BSD-3-Clause"
] | permissive | patras91/rae_release | 1e6585ee34fe7dbb117b084df982ca8a8aed6795 | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | refs/heads/master | 2023-07-13T20:09:41.762982 | 2021-08-11T17:02:58 | 2021-08-11T17:02:58 | 394,797,515 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | __author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state, rv
# Per-action durations for the two timer modes (TIME / COUNTER); both modes
# use the same values in this problem.
DURATION.TIME = {
    'unlatch1': 5,
    'unlatch2': 5,
    'holdDoor': 2,
    'passDoor': 3,
    'releaseDoor': 2,
    'closeDoors': 3,
    'move': 7,
    'take': 2,
    'put': 2,
}
DURATION.COUNTER = {
    'unlatch1': 5,
    'unlatch2': 5,
    'holdDoor': 2,
    'passDoor': 3,
    'releaseDoor': 2,
    'closeDoors': 3,
    'move': 7,
    'take': 2,
    'put': 2,
}
# Static environment: 5 locations on a chain 1-2-3-4 with a branch 3-5,
# spring door 'd1' between locations 2 and 3 and 'd2' between 3 and 5,
# and four robots.
rv.LOCATIONS = [1, 2, 3, 4, 5]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4, 5], 4: [3], 5: [3]}
rv.DOORS = ['d1', 'd2']
rv.DOORLOCATIONS = {(2, 3): 'd1', (3, 5): 'd2'}
rv.DOORTYPES = {'d1': 'spring', 'd2': 'spring'}
rv.ROBOTS = ['r1', 'r2', 'r3', 'r4']
def ResetState():
    """Reset the mutable world state to this problem's initial configuration."""
    # No robot carries anything; every robot starts free.
    state.load = {'r1': NIL, 'r2': NIL, 'r3': NIL, 'r4': NIL}
    state.status = {'r1': 'free', 'r2': 'free', 'r3': 'free', 'r4': 'free'}
    state.loc = {'r1': 4, 'r2': 3, 'r3': 1, 'r4': 3}
    state.pos = {'o1': 4}
    # Both doors start closed; their type is initially unknown (UNK).
    state.doorStatus = {'d1': 'closed', 'd2': 'closed', }
    state.doorType = {'d1': UNK, 'd2': UNK, }
# Task released at time 10: ['fetch', 'r1', 'o1', 3].
tasks = {
    10: [['fetch', 'r1', 'o1', 3]],
}
# No exogenous environment events in this problem.
eventsEnv = {
}
"patras@umd.edu"
] | patras@umd.edu |
0b0c4aff2067846b9dd29a167bd9b9af5c1b88d7 | aa6985deb43e26732899d06ee39fd6cb4befc2ae | /strongmotionfetch/retriever.py | 78bb5088fde03bef4274155037a08d13e372b0ae | [
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | jnf0910/strongmotionfetch | 2592396183a651799b3ae3a33c77a2b25128d85d | c9e6f30797e6893bf60506b24096c52537a0110b | refs/heads/master | 2020-12-25T09:28:53.617747 | 2016-06-23T20:48:39 | 2016-06-23T20:48:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | class Retriever(object):
def __init__(self,rawfolder,inputfolder):
self._inputfolder = inputfolder
self._rawfolder = rawfolder
def getData(self,time,lat,lon,timewindow,radius):
self.fetch(time,lat,lon,timewindow,radius) #find files online, download to raw folder
traces = self.readFiles() #read any files downloaded into raw folder, turn into list of ObsPy Trace objects
amps = self.traceToAmps(traces) #pull out peak amplitudes, return as data structure
xmlstr = self.ampsToXML(amps) #convert these amps to an xml string
self.saveToXML(xmlstr) #write that xml string to a file in the input folder
def fetch(self,time,lat,lon,timewindow,radius):
#this is implemented in child classes
pass
def readFiles(self):
#this is implemented in child classes
pass
def traceToAmps(traces=None):
#this is implemented here
pass
def ampsToXML(amps=None):
#this is implemented here
pass
| [
"mhearne@usgs.gov"
] | mhearne@usgs.gov |
b76c818f8bfb550baeb076a2434b1a8e483f19e2 | 36b388d25580e60068da2d6cab88d94f90959749 | /lib/datasets/SearchDatasetWrap.py | f7925b3c9235992b325687ea7c761c732570e971 | [
"MIT"
] | permissive | z-x-yang/NAS-Projects | 4b733b381e325819f958d9af684267b9d4f7fac8 | 54ecec7f750e11077b2ecc60ddcd74ce417434ac | refs/heads/master | 2020-09-08T14:45:13.712958 | 2019-11-09T16:04:05 | 2019-11-09T16:04:05 | 221,163,104 | 3 | 0 | MIT | 2019-11-12T08:05:37 | 2019-11-12T08:05:34 | null | UTF-8 | Python | false | false | 1,186 | py | import torch, copy, random
import torch.utils.data as data
class SearchDataset(data.Dataset):
    """Dataset wrapper for NAS search: every item pairs one sample from the
    train split with a randomly drawn sample from the validation split."""

    def __init__(self, name, data, train_split, valid_split, check=True):
        self.datasetname = name
        self.data = data
        # Keep private copies so later mutation of the callers' lists
        # cannot change this dataset.
        self.train_split = train_split.copy()
        self.valid_split = valid_split.copy()
        if check:
            overlap = set(self.train_split) & set(self.valid_split)
            assert len(overlap) == 0, 'the splitted train and validation sets should have no intersection'
        self.length = len(self.train_split)

    def __repr__(self):
        return '{:}(name={:}, train={:}, valid={:})'.format(
            self.__class__.__name__, self.datasetname,
            len(self.train_split), len(self.valid_split))

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        assert 0 <= index < self.length, 'invalid index = {:}'.format(index)
        # Deterministic train sample, random validation companion.
        train_image, train_label = self.data[self.train_split[index]]
        valid_image, valid_label = self.data[random.choice(self.valid_split)]
        return train_image, train_label, valid_image, valid_label
| [
"280835372@qq.com"
] | 280835372@qq.com |
c6269ba6d485726a565012d1258590f2aa850778 | 1e4eefff1c19ffb81016ce99f2284fb657293f65 | /sorting/test/insertion_sort.py | 0bc84a94a6f9223c9f3bc60740800fc4d84f4a58 | [] | no_license | Solero93/bcn-algorithm-club-py | 5e1edf15f087e0edf2cf7ba0859fb5e4523525ad | 1edf407498756e7ba36534387bb4241b8b455c4f | refs/heads/master | 2020-03-28T09:06:30.328130 | 2019-03-25T10:38:48 | 2019-03-25T10:38:48 | 148,014,386 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from unittest import TestCase
from src.insertion_sort import insertion_sort
from test.fixtures import test_cases
class BuiltInSortTest(TestCase):
def test_sort(self):
for case in test_cases:
self.assertEqual(insertion_sort(case), sorted(case),
msg=f'{case} should be {sorted(case)}')
| [
"solero93@gmail.com"
] | solero93@gmail.com |
00610e1274a588c4ba24de9621ed3e9c8cb3f68e | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/99.py | 126258ad6d501e7e292a6de1f96af9a0b43e54a6 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 169 | py | def word_count(input):
counts = {}
for word in input.split():
if not word in counts:
counts[word] = 1
else:
counts[word] = counts[word] + 1
return counts
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1a4b29df1d245f93801fe603eb728d890f2ba45e | adc6d8ee596e4710c3241332758bb6990bdd8914 | /Trabajo de grado_final/Anexos/Codigos/test_todoRE.py | b3e6bc0cdb7029307a37568546836197b0bdb655 | [] | no_license | NatalyTinoco/Trabajo-de-grado_Artefactos | cf9491c47a8a23ce5bab7c52498093a61319f834 | 5cc4e009f94c871c7ed0d820eb113398ac66ec2f | refs/heads/master | 2022-03-20T00:51:48.420253 | 2019-11-24T19:10:40 | 2019-11-24T19:10:40 | 197,964,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,752 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 5 12:53:35 2019
@author: Daniela y Nataly
Descripción del código: Código donde se une toda la metodología para identificación de Reflejos especulares (mRE).
*Entrada: imagePath1(ruta de la imagen)
*Salida: predic (predicción del clasificador (1-RE y 0-OTROS)), original_2(imagen original), imDU_2 (Imagen sin etiquetas),umbrImage (mascara con los RE reales),
original_3 (imagen con bbox graficados, ubicación de los artefactos) y bboxre (posición de los bbox).
"""
import cv2
import pickle
import pandas as pd
from Normalizacion import normalizacionMaxMin
from equalization import adaptativeequalization
from rOI import ROI
from ventanIDEA import ventanIDEA
from caracRE import caracRe
fileOpen = 'model_pickle'
with open(fileOpen,'rb') as f:
mpRE = pickle.load(f)
def test_all_RE(imagePath):
    """Full specular-reflection (RE) detection pipeline for one image.

    Normalises and equalises the image, masks it to the ROI, thresholds
    candidate bright regions, then classifies each candidate bounding box
    with the pre-trained model ``mpRE`` (1 = RE, 0 = other).

    :param imagePath: path of the image to analyse
    :return: tuple ``(predic, original_2, imDU_2, umbrImage, original_3, bboxre)``
        — per-candidate predictions, the original image, the ROI-masked
        original, the mask of confirmed REs, the original with boxes drawn,
        and the box coordinates.
    """
    original = cv2.imread(imagePath)
    original_2 =original.copy()
    original_3=original.copy()
    # Pre-processing: min/max normalisation, then adaptive equalisation.
    imNorm = normalizacionMaxMin(original)
    imEqu = adaptativeequalization(imNorm)
    imDR = imEqu.copy()
    roiImage = ROI(imEqu)
    # Apply the ROI mask channel by channel to the equalised image...
    for z in range(3):
        imDR[:,:,z]=imEqu[:,:,z]*roiImage
    imDR_2=original_2.copy()
    # ...and to a copy of the original image.
    for z in range(3):
        imDR_2[:,:,z]=original_2[:,:,z]*roiImage
    imDU_2=imDR_2.copy()
    imDU = imDR.copy()
    # Candidate mask produced by ventanIDEA (thresholding over the ROI).
    umbrImage = ventanIDEA(imDR,roiImage)
    for z in range(3):
        imDU[:,:,z]=imDR[:,:,z]*umbrImage
    # cv2.findContours returns 2 values in OpenCV 2/4 but 3 in OpenCV 3.
    try:
        contours,hierachy = cv2.findContours(umbrImage,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    except ValueError:
        _,contours,_ = cv2.findContours(umbrImage,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    predic=[]
    # Five parallel lists: label and x1, y1, x2, y2 of each confirmed RE box.
    bboxre=[],[],[],[],[]
    if len(contours)==0:
        pred=0
    else:
        for c in range(len(contours)):
            cnt = contours[c]
            x,y,w,h = cv2.boundingRect(cnt)
            cropped2 = imDR[int(y):int(y+h),int(x):int(x+w)]
            # Brightness / contrast / deviation features for the classifier.
            brillo,contraste,desvi=caracRe(cropped2)
            carac=pd.DataFrame({'contrastB':contraste,'desviacionB':desvi,'Brillo':brillo},index =['1'])
            pred=int(mpRE.predict(carac))
            if pred == 1:
                # Confirmed RE: keep the mask region and draw a red box.
                umbrImage[int(y):int(y+h),int(x):int(x+w)] = umbrImage[int(y):int(y+h),int(x):int(x+w)]
                cv2.rectangle(original_3,(int(x),int(y)),(int(x+w),int(y+h)),(0,0,255),2)
                bboxre[0].append(1)
                bboxre[1].append(int(x))
                bboxre[2].append(int(y))
                bboxre[3].append(int(w+x))
                bboxre[4].append(int(h+y))
            else:
                # Not a reflection: erase the candidate from the mask.
                umbrImage[int(y):int(y+h),int(x):int(x+w)] = 0
            predic.append(pred)
    return predic, original_2 , imDU_2,umbrImage,original_3,bboxre
| [
"51056570+NatalyTinoco@users.noreply.github.com"
] | 51056570+NatalyTinoco@users.noreply.github.com |
ddd21339f6ece5a9cfaea4ecafc2c753eb9eee93 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Adaptiv XVA/FPythonCode/HedgingCostHooksTemplate.py | 9674f7e8d046ad05bf4c5c7fab47c7b123fe95f4 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,164 | py | """ Compiled: 2020-09-18 10:38:50 """
#__src_file__ = "extensions/cva/adaptiv_xva/./etc/HedgingCostHooksTemplate.py"
'''---------------------------------------------------------------------
All functions in HedgingCostHooksTemplate can be
overridden in a HedgingCostHooks module. To do so, create a module called HedgingCostHooks
(or rename the HedgingCostHooksTemplate to HedgingCostHooks) and copy the function
declaration of the function you want to override into it.
---------------------------------------------------------------------'''
import acm
context = acm.GetDefaultContext()
def CreditDeskCounterParty():
    """Name of the counterparty used for HedgingCost credit sales."""
    counterparty_name = 'HedgingCost Credit sales desk'
    return counterparty_name
def IsHedgingCostCandidate(trade):
    """Filter that decides whether a trade may request a HedgingCost.

    A candidate must carry a credit balance and still be in the
    'Simulated' status.
    """
    has_credit_balance = trade.CreditBalance() is not None
    is_simulated = trade.Status() == 'Simulated'
    return has_credit_balance and is_simulated
def ConfirmedTradeStatus():
    """Trade status applied once the HedgingCost is confirmed."""
    confirmed_status = 'FO Confirmed'
    return confirmed_status
def CreditBalanceIncludedTrades():
    '''Return an FASQLQuery selecting trades for the Credit Balance portfolio.

    The returned filter is merged with the filter defined by the mapping in
    the extension value 'creditBalanceToTrade'.  A stored insert-items query
    can be used instead, e.g.:

        1. Create a shared Insert Items query that excludes trades where
           Status = Simulated, named "GlobalHedgingCostFilter"
        2. Return it from this hook:

           def CreditBalanceIncludedTrades():
               filter = acm.FStoredASQLQuery["GlobalHedgingCostFilter"]
               return filter.Query()
    '''
    simulated_status = acm.FEnumeration["enum(TradeStatus)"].Enumeration("Simulated")
    # Keep trades whose status is strictly greater than 'Simulated'.
    return acm.Filter.SimpleAndQuery("FTrade", "Status", "GREATER", simulated_status)
def GetSuggestedHedgingCost(trade):
    """Return the trade's 'Incremental CVA' value, computed on an
    FTradeSheet calculation space (uses the module-level default context)."""
    trade_sheet_space = acm.Calculations().CreateCalculationSpace(context, 'FTradeSheet')
    return trade_sheet_space.CalculateValue(trade, 'Incremental CVA')
| [
"81222178+nenchoabsa@users.noreply.github.com"
] | 81222178+nenchoabsa@users.noreply.github.com |
581247815b035fa94285115f1360f3627bd39e61 | c012d16bbb77853bc39973d920da94b70000dc01 | /Admins/migrations/0001_initial.py | bcd34f16091d2c29f5fe1ceda6ea5f1c88c388ed | [] | no_license | OverLoadedBurden/Dynamiet | 68c7e88d900581bdd53f1e51756c14db65359fd2 | 1988bbeeb73c313cf07c750fba5adb7739445012 | refs/heads/master | 2020-11-25T01:20:34.645831 | 2020-03-14T01:17:57 | 2020-03-14T01:17:57 | 228,428,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | # Generated by Django 2.2.5 on 2020-02-14 21:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the Admins app: creates the custom 'User' model."""

    # First migration of this app.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                # The user name doubles as the primary key (no numeric id).
                ('name', models.CharField(max_length=255, primary_key=True, serialize=False)),
                # NOTE(review): stored as a plain CharField — presumably a
                # password hash; confirm it is never stored in clear text.
                ('password', models.CharField(max_length=255)),
                ('isAdmin', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
        ),
    ]
| [
"overloadedburden@gmail.com"
] | overloadedburden@gmail.com |
c9dc89b118798fc71c4cb566d283176f2deeaf07 | 23382d130e2a020ac90d7b88ab045b34f7719565 | /setup.py | e14c95a9495bc2105d2b1bc69d26a302ae7755d6 | [
"MIT"
] | permissive | dunossauro/sfp | f2967f519017e9c9a8f95ff6dccd09293775c72a | 944e78ff453aba692ed5b6a3cf7855093d6e987a | refs/heads/master | 2021-04-06T13:03:18.403904 | 2018-06-22T17:16:44 | 2018-06-22T17:16:44 | 124,821,555 | 4 | 2 | MIT | 2018-07-10T21:32:27 | 2018-03-12T02:16:11 | Python | UTF-8 | Python | false | false | 314 | py | from setuptools import setup
# Package metadata for the 'sfp' (Simple Functional programming) distribution.
setup(name='sfp',
      version='0.0.1',
      description='Simple Functional programming',
      url='https://github.com/z4r4tu5tr4/sfp',
      author='Eduardo Mendes',
      author_email='mendesxeduardo@gmail.com',
      license='MIT',
      packages=['sfp'],   # ships the single 'sfp' package
      zip_safe=False)     # do not install as a zipped egg
| [
"mendesxeduardo@gmail.com"
] | mendesxeduardo@gmail.com |
5f975472428eedee10faaba92c1b0d561dcc4e86 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /PhysicsTools/PatAlgos/python/selectionLayer1/electronCountFilter_cfi.py | 80af141c59547eefb55cd53baaca26468ed67b02 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 262 | py | import FWCore.ParameterSet.Config as cms
# Module to filter events on the number of PAT electrons.  Per its
# parameters, PATCandViewCountFilter accepts an event when the size of the
# 'src' collection lies within [minNumber, maxNumber].
countPatElectrons = cms.EDFilter("PATCandViewCountFilter",
    minNumber = cms.uint32(0),       # no lower bound by default
    maxNumber = cms.uint32(999999),  # effectively no upper bound
    src = cms.InputTag("cleanPatElectrons")
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
901a4aed1e3a9151da4ec4c0e44368812347fc74 | bdde5ee603138399475ef42eeca67701f0d910ae | /mycoplasma_home/views/pagelets/public/HomePagelet.py | cfb042b9323db49d62695f1ed3c8d37cf7719204 | [] | no_license | idoerg/MyDIG | 63c19f980df8246a4a0b1c4e93fdd28bf69b97e9 | 88cc8f24a5d4b248dff1aafb54713e44537c611f | refs/heads/master | 2021-01-10T20:38:46.336870 | 2012-11-30T02:45:08 | 2012-11-30T02:45:08 | 5,939,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | '''
Pagelet for the Home Page
Author: Andrew Oberlin
Date: July 23, 2012
'''
from renderEngine.PageletBase import PageletBase
from mycoplasma_home.models import Organism, OrganismWithImages, OrganismWithGenome, OrganismWithTags
class HomePagelet(PageletBase):
    '''
    Renders the center of the home page.

    Params: request -- the Django request object with the POST & GET args
    Returns: Dictionary of arguments for rendering this pagelet
    '''
    def doProcessRender(self, request):
        '''Build the template context for public/home.html.'''
        self.setLayout('public/home.html')
        # All Mycoplasma organisms, ordered alphabetically by species.
        allMycoplasma = Organism.objects.filter(genus__exact="Mycoplasma").order_by('species')
        # Flat, ordered lists of organism ids that have a genome, images,
        # or tags associated with them.
        allGenomes = OrganismWithGenome.objects.values_list('organism_id', flat=True).order_by('organism_id')
        allImages = OrganismWithImages.objects.values_list('organism_id', flat=True).order_by('organism_id')
        allTags = OrganismWithTags.objects.values_list('organism_id', flat=True).order_by('organism_id')
        return {
            'all_mycoplasma' : allMycoplasma,
            'all_genomes' : allGenomes,
            'all_images' : allImages,
            'all_tags' : allTags
        }
| [
"andyoberlin@gmail.com"
] | andyoberlin@gmail.com |
898f544645356394e471bd139055540ae348b4ee | 3fbd28e72606e5358328bfe4b99eb0349ca6a54f | /.history/a_games_20210607183048.py | 6937ef94493d7d66c872ea111a2a66ef9fd75b41 | [] | no_license | Tarun1001/codeforces | f0a2ef618fbd45e3cdda3fa961e249248ca56fdb | 576b505d4b8b8652a3f116f32d8d7cda4a6644a1 | refs/heads/master | 2023-05-13T04:50:01.780931 | 2021-06-07T21:35:26 | 2021-06-07T21:35:26 | 374,399,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | n= int(input())
jer=[]
for _ in range(0, n):
h, a = map(int, input().split(""))
jer.append((h, a))
def sol(jer) :
count=0
for i in range(0,len(jer)):
for j in range(0,len(jer)):
if i==j:
continue
hometeamHomejersey= jer[i][0]
awayteamAwayjersey= jer[i][1]
if hometeamHomejersey==awayteamAwayjersey:
count+=1
return c
| [
"tarunsivasai8@gmail.com"
] | tarunsivasai8@gmail.com |
d6b7ff2786b8b58ae1df6bcc85b8c85ca3d12c8c | d0f21d669a1099fe7138d763985d0c392968f93f | /tests/test_visitors/test_ast/test_imports/test_protected_import.py | d27ec451dcc7abe58ea249e088f9f20d2d4a5fc7 | [
"MIT"
] | permissive | jigi-33/wemake-python-styleguide | 3aab4f13023d3a882b19e65a9967f8abe2a72db1 | 1239a4726b91de588b20b268c47485373bf125a1 | refs/heads/master | 2021-01-08T19:24:11.454562 | 2020-02-28T20:09:58 | 2020-02-29T05:39:16 | 242,120,262 | 1 | 0 | MIT | 2020-02-29T05:39:18 | 2020-02-21T11:08:25 | Python | UTF-8 | Python | false | false | 2,032 | py | # -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.violations.best_practices import (
ProtectedModuleMemberViolation,
ProtectedModuleViolation,
)
from wemake_python_styleguide.visitors.ast.imports import WrongImportVisitor
# Import statements used as parametrised cases: "public" imports the visitor
# must accept, and imports touching protected (underscore-prefixed) modules
# or members that it must reject.
import_public = 'import public'
import_protected = 'import _protected'
import_from_protected = 'from _protected import something'
import_from_protected_path = 'from path._protected import something'
import_protected_from = 'from some.path import _protected'
import_from_public = 'from public import something'
import_from_public_path = 'from public.path import something'
import_protected_as_alias = 'from some.path import _protected as not_protected'


@pytest.mark.parametrize('code', [
    import_public,
    import_from_public,
    import_from_public_path,
])
def test_correct_import(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
):
    """Testing that correct imports are allowed."""
    tree = parse_ast_tree(code)

    visitor = WrongImportVisitor(default_options, tree=tree)
    visitor.run()

    # No violations expected for public imports.
    assert_errors(visitor, [])


@pytest.mark.parametrize('code', [
    import_protected,
    import_from_protected,
    import_from_protected_path,
])
def test_incorrect_modules_import(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
):
    """Testing that imports from protected modules are restricted."""
    tree = parse_ast_tree(code)

    visitor = WrongImportVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [ProtectedModuleViolation])


@pytest.mark.parametrize('code', [
    import_protected_from,
    import_protected_as_alias,
])
def test_incorrect_module_members_import(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
):
    """Testing that importing of protected objects is restricted."""
    tree = parse_ast_tree(code)

    visitor = WrongImportVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [ProtectedModuleMemberViolation])
| [
"mail@sobolevn.me"
] | mail@sobolevn.me |
be8508f0365a7e8e553b47eb14de29e6433f5c5c | 14324a581c4f22c7ea8a96cc79725cdb84960e43 | /trees/migrations/0001_initial.py | 3c8b0a061d9d005b156b6ecbbccf9c5d97f9ef14 | [] | no_license | dogger123/django-treeapi | a3f141f87bb515e4af4f820a80daf6bacc40199d | 942da122d6c9909c21321a1aea2849428ba47120 | refs/heads/master | 2020-05-22T13:20:22.798164 | 2019-05-13T06:45:13 | 2019-05-13T06:45:13 | 186,357,054 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | # Generated by Django 2.2 on 2019-05-10 11:03
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the trees app: DataSet, Trees and Analysis models."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DataSet',
            fields=[
                ('dataSet_id', models.AutoField(primary_key=True, serialize=False)),
                ('dataSet_name', models.CharField(max_length=200, null=True)),
                ('dataSet_type', models.CharField(max_length=200, null=True)),
                ('table_name', models.CharField(max_length=200, null=True)),
                ('size', models.IntegerField(default=0)),
                # NOTE(review): verbose_name 'cretetime' looks like a typo for
                # 'createtime'; fixing it needs a follow-up migration.
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='cretetime')),
            ],
        ),
        migrations.CreateModel(
            name='Trees',
            fields=[
                ('tree_id', models.AutoField(primary_key=True, serialize=False)),
                ('tree_name', models.CharField(default=0, max_length=200)),
                ('tree_type', models.CharField(max_length=200, null=True)),
                # Serialized tree structure stored as text.
                ('tree_dict', models.TextField(default=0)),
                # NOTE(review): 'detpth' looks like a typo for 'depth'.
                ('detpth', models.IntegerField(default=0)),
                ('nodes_num', models.IntegerField(default=0)),
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='cretetime')),
                # PROTECT: a DataSet with trees cannot be deleted.
                ('dataSet', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='trees.DataSet')),
            ],
        ),
        migrations.CreateModel(
            name='Analysis',
            fields=[
                ('analysis_id', models.AutoField(primary_key=True, serialize=False)),
                ('analysis_name', models.CharField(max_length=200)),
                ('accuracy', models.FloatField()),
                # Rule representation ("if-then") stored as text.
                ('ifthen', models.TextField()),
                ('content', models.TextField(null=True)),
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='cretetime')),
                ('dataSet', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='trees.DataSet')),
                ('tree', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='trees.Trees')),
            ],
        ),
    ]
| [
"="
] | = |
a53aa1a02c0c228ee59d066579a76805bac6c7cf | 0ac2dbd8427971ae05a2ba80b625c5c027b6c978 | /hue.py | a3e50a905eb8ff81ede8b96487f90edd6620a9ca | [
"MIT"
] | permissive | kalliope-project/kalliope_neuron_hue | 02b783e92170cc1d23516478df8a5b1770bac759 | 12729702c7c4827f0945c6bf71ef030dee8f4058 | refs/heads/master | 2021-01-11T20:11:33.053097 | 2020-07-29T15:33:46 | 2020-07-29T15:33:46 | 79,062,591 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,534 | py | import ipaddress
import logging
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException, InvalidParameterException
from phue import Bridge
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Hue(NeuronModule):
    """Kalliope neuron that switches Philips Hue lights on or off through a
    Hue bridge (phue), optionally setting a brightness percentage.

    Expected kwargs: bridge_ip (required), state ('on'/'off', required),
    at least one of group_name / light_name / groups_name / lights_name,
    and an optional brightness in 0..100.
    """

    def __init__(self, **kwargs):
        super(Hue, self).__init__(**kwargs)
        self.bridge_ip = kwargs.get('bridge_ip', None)
        self.groups_name = kwargs.get('groups_name', None)
        self.lights_name = kwargs.get('lights_name', None)
        self.group_name = kwargs.get('group_name', None)
        self.light_name = kwargs.get('light_name', None)
        self.state = kwargs.get('state', None)
        self.brightness = kwargs.get('brightness', None)

        # check if parameters have been provided (raises on invalid config)
        if self._is_parameters_ok():
            # connect to the bridge
            self.b = Bridge(self.bridge_ip)
            # get all groups known to the bridge
            groups = self.b.get_group()

            # Switch every light of every group in 'groups_name'.
            if self.groups_name is not None:
                for group_name in self.groups_name:
                    # get all lights id from in the target group name
                    lights_ids = self._get_lights_id_by_from_group_name(groups, group_name)
                    # switch status of each light in the group depending on the state
                    logger.debug("Lights id: %s" % lights_ids)
                    if lights_ids is not None:
                        for light_id in lights_ids:
                            self.switch_light(int(light_id))

            # Switch every individually-named light in 'lights_name'.
            if self.lights_name is not None:
                for light_name in self.lights_name:
                    # get the id of the target light by its name
                    light = self.b.get_light(light_name)
                    if light is not None:
                        self.switch_light(light["name"])

            # Single light by name.
            if self.light_name is not None:
                # get the id of the target light by its name
                light = self.b.get_light(self.light_name)
                if light is not None:
                    self.switch_light(light["name"])

            # Single group by name.
            if self.group_name is not None:
                lights_ids = self._get_lights_id_by_from_group_name(groups, self.group_name)
                # switch status of each light in the group depending on the state
                logger.debug("Lights id: %s" % lights_ids)
                if lights_ids is not None:
                    for light_id in lights_ids:
                        self.switch_light(int(light_id))

    def _is_parameters_ok(self):
        """Validate the neuron kwargs; raises Missing/InvalidParameterException."""
        # test bridge ip is set
        if self.bridge_ip is None:
            raise MissingParameterException("Hue neuron needs a bridge_ip")
        # test if the ip is a valid ip. The following line will raise an exception
        ipaddress.ip_address(self.bridge_ip)

        # user must set at least one parameter that concern group or light name
        if self.groups_name is None and self.lights_name is None \
                and self.group_name is None and self.light_name is None:
            raise MissingParameterException("Hue neuron needs at least one of following parameters: "
                                            "group_name, light_name, groups_name, lights_name")

        # test groups_name or lights_name are a list
        if self.groups_name is not None:
            if not isinstance(self.groups_name, list):
                raise InvalidParameterException(
                    "Hue neuron: groups_name must be a list")
        if self.lights_name is not None:
            if not isinstance(self.lights_name, list):
                raise InvalidParameterException(
                    "Hue neuron: lights_name must be a list")

        # test group_name and light_name are strings
        if self.group_name is not None:
            if not isinstance(self.group_name, str):
                raise InvalidParameterException(
                    "Hue neuron: group_name must be a string")
        if self.light_name is not None:
            if not isinstance(self.light_name, str):
                raise InvalidParameterException(
                    "Hue neuron: light_name must be a string")

        # test state ok
        if self.state is None:
            raise MissingParameterException("Hue neuron needs a state \"on\" or \"off\"")
        if self.state not in ["on", "off"]:
            raise InvalidParameterException("Hue: state must be \"on\" or \"off\"")

        # brightness, when given, must be a percentage in 0..100
        if self.brightness is not None:
            r = range(0, 101)
            if int(self.brightness) not in r:
                raise InvalidParameterException("Hue: brightness must be in range 0:100")
        return True

    @staticmethod
    def _get_lights_id_by_from_group_name(groups, group_name_to_find):
        """
        Return a list of light IDs of the group matching the given name.

        :param groups: list of group from the bridge api
        :param group_name_to_find: string group to find in the list
        :return: list of lights IDs, or None when no group matches
        """
        lights_id = None
        for group in groups:
            group_id = str(group)
            group_dict = groups[group_id]
            if group_dict["name"] == group_name_to_find:
                lights_id = group_dict["lights"]
                break
        return lights_id

    @staticmethod
    def _get_boolean_from_state(state):
        # Map the user-facing "on"/"off" string to the boolean phue expects.
        if state == "on":
            return True
        return False

    def switch_light(self, light_identifier):
        """
        Call the HUE api to switch the light depending on the desired state.

        Also applies the configured brightness when switching a light on.

        :param light_identifier: ID or name of the light
        """
        logger.debug("HUE: Switching light %s to state %s" % (light_identifier, self.state))
        boolean_state = self._get_boolean_from_state(self.state)
        self.b.set_light(light_identifier, 'on', boolean_state)
        if boolean_state and self.brightness is not None:
            brightness_number = self.get_brightness_number_from_percent(self.brightness)
            logger.debug("HUE: Set brightness to %s" % self.brightness)
            self.b.set_light(light_identifier, 'bri', brightness_number)

    @staticmethod
    def get_brightness_number_from_percent(brightness_percent):
        """
        The phue lib wants a number between 0 and 254. The neuron asks for a
        percent between 0 and 100, so convert between the two scales.

        :param brightness_percent: integer between 0 and 100
        :return: integer between 0 and 254
        """
        return int(round((254 * int(brightness_percent))/100))
| [
"nico.marcq@gmail.com"
] | nico.marcq@gmail.com |
0176a59c63441c166cfeb79c09e228a5f8d8e60a | f62fd455e593a7ad203a5c268e23129473d968b6 | /murano-3.2.0/murano/tests/unit/engine/system/test_workflowclient.py | e109fe367da54e038fac1b25d37a1fb6d2067d9d | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 6,229 | py | # Copyright (c) 2016 AT&T
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from mistralclient.api import client as mistralcli
except ImportError as mistral_import_error:
mistralcli = None
import mock
from oslo_config import cfg
from murano.dsl import murano_method
from murano.dsl import murano_type
from murano.engine.system import workflowclient
from murano.tests.functional.common import utils as test_utils
from murano.tests.unit import base
# Module-level shortcuts used throughout the tests below.
CONF = cfg.CONF
# Helper that appends a random suffix to a name, to avoid collisions.
rand_name = test_utils.DeployTestMixin.rand_name
class TestMistralClient(base.MuranoTestCase):
    """Unit tests for murano.engine.system.workflowclient.MistralClient.

    The real mistralclient is replaced with mocks (see ``_patch_client``),
    so no Mistral service is needed to run these tests.
    """

    def setUp(self):
        super(TestMistralClient, self).setUp()
        self.mistral_client_mock = mock.Mock()
        self.mistral_client_mock.client = mock.MagicMock(
            spec=mistralcli.client)
        self._patch_client()

        self.mock_class = mock.MagicMock(spec=murano_type.MuranoClass)
        self.mock_method = mock.MagicMock(spec=murano_method.MuranoMethod)

        self._this = mock.MagicMock()
        self._this.owner = None

        self.addCleanup(mock.patch.stopall)

    def _patch_client(self):
        # Patch both the cached ``_client`` attribute and ``_create_client``
        # so MistralClient never talks to a real Mistral endpoint.
        self.mock_client = mock.Mock(return_value=self.mistral_client_mock)
        self.client_patcher = mock.patch.object(workflowclient.MistralClient,
                                                '_client', self.mock_client)
        self.client_patcher.start()

        self.mock_create_client = mock.Mock(
            return_value=self.mistral_client_mock)
        self.create_client_patcher = mock.patch.object(
            workflowclient.MistralClient, '_create_client',
            self.mock_create_client)
        self.create_client_patcher.start()

    def _unpatch_client(self):
        # Undo _patch_client(); used by tests that exercise the real
        # client-construction code path.
        self.client_patcher.stop()
        self.create_client_patcher.stop()

    def test_run_with_execution_success_state(self):
        """Internal keys are stripped from the output of a successful run."""
        test_output = '{"openstack": "foo", "__execution": "bar", "task":'\
                      ' "baz"}'
        mock_execution = mock.MagicMock(
            id='123', state='SUCCESS', output=test_output)
        self.mock_client.executions.create.return_value = mock_execution
        self.mock_client.executions.get.return_value = mock_execution

        run_name = rand_name('test')
        timeout = 1
        mc = workflowclient.MistralClient(self._this, 'regionOne')
        output = mc.run(run_name, timeout)
        for prop in ['openstack', '__execution', 'task']:
            self.assertFalse(hasattr(output, prop))
        self.assertEqual({}, output)

    def test_run_with_execution_error_state(self):
        """A run that finishes in ERROR state raises MistralError."""
        mock_execution = mock.MagicMock(
            id='123', state='ERROR', output="{'test_attr': 'test_val'}")
        self.mock_client.executions.create.return_value = mock_execution
        self.mock_client.executions.get.return_value = mock_execution

        run_name = rand_name('test')
        timeout = 1
        mc = workflowclient.MistralClient(self._this, 'regionOne')

        expected_error_msg = 'Mistral execution completed with ERROR.'\
                             ' Execution id: {0}. Output: {1}'\
                             .format(mock_execution.id, mock_execution.output)
        # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # use the assertRaisesRegex spelling instead.
        with self.assertRaisesRegex(workflowclient.MistralError,
                                    expected_error_msg):
            mc.run(run_name, timeout)

    def test_run_except_timeout_error(self):
        """A run stuck in a non-terminal state times out with MistralError."""
        mock_execution = mock.MagicMock(
            id='123', state='TEST_STATE', output="{'test_attr': 'test_val'}")
        self.mock_client.executions.create.return_value = mock_execution
        self.mock_client.executions.get.return_value = mock_execution

        run_name = rand_name('test')
        timeout = 1
        mc = workflowclient.MistralClient(self._this, 'regionOne')

        expected_error_msg = 'Mistral run timed out. Execution id: {0}.'\
                             .format(mock_execution.id)
        with self.assertRaisesRegex(workflowclient.MistralError,
                                    expected_error_msg):
            mc.run(run_name, timeout)

    def test_run_with_immediate_timeout(self):
        """With timeout=0 the execution id is returned without waiting."""
        mock_execution = mock.MagicMock(
            id='123', state='ERROR', output="{'test_attr': 'test_val'}")
        self.mock_client.executions.create.return_value = mock_execution

        run_name = rand_name('test')
        timeout = 0
        mc = workflowclient.MistralClient(self._this, 'regionOne')
        self.assertEqual(mock_execution.id, mc.run(run_name, timeout))

    def test_upload(self):
        """upload() delegates to the Mistral workflows.create API."""
        mc = workflowclient.MistralClient(self._this, 'regionOne')
        definition = rand_name('test')
        self.assertIsNone(mc.upload(definition))
        self.assertTrue(workflowclient.MistralClient.
                        _client.workflows.create.called)

    @mock.patch('murano.engine.system.workflowclient.auth_utils')
    def test_client_property(self, _):
        """The _client property builds a client from CONF.mistral settings."""
        self._unpatch_client()
        test_mistral_settings = {
            'url': rand_name('test_mistral_url'),
            'project_id': rand_name('test_project_id'),
            'endpoint_type': rand_name('test_endpoint_type'),
            'auth_token': rand_name('test_auth_token'),
            'user_id': rand_name('test_user_id'),
            'insecure': rand_name('test_insecure'),
            'cacert': rand_name('test_ca_cert')
        }

        with mock.patch('murano.engine.system.workflowclient.CONF')\
                as mock_conf:
            mock_conf.mistral = mock.MagicMock(**test_mistral_settings)
            region_name = rand_name('test_region_name')
            mc = workflowclient.MistralClient(self._this, region_name)

            mistral_client = mc._client
            self.assertIsNotNone(mistral_client)
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
9afe0a1a5bce3f2082734a16953c0bbd764400f5 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /7_graph/二分图/无权二部图最大匹配问题/lcp4覆盖.py | 61be67685f06f8a62381a377bfa0e43cbe726510 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | from typing import List
from 匈牙利算法 import Hungarian
DIR4 = [(0, 1), (1, 0), (0, -1), (-1, 0)]
class Solution:
def domino(self, row: int, col: int, broken: List[List[int]]) -> int:
H = Hungarian()
grid = [[0] * col for _ in range(row)]
for r, c in broken:
grid[r][c] = 1
for r in range(row):
for c in range(col):
if grid[r][c] == 1 or (r + c) & 1:
continue
cur = r * col + c
for dr, dc in DIR4:
nr, nc = r + dr, c + dc
if 0 <= nr < row and 0 <= nc < col and grid[nr][nc] == 0:
H.addEdge(cur, nr * col + nc)
return len(H.work())
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
8fe1be9af65e823f2f90637c4f4eac341e0fec56 | 70d39e4ee19154a62e8c82467ef75b601e584738 | /devops/terminate_exact_ec2.py | 631b42526a94ae6b2852b48145767358a038e527 | [
"Apache-2.0"
] | permissive | babywyrm/sysadmin | 6f2724be13ae7e5b9372278856a8c072073beffb | 2a5f3d29c7529bc917d4ff9be03af30ec23948a5 | refs/heads/master | 2023-08-16T03:50:38.717442 | 2023-08-16T03:05:55 | 2023-08-16T03:05:55 | 210,228,940 | 10 | 5 | null | 2023-05-01T23:15:31 | 2019-09-22T23:42:50 | PowerShell | UTF-8 | Python | false | false | 320 | py | #!/usr/bin/python3
##
##
##
########################################3
import sys
import boto3

# Usage: terminate_exact_ec2.py <instance-id> [<instance-id> ...]
#
# Terminates each EC2 instance ID given on the command line, using the
# default AWS credentials/region resolved by boto3, and prints the raw
# API response for each call.
#
# WARNING(review): destructive — there is no confirmation prompt; instances
# are terminated immediately.
ec2 = boto3.resource('ec2')
for instance_id in sys.argv[1:]:
    instance = ec2.Instance(instance_id)
    response = instance.terminate()
    print(response)
###########
##
##
##
| [
"noreply@github.com"
] | babywyrm.noreply@github.com |
3a37b7b47d59f49963becb153844bc7c178688c7 | 1bd073f585706c31c406bceb81eb400f8ac27c1d | /tools/Polygraphy/polygraphy/tools/convert/convert.py | aa2311a379ccc4b9e4dc8acfdfe187ee41b5a4f9 | [
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | neevaco/TensorRT | 7b5e54c6a7cc6d0fc545e47ab7cf6656f23d5e19 | 650a4a6ed29403bec1a55663b48ef41a075d0b3c | refs/heads/neeva | 2023-05-29T19:20:26.431716 | 2022-08-19T23:09:26 | 2022-08-26T19:09:39 | 526,771,012 | 0 | 0 | Apache-2.0 | 2022-08-19T23:09:27 | 2022-08-19T22:49:25 | null | UTF-8 | Python | false | false | 4,377 | py | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import (
DataLoaderArgs,
ModelArgs,
OnnxFromTfArgs,
OnnxInferShapesArgs,
OnnxLoadArgs,
OnnxSaveArgs,
TfLoadArgs,
TrtConfigArgs,
TrtLoadEngineArgs,
TrtLoadNetworkArgs,
TrtLoadPluginsArgs,
TrtSaveEngineArgs,
)
from polygraphy.tools.base import Tool
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
trt_backend = mod.lazy_import("polygraphy.backend.trt")
class Convert(Tool):
    """
    Convert models to other formats.
    """

    def __init__(self):
        super().__init__("convert")

    def get_subscriptions(self):
        """Return the argument groups providing this tool's CLI options."""
        return [
            ModelArgs(model_opt_required=True),
            TfLoadArgs(allow_artifacts=False),
            OnnxFromTfArgs(),
            OnnxInferShapesArgs(),
            OnnxLoadArgs(allow_from_tf=True),
            OnnxSaveArgs(output_opt=False),
            DataLoaderArgs(),  # For int8 calibration
            TrtConfigArgs(),
            TrtLoadPluginsArgs(),
            TrtLoadNetworkArgs(),
            TrtLoadEngineArgs(),
            TrtSaveEngineArgs(output_opt=False),
        ]

    def add_parser_args(self, parser):
        """Register the tool-specific command-line arguments."""
        parser.add_argument("-o", "--output", help="Path to save the converted model", required=True)
        parser.add_argument(
            "--convert-to",
            # Fixed typo in user-facing help text: "EXPERIMETNAL" -> "EXPERIMENTAL".
            help="The format to attempt to convert the model to."
            "'onnx-like-trt-network' is EXPERIMENTAL and converts a TensorRT network to a format usable for visualization. "
            "See 'OnnxLikeFromNetwork' for details. ",
            choices=["onnx", "trt", "onnx-like-trt-network"],
        )

        onnx_args = self.arg_groups[OnnxLoadArgs].group
        onnx_args.add_argument(
            "--fp-to-fp16",
            help="Convert all floating point tensors in an ONNX model to 16-bit precision. "
            "This is *not* needed in order to use TensorRT's fp16 precision, but may be useful for other backends. "
            "Requires onnxmltools. ",
            action="store_true",
            default=None,
        )

    def run(self, args):
        """Perform the conversion requested by the parsed CLI arguments.

        The target format is taken from --convert-to, or inferred from the
        output path's extension when --convert-to was not given.
        """
        if not args.convert_to:
            _, ext = os.path.splitext(args.output)
            if ext not in ModelArgs.EXT_MODEL_TYPE_MAPPING:
                G_LOGGER.critical(
                    f"Could not automatically determine model type based on output path: {args.output}\nPlease specify the desired output format with --convert-to"
                )
            convert_type = ModelArgs.ModelType(ModelArgs.EXT_MODEL_TYPE_MAPPING[ext])
        elif args.convert_to == "onnx-like-trt-network":
            convert_type = "onnx-like-trt-network"
        else:
            CONVERT_TO_MODEL_TYPE_MAPPING = {"onnx": "onnx", "trt": "engine"}
            convert_type = ModelArgs.ModelType(CONVERT_TO_MODEL_TYPE_MAPPING[args.convert_to])

        if convert_type == "onnx-like-trt-network":
            onnx_like = trt_backend.onnx_like_from_network(self.arg_groups[TrtLoadNetworkArgs].load_network())
            onnx_backend.save_onnx(onnx_like, args.output)
        elif convert_type.is_onnx():
            model = self.arg_groups[OnnxLoadArgs].load_onnx()
            if args.fp_to_fp16:
                model = onnx_backend.convert_to_fp16(model)
            self.arg_groups[OnnxSaveArgs].save_onnx(model, args.output)
        elif convert_type.is_trt():
            with self.arg_groups[TrtLoadEngineArgs].load_engine() as engine:
                self.arg_groups[TrtSaveEngineArgs].save_engine(engine, args.output)
        else:
            G_LOGGER.critical(f"Cannot convert to model type: {convert_type}")
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.