blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8e9f82bbc6dacde92d1f90f2d143e408042e520f | d15ed15aa3df11ce3bc5a007d65dc90ad7b7471d | /go.py | b6b362f5b72c686da4ac1f1525c638befe53b2e4 | [] | no_license | dansgithubuser/dansMap | 95947005c74f975355858f4b059b8913410814e9 | 48e035b1d6c308e83d5ddb5884475bfb88fb3eae | refs/heads/master | 2020-03-17T02:18:56.329812 | 2018-06-24T15:34:22 | 2018-06-24T15:34:22 | 133,185,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import os
import sys
# Resolve the directory holding this script so the bundled "deps" folder can
# be imported no matter what the current working directory is.
DIR=os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(DIR, 'deps'))
import djangogo
# djangogo drives the project's setup/deploy workflow; parse its CLI
# arguments and run it with this project's fixed configuration.
parser=djangogo.make_parser()
args=parser.parse_args()
djangogo.main(args,
    project='dansmap',
    app='map',
    database='map_database',
    user='map_database_user',
    heroku_repo='https://git.heroku.com/safe-everglades-62273.git',
    heroku_url='https://safe-everglades-62273.herokuapp.com',
)
| [
"dansonlinepresence@gmail.com"
] | dansonlinepresence@gmail.com |
2da4bc5d14efc19541749e60986d62a072c681ff | 75c96e6070fb5c2473a7ae3be30a2d3c9bd9301a | /src/colormap/__init__.py | 0266932dba1e3d059d933f1638b9e05c1b604ac1 | [
"BSD-3-Clause"
] | permissive | Akronix/colormap | c727c80c52d8f0a545b4a54974569315319113e9 | e6d9a6e8bc9e3b3ac530a48577a5bee857267304 | refs/heads/master | 2020-08-05T04:16:34.328499 | 2018-12-29T15:10:18 | 2018-12-29T15:10:18 | 212,390,848 | 1 | 0 | BSD-3-Clause | 2019-10-02T16:37:32 | 2019-10-02T16:37:31 | null | UTF-8 | Python | false | false | 1,003 | py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# This file is part of the colormap software
#
# Copyright (c) 2014
#
# File author(s): Thomas Cokelaer <cokelaer@gmail.com>
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# Website: https://www.github.com/cokelaer/colormap
# Documentation: http://packages.python.org/colormap
#
##############################################################################
"""main colormap module"""
from __future__ import print_function
from __future__ import division
import pkg_resources
try:
    version = pkg_resources.require("colormap")[0].version
except Exception:
    # Package metadata unavailable (e.g. running from a source checkout).
    version = ''
# Bug fix: assign __version__ on both paths. Previously it was only set
# inside the try block, so a pkg_resources failure left colormap.__version__
# undefined and attribute lookups raised AttributeError.
__version__ = version
from .xfree86 import *
from . import colors
from .colors import *
from .get_cmap import *
c = Colormap()
# Every colormap name known to this package (regular + diverging-black).
colormap_names = c.colormaps + c.diverging_black
# create an alias to the test_colormap method
test_colormap = c.test_colormap
test_cmap = c.test_colormap
| [
"cokelaer@gmail.com"
] | cokelaer@gmail.com |
d8d1fab3a4dd38f701ee8cb531edb455f731e1e9 | 368fec101daec272c8d44d592558906ee8043bc1 | /tradefed_cluster/util/ndb_util_test.py | 7d229b359db6b182129e9bc29496883828aabea4 | [
"Apache-2.0"
] | permissive | maksonlee/tradefed_cluster | 3acb0a899c073315c3e80b830784ec94a201a085 | 0568fc1d9b9dca79aed2de493955ce1adebb1d6b | refs/heads/master | 2023-08-09T17:35:46.045476 | 2023-07-21T18:54:26 | 2023-07-21T18:54:54 | 369,842,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ndb_util."""
import unittest
from google.cloud import ndb
from tradefed_cluster import testbed_dependent_test
from tradefed_cluster.util import ndb_util
def _MockModelRenameFooToBar(obj):
obj.bar = obj.foo
def _MockModelRenameBarToZzz(obj):
obj.zzz = obj.bar
class MockModel(ndb_util.UpgradableModel):
  """Minimal UpgradableModel used to exercise the upgrade machinery."""

  # One logical field renamed twice across schema versions: foo -> bar -> zzz.
  foo = ndb.StringProperty()
  bar = ndb.StringProperty()
  zzz = ndb.StringProperty()
  # Ordered migration steps; based on the tests below, steps are applied
  # starting at the entity's stored schema_version.
  _upgrade_steps = [
      _MockModelRenameFooToBar,
      _MockModelRenameBarToZzz,
  ]
class UpgradableModelTest(testbed_dependent_test.TestbedDependentTest):
  """Tests for ndb_util.UpgradableModel upgrade behavior."""

  def testUpgrade(self):
    # From schema version 0 both rename steps run: foo -> bar -> zzz.
    obj = MockModel(foo='foo')
    obj.schema_version = 0
    obj.Upgrade()
    self.assertEqual(obj.zzz, 'foo')

  def testUpgrade_oneVersion(self):
    # From schema version 1 only the second step (bar -> zzz) runs.
    obj = MockModel(bar='foo')
    obj.schema_version = 1
    obj.Upgrade()
    self.assertEqual(obj.zzz, 'foo')

  def testUpgrade_latestVersion(self):
    # An entity already at the latest schema is left unchanged by Upgrade().
    obj = MockModel(zzz='zzz')
    obj.put()
    obj.Upgrade()
    self.assertEqual(obj.zzz, 'zzz')

  def testPostGetHook(self):
    # Fetching a stale entity through the datastore triggers the upgrade
    # automatically via the post-get hook.
    obj = MockModel(foo='foo')
    obj.schema_version = 0
    obj.put()
    obj = obj.key.get()
    self.assertEqual(obj.zzz, 'foo')
if __name__ == '__main__':
unittest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
4358059093f7fef6e061d934bfa2b80593531bc6 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /jaxnerf/eval.py | 9ece8a83f7159aa0df74218fffb0c3781352ec66 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 5,263 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation script for Nerf."""
import functools
from os import path
from absl import app
from absl import flags
import flax
from flax.metrics import tensorboard
from flax.training import checkpoints
import jax
from jax import random
import numpy as np
from jaxnerf.nerf import datasets
from jaxnerf.nerf import models
from jaxnerf.nerf import utils
FLAGS = flags.FLAGS
utils.define_flags()
def main(unused_argv):
  """Evaluate NeRF checkpoints on the test split, optionally in a watch loop.

  Restores the latest checkpoint from FLAGS.train_dir, renders every test
  image, reports PSNR/SSIM, and (unless FLAGS.eval_once) repeats whenever a
  newer checkpoint appears.
  """
  # Fixed seed: evaluation must be reproducible across runs.
  rng = random.PRNGKey(20200823)
  if FLAGS.config is not None:
    utils.update_flags(FLAGS)
  if FLAGS.train_dir is None:
    raise ValueError("train_dir must be set. None set now.")
  if FLAGS.data_dir is None:
    raise ValueError("data_dir must be set. None set now.")
  dataset = datasets.get_dataset("test", FLAGS)
  rng, key = random.split(rng)
  model, init_variables = models.get_model(key, dataset.peek(), FLAGS)
  optimizer = flax.optim.Adam(FLAGS.lr_init).create(init_variables)
  state = utils.TrainState(optimizer=optimizer)
  # Free the duplicated references; `state` now owns the parameters.
  del optimizer, init_variables
  # Rendering is forced to be deterministic even if training was randomized, as
  # this eliminates "speckle" artifacts.
  def render_fn(variables, key_0, key_1, rays):
    return jax.lax.all_gather(
        model.apply(variables, key_0, key_1, rays, False), axis_name="batch")
  # pmap over only the data input.
  render_pfn = jax.pmap(
      render_fn,
      in_axes=(None, None, None, 0),
      donate_argnums=3,
      axis_name="batch",
  )
  # Compiling to the CPU because it's faster and more accurate.
  ssim_fn = jax.jit(
      functools.partial(utils.compute_ssim, max_val=1.), backend="cpu")
  last_step = 0
  out_dir = path.join(FLAGS.train_dir,
                      "path_renders" if FLAGS.render_path else "test_preds")
  if not FLAGS.eval_once:
    summary_writer = tensorboard.SummaryWriter(
        path.join(FLAGS.train_dir, "eval"))
  # Watch loop: evaluate each new checkpoint as it appears in train_dir.
  while True:
    state = checkpoints.restore_checkpoint(FLAGS.train_dir, state)
    step = int(state.optimizer.state.step)
    if step <= last_step:
      # No newer checkpoint yet; poll again.
      continue
    if FLAGS.save_output and (not utils.isdir(out_dir)):
      utils.makedirs(out_dir)
    psnr_values = []
    ssim_values = []
    if not FLAGS.eval_once:
      # Pick one random test image per round to showcase in TensorBoard.
      showcase_index = np.random.randint(0, dataset.size)
    for idx in range(dataset.size):
      print(f"Evaluating {idx+1}/{dataset.size}")
      batch = next(dataset)
      pred_color, pred_disp, pred_acc = utils.render_image(
          functools.partial(render_pfn, state.optimizer.target),
          batch["rays"],
          rng,
          FLAGS.dataset == "llff",
          chunk=FLAGS.chunk)
      if jax.host_id() != 0:  # Only record via host 0.
        continue
      if not FLAGS.eval_once and idx == showcase_index:
        showcase_color = pred_color
        showcase_disp = pred_disp
        showcase_acc = pred_acc
        if not FLAGS.render_path:
          showcase_gt = batch["pixels"]
      # Metrics only make sense against ground truth, which render-path
      # (novel trajectory) mode does not have.
      if not FLAGS.render_path:
        psnr = utils.compute_psnr(((pred_color - batch["pixels"])**2).mean())
        ssim = ssim_fn(pred_color, batch["pixels"])
        print(f"PSNR = {psnr:.4f}, SSIM = {ssim:.4f}")
        psnr_values.append(float(psnr))
        ssim_values.append(float(ssim))
      if FLAGS.save_output:
        utils.save_img(pred_color, path.join(out_dir, "{:03d}.png".format(idx)))
        utils.save_img(pred_disp[Ellipsis, 0],
                       path.join(out_dir, "disp_{:03d}.png".format(idx)))
    # TensorBoard summaries for this checkpoint (host 0 only).
    if (not FLAGS.eval_once) and (jax.host_id() == 0):
      summary_writer.image("pred_color", showcase_color, step)
      summary_writer.image("pred_disp", showcase_disp, step)
      summary_writer.image("pred_acc", showcase_acc, step)
      if not FLAGS.render_path:
        summary_writer.scalar("psnr", np.mean(np.array(psnr_values)), step)
        summary_writer.scalar("ssim", np.mean(np.array(ssim_values)), step)
        summary_writer.image("target", showcase_gt, step)
    # Persist per-image and mean metrics as plain text next to the renders.
    if FLAGS.save_output and (not FLAGS.render_path) and (jax.host_id() == 0):
      with utils.open_file(path.join(out_dir, f"psnrs_{step}.txt"), "w") as f:
        f.write(" ".join([str(v) for v in psnr_values]))
      with utils.open_file(path.join(out_dir, f"ssims_{step}.txt"), "w") as f:
        f.write(" ".join([str(v) for v in ssim_values]))
      with utils.open_file(path.join(out_dir, "psnr.txt"), "w") as f:
        f.write("{}".format(np.mean(np.array(psnr_values))))
      with utils.open_file(path.join(out_dir, "ssim.txt"), "w") as f:
        f.write("{}".format(np.mean(np.array(ssim_values))))
    if FLAGS.eval_once:
      break
    if int(step) >= FLAGS.max_steps:
      break
    last_step = step
if __name__ == "__main__":
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
fc21f78fac993994c6c1d017982080abc3e00a18 | 762c307de73db674c214619778802b863548bf2e | /env/bin/pilfont.py | ea37f1eb1170b5b23568a42c9f4bb924effcd14d | [] | no_license | mansourgueye275/django-bloggy | 56d9d6a2131a71c20d6c341764503b76ba3a45c1 | 1b8080ad26244d3d60e20e24ad6520d7a663381b | refs/heads/master | 2021-06-23T02:16:30.301697 | 2017-09-01T18:56:23 | 2017-09-01T18:56:23 | 102,141,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | #!/home/mansour/Documents/RealPython/django-bloggy/env/bin/python3
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
# With no font files given, print usage and bail out.
if len(sys.argv) <= 1:
    print("PILFONT", VERSION, "-- PIL font compiler.")
    print()
    print("Usage: pilfont fontfiles...")
    print()
    print("Convert given font files to the PIL raster font format.")
    print("This version of pilfont supports X BDF and PCF fonts.")
    sys.exit(1)
files = []
# Expand shell-style wildcards ourselves (some shells pass them through).
for f in sys.argv[1:]:
    files = files + glob.glob(f)
for f in files:
    print(f + "...", end=' ')
    try:
        fp = open(f, "rb")
        # Try the PCF parser first; on a header mismatch rewind the file
        # and fall back to the BDF parser.
        try:
            p = PcfFontFile.PcfFontFile(fp)
        except SyntaxError:
            fp.seek(0)
            p = BdfFontFile.BdfFontFile(fp)
        # Writes the compiled .pil/.pbm files next to the source font.
        p.save(f)
    except (SyntaxError, IOError):
        print("failed")
    else:
        print("OK")
| [
"zoe14@live.fr"
] | zoe14@live.fr |
3591a5c5b02eaef9d5ce5f72aab5bcfda5dc4e60 | 6febc1719503d0f9dbc97f6b1202116370391b10 | /clarico/clarico_category/controllers/main.py | 1a9f165596162d8f4442ae3d08af86cabf4b192d | [] | no_license | arshakil/Odoo-Development | 5c6a1795cd64a8ebef5abfdf7d6245804594bcd8 | df37f6e8c2f7d89cdbdb36d0a8fd501ef8bfe563 | refs/heads/master | 2022-12-11T05:17:12.123339 | 2020-07-28T07:38:58 | 2020-07-28T07:38:58 | 248,154,189 | 0 | 2 | null | 2022-12-08T03:51:50 | 2020-03-18T06:20:59 | Python | UTF-8 | Python | false | false | 587 | py | from odoo import http
from odoo.http import request
from odoo import SUPERUSER_ID
from odoo import models, fields, api
class claricoCategory(http.Controller):
    """Website controller exposing top-level product categories for showcases."""

    # JSON endpoint; CSRF disabled and the rendered result cached for 30s.
    @http.route(['/showcase_data'],type='json', auth='public', website=True , csrf=False, cache=30)
    def category_data(self,template,limit=10):
        """Render QWeb template `template` with up to `limit` root categories."""
        # Root categories only: records with no parent category.
        data=request.env['product.public.category'].search([['parent_id','=',False]],limit=limit)
        values = {'object':data}
        return request.env.ref(template).render(values)
| [
"azizur.rahman363410@gmail.com"
] | azizur.rahman363410@gmail.com |
f2ab634a4d0e2b54bbdbd6cb5b20849ba87ef995 | 0db86f23fd8f6ff3b6119db0b7fab0f8522611f6 | /Intro_To_Python/HW17/census.py | 4eecf12f9f57d54ea047c9f2da4485ce67a78d9b | [] | no_license | justinminsk/Python_Files | 8a6d96ecc6596e19413b35758d3234900d2381b6 | 68b3815fae58bc2d7ec86bcd42d46354d8b3d2f0 | refs/heads/master | 2021-05-15T11:24:25.473043 | 2018-02-08T20:23:12 | 2018-02-08T20:23:12 | 108,322,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py | from sqlite3 import *
def make_database():
    """Open (creating on first use) the census.db SQLite database.

    Returns:
        sqlite3.Connection to census.db in the current working directory.
    """
    return connect('census.db')
def make_db_table():
    """Create the Density table if needed and return a cursor on census.db.

    Bug fix: the original issued a bare CREATE TABLE, which raised
    sqlite3.OperationalError whenever the table already existed (e.g. on a
    second run of the script). IF NOT EXISTS makes the call repeatable.

    Returns:
        sqlite3.Cursor bound to the census.db connection.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute(
        'CREATE TABLE IF NOT EXISTS '
        'Density(Province TEXT, Population INTEGER, Area REAL)')
    connection.commit()
    return cursor
def add_entries():
    """Seed the Density table with the 2001 Canadian census figures.

    Bug fix: the original re-ran a bare CREATE TABLE and therefore crashed
    with sqlite3.OperationalError whenever the table already existed.
    IF NOT EXISTS removes the crash; executemany replaces the manual loop.
    Note: rows are appended, so calling this twice still duplicates data.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute(
        'CREATE TABLE IF NOT EXISTS '
        'Density(Province TEXT, Population INTEGER, Area REAL)')
    connection.commit()
    # (province, population, land area in km^2) per 2001 census.
    table = [
        ('Newfoundland and Labrador', 512930, 370501.69),
        ('Prince Edward Island', 135294, 5684.39),
        ('Nova Scotia', 908007, 52917.43),
        ('New Brunswick', 729498, 71355.67),
        ('Quebec', 7237479, 1357743.08),
        ('Ontario', 11410046, 907655.59),
        ('Manitoba', 1119583, 551937.87),
        ('Saskatchewan', 978933, 586561.35),
        ('Alberta', 2974807, 639987.12),
        ('British Columbia', 3907738, 926492.48),
        ('Yukon Territory', 28674, 474706.97),
        ('Northwest Territories', 37360, 1141108.37),
        ('Nunavut', 26745, 1925460.18),
    ]
    cursor.executemany('INSERT INTO Density VALUES (?, ?, ?)', table)
    connection.commit()
def get_content():
    """Ensure the Density table exists and is seeded, then print and return it.

    Bug fix: the original always re-created and re-seeded the table, so a
    second call crashed (CREATE TABLE on an existing table). This version
    creates the table with IF NOT EXISTS and seeds it only when empty, making
    the function safe to call repeatedly. It now also returns the rows so the
    data is usable programmatically (callers ignoring the return are
    unaffected).

    Returns:
        list of (Province, Population, Area) tuples in insertion order.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute(
        'CREATE TABLE IF NOT EXISTS '
        'Density(Province TEXT, Population INTEGER, Area REAL)')
    cursor.execute('SELECT COUNT(*) FROM Density')
    if cursor.fetchone()[0] == 0:
        # Empty table: seed the 2001 census data.
        table = [
            ('Newfoundland and Labrador', 512930, 370501.69),
            ('Prince Edward Island', 135294, 5684.39),
            ('Nova Scotia', 908007, 52917.43),
            ('New Brunswick', 729498, 71355.67),
            ('Quebec', 7237479, 1357743.08),
            ('Ontario', 11410046, 907655.59),
            ('Manitoba', 1119583, 551937.87),
            ('Saskatchewan', 978933, 586561.35),
            ('Alberta', 2974807, 639987.12),
            ('British Columbia', 3907738, 926492.48),
            ('Yukon Territory', 28674, 474706.97),
            ('Northwest Territories', 37360, 1141108.37),
            ('Nunavut', 26745, 1925460.18),
        ]
        cursor.executemany('INSERT INTO Density VALUES (?, ?, ?)', table)
    connection.commit()
    cursor.execute('SELECT * FROM Density')
    rows = cursor.fetchall()
    for line in rows:
        print(line)
    return rows
def get_pop():
    """Print and return every Population value in the Density table.

    Improvement: the original only printed and returned None; returning the
    fetched rows makes the result usable programmatically while remaining
    backward-compatible for callers that ignore the return value.

    Returns:
        list of 1-tuples, e.g. [(512930,), ...], in insertion order.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute('SELECT Population FROM Density')
    rows = cursor.fetchall()
    for line in rows:
        print(line)
    return rows
def get_prov_lt10mill():
    """Print and return provinces with Population below 1,000,000.

    Note: the name suggests 10 million but the original query threshold is
    1,000,000; the query is kept unchanged. Now also returns the rows
    (backward-compatible generalization of the print-only original).

    Returns:
        list of 1-tuples (province_name,) in insertion order.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute('SELECT Province FROM Density WHERE Population < 1000000')
    rows = cursor.fetchall()
    for line in rows:
        print(line)
    return rows
def get_prov_lt10mill_gt5mill():
    """Print and return provinces with Population < 1M or > 5M.

    Now also returns the rows (backward-compatible generalization of the
    print-only original).

    Returns:
        list of 1-tuples (province_name,) in insertion order.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute('SELECT Province FROM Density WHERE (Population < 1000000 or Population > 5000000)')
    rows = cursor.fetchall()
    for line in rows:
        print(line)
    return rows
def get_prov_nlt10mill_ngt5mill():
    """Print and return provinces with Population between 1M and 5M inclusive.

    The query is the logical complement of get_prov_lt10mill_gt5mill.
    Now also returns the rows (backward-compatible generalization of the
    print-only original).

    Returns:
        list of 1-tuples (province_name,) in insertion order.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute('SELECT Province FROM Density WHERE NOT(Population < 1000000 or Population > 5000000)')
    rows = cursor.fetchall()
    for line in rows:
        print(line)
    return rows
def get_prov_landgt200th():
    """Print and return provinces whose Area exceeds 200,000 km^2.

    Now also returns the rows (backward-compatible generalization of the
    print-only original).

    Returns:
        list of 1-tuples (province_name,) in insertion order.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute('SELECT Province FROM Density WHERE Area > 200000')
    rows = cursor.fetchall()
    for line in rows:
        print(line)
    return rows
def get_popden():
    """Print and return (province, population density) pairs.

    Density is Population / Area; SQLite promotes INTEGER / REAL to REAL.
    Now also returns the rows (backward-compatible generalization of the
    print-only original).

    Returns:
        list of (province_name, density) tuples in insertion order.
    """
    connection = connect('census.db')
    cursor = connection.cursor()
    cursor.execute('SELECT Province, Population / Area FROM Density')
    rows = cursor.fetchall()
    for line in rows:
        print(line)
    return rows
if __name__ == '__main__':
get_popden()
| [
"justin.minsk@gmail.com"
] | justin.minsk@gmail.com |
21c9c76e357ed82d65fb33410d6e55d014fba9f3 | 18df7bd3c6a4e35f93b0163b09f0bd304fd82fb9 | /conda/cli/main_run.py | 2e25b2a4149c2a1bf1f325e83c47d951004d188c | [
"BSD-3-Clause",
"MIT"
] | permissive | mitchellkrogza/conda | f1d092d913ac121e3df705decfb3b2e584bf829b | 958f4056578282ef380cdbfc09d3dd736cc5643a | refs/heads/master | 2020-03-25T06:46:56.095771 | 2018-08-02T05:28:42 | 2018-08-02T05:28:42 | 143,523,511 | 1 | 0 | null | 2018-08-04T11:34:43 | 2018-08-04T11:34:43 | null | UTF-8 | Python | false | false | 2,723 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
from os.path import abspath, join
import sys
from tempfile import NamedTemporaryFile
from ..base.context import context
from ..common.compat import ensure_binary, iteritems, on_win
from ..gateways.disk.delete import rm_rf
from ..gateways.subprocess import subprocess_call
def get_activated_env_vars():
    """Return the environment variables of the activated target prefix.

    Spawns a platform-appropriate subshell, activates ``context.target_prefix``
    inside it, and captures the resulting environment.

    Returns:
        dict mapping variable names to values, normalized to native str.
    """
    env_location = context.target_prefix
    if on_win:
        env_var_map = _get_activated_env_vars_win(env_location)
    else:
        env_var_map = _get_activated_env_vars_unix(env_location)
    # Coerce keys and values to native str for py2/py3 consistency.
    env_var_map = {str(k): str(v) for k, v in iteritems(env_var_map)}
    return env_var_map
def _get_activated_env_vars_win(env_location):
    """Windows variant: activate *env_location* via conda.bat and dump os.environ.

    Writes a throw-away .bat file that prints the activated environment as
    JSON, runs it through cmd.exe with conda activation chained in front, and
    parses the JSON from stdout.
    """
    try:
        conda_bat = os.environ["CONDA_BAT"]
    except KeyError:
        # Fall back to the conda.bat shipped alongside this interpreter.
        conda_bat = abspath(join(sys.prefix, 'condacmd', 'conda.bat'))
    temp_path = None
    try:
        # delete=False: the file must survive the `with` block so cmd.exe can
        # execute it; we remove it ourselves in the finally clause.
        with NamedTemporaryFile('w+b', suffix='.bat', delete=False) as tf:
            temp_path = tf.name
            tf.write(ensure_binary(
                "@%CONDA_PYTHON_EXE% -c \"import os, json; print(json.dumps(dict(os.environ)))\""
            ))
        # TODO: refactor into single function along with code in conda.core.link.run_script
        cmd_builder = [
            "%s" % os.getenv('COMSPEC', 'cmd.exe'),
            "/C \"",
            "@SET PROMPT= ",
            "&&",
            "@SET CONDA_CHANGEPS1=false",
            "&&",
            "@CALL {0} activate \"{1}\"".format(conda_bat, env_location),
            "&&",
            "\"{0}\"".format(tf.name),
            "\"",
        ]
        cmd = " ".join(cmd_builder)
        result = subprocess_call(cmd)
    finally:
        # Always clean up the temporary .bat file, even on failure.
        if temp_path:
            rm_rf(temp_path)
    # Any stderr output indicates activation went wrong; fail loudly.
    assert not result.stderr, result.stderr
    env_var_map = json.loads(result.stdout)
    return env_var_map
def _get_activated_env_vars_unix(env_location):
    """Unix variant: activate *env_location* in `sh` and dump os.environ as JSON."""
    try:
        conda_exe = os.environ["CONDA_EXE"]
    except KeyError:
        # Fall back to the conda executable next to this interpreter.
        conda_exe = abspath(join(sys.prefix, 'bin', 'conda'))
    cmd_builder = [
        # NOTE: the first two strings are adjacent literals, so "sh -c '" and
        # the eval expression form a single list element after concatenation.
        "sh -c \'"
        "eval \"$(\"{0}\" shell.posix hook)\"".format(conda_exe),
        "&&",
        "conda activate \"{0}\"".format(env_location),
        "&&",
        "\"$CONDA_PYTHON_EXE\" -c \"import os, json; print(json.dumps(dict(os.environ)))\"",
        "\'",
    ]
    cmd = " ".join(cmd_builder)
    result = subprocess_call(cmd)
    # Any stderr output indicates activation went wrong; fail loudly.
    assert not result.stderr, result.stderr
    env_var_map = json.loads(result.stdout)
    return env_var_map
def execute(args, parser):
    """Entry point for ``conda run``: exec the command inside the activated env.

    `parser` is unused here but is part of the subcommand interface.
    """
    # Imported lazily to avoid a circular import with the argparse module.
    from .conda_argparse import _exec
    env_vars = get_activated_env_vars()
    _exec(args.executable_call, env_vars)
| [
"kfranz@continuum.io"
] | kfranz@continuum.io |
a007a093bbf5492cf4d74affab5f7938abe6ad6a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1171.py | fbe4a49e751c83434826e0418b4e5ba6c252d680 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #!/usr/bin/env python
# Google Code Jam "Tic-Tac-Toe-Tomek" (Python 2): classify each 4x4 board as
# a win for X or O (the single 'T' cell counts for either player), a draw,
# or an unfinished game.
T = long(raw_input()) #number of real tests
for t in range(1,T+1):
    # Read the 4 rows of the board into A as lists of single characters.
    A = []
    for i in range(4):
        x = raw_input()
        A.append([q for q in x])
    x = raw_input() #get rid of trailing blank line
    out = ""
    dotcnt = 0
    #we have the board now look for winners
    # Rows: four of a player's symbols (with 'T' as wildcard) is a win.
    # Also tally '.' cells to distinguish Draw from an incomplete game.
    for r in range(4):
        if A[r].count('X') + A[r].count('T') == 4:
            out = "X won"
        elif A[r].count('O') + A[r].count('T') == 4:
            out = "O won"
        dotcnt = dotcnt + A[r].count('.')
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    C=[] #check one diagonal
    for r in range(4):
        C.append(A[r][r]) #build the diagonal
    if C.count('X') + C.count('T') == 4:
        out = "X won"
    elif C.count('O') + C.count('T') == 4:
        out = "O won"
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    C=[] #check other diagonal
    for r in range(4):
        c=3-r
        C.append(A[r][c]) #build the diagonal
    if C.count('X') + C.count('T') == 4:
        out = "X won"
    elif C.count('O') + C.count('T') == 4:
        out = "O won"
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    # Columns: transpose the board into B, then reuse the row check.
    B = []
    x = []
    for c in range(4):
        for r in range(4):
            x.append(A[r][c])
        B.append(x)
        x=[]
    for r in range(4):
        if B[r].count('X') + B[r].count('T') == 4:
            out = "X won"
        elif B[r].count('O') + B[r].count('T') == 4:
            out = "O won"
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    # No winner: empty cells mean the game is still in progress.
    if dotcnt == 0:
        print "Case #%i: %s"%(t,"Draw")
    else:
        print "Case #%i: %s"%(t,"Game has not completed")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c1f31c8430d43e31a6eef4e68c76c7800e5e01cd | 8e246e74d91565f8040f7dffcfc43e8e72c56285 | /pixiedust/utils/dataFrameAdapter.py | f1393bd6b45c532667676e739348a2ab5e8f891f | [
"Apache-2.0"
] | permissive | EldritchJS/pixiedust | 706728e034be6281320a1d927d2acb74c3c20dc5 | 5eea4a7f061fa6278e7d21db2df18accf48c1d5e | refs/heads/master | 2021-01-21T20:29:27.262716 | 2017-05-24T02:30:29 | 2017-05-24T02:30:29 | 92,239,313 | 0 | 0 | null | 2017-05-24T02:05:31 | 2017-05-24T02:05:31 | null | UTF-8 | Python | false | false | 5,093 | py | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import re
from pyspark.sql.types import *
import pixiedust.utils.dataFrameMisc as dataFrameMisc
def createDataframeAdapter(entity):
    """Return a Spark-like facade for *entity*.

    pandas DataFrames are wrapped in a PandasDataFrameAdapter; PySpark
    DataFrames already expose the expected API and pass through untouched.
    Anything else is rejected.
    """
    if dataFrameMisc.isPandasDataFrame(entity):
        return PandasDataFrameAdapter(entity)
    if dataFrameMisc.isPySparkDataFrame(entity):
        return entity
    raise ValueError("Invalid argument")
"""
Adapter interface to Spark APIs. Passed to pixiedust visualizations that expect a Spark DataFrame so they can work
with pandas dataframe with no code change.
This is Experimental, currently support only a subset of the Spark DataFrame APIs.
"""
class PandasDataFrameAdapter(object):
    """Present a pandas DataFrame behind (a subset of) the Spark DataFrame API.

    Experimental adapter handed to pixiedust visualizations that expect a
    Spark DataFrame, so they also work on pandas DataFrames without code
    changes. When the wrapped entity already is a Spark DataFrame, calls are
    forwarded to it directly.
    """
    def __init__(self, entity):
        self.entity = entity
        # True when entity is a genuine Spark DataFrame; drives forwarding.
        self.sparkDF = dataFrameMisc.isPySparkDataFrame(entity)

    def __getattr__(self, name):
        # Only invoked for names not defined on this class: either forward to
        # the underlying Spark DataFrame or emulate the Spark method.
        if self.sparkDF and hasattr(self.entity, name):
            return self.entity.__getattribute__(name)
        if name == "schema":
            return type("AdapterSchema", (), {"fields": self.getFields()})()
        elif name == "groupBy":
            return lambda cols: AdapterGroupBy(self.entity.groupby(cols))
        elif name == "dropna":
            return lambda: PandasDataFrameAdapter(pd.DataFrame(self.entity.dropna()))
        elif name == "sort":
            # Deliberate no-op placeholder: sort is not emulated for pandas.
            return lambda arg: self
        elif name == "select":
            return lambda name: PandasDataFrameAdapter(self.entity[name].reset_index())
        elif name == "orderBy":
            # Bug fix: order by the column the caller asked for instead of a
            # hard-coded "agg" column, and use sort_values (DataFrame.sort
            # was removed from pandas).
            return lambda col: PandasDataFrameAdapter(
                self.entity.sort_values(col, ascending=False))
        raise AttributeError("{0} attribute not found".format(name))

    def count(self):
        """Number of rows, mirroring Spark's DataFrame.count()."""
        if self.sparkDF:
            return self.entity.count()
        else:
            return len(self.entity.index)

    def take(self, num):
        """Return the first *num* rows; pandas rows become {column: value} dicts."""
        if self.sparkDF:
            return self.entity.take(num)
        else:
            df = self.entity.head(num)
            colNames = self.entity.columns.values.tolist()
            def makeJsonRow(row):
                ret = {}
                for i, v in enumerate(colNames):
                    ret[v] = row[i]
                return ret
            return [makeJsonRow(self.entity.iloc[i].values.tolist())
                    for i in range(0, len(df.index))]

    def getFields(self):
        """Return Spark-like field descriptors, one per column."""
        if self.sparkDF:
            return self.entity.schema.fields
        else:
            # must be a pandas dataframe: synthesize objects mimicking a
            # pyspark StructField (name / dataType / jsonValue()).
            def createObj(a, b):
                return type("", (), {
                    "jsonValue": lambda self: {"type": b, "name": a}, "name": a,
                    # Bug fix: np.float was removed from numpy; np.floating is
                    # the correct abstract scalar type for issubdtype checks.
                    "dataType": IntegerType() if np.issubdtype(b, np.integer) or np.issubdtype(b, np.floating) else StringType()
                })()
            return [createObj(a, b) for a, b in zip(self.entity.columns, self.entity.dtypes)]

    def getTypeName(self):
        """Schema type name, or a fixed label for pandas entities."""
        if self.sparkDF:
            return self.entity.schema.typeName()
        else:
            return "Pandas DataFrame Row"

    def toPandas(self):
        """Return the data as a pandas DataFrame (no copy for pandas entities)."""
        if self.sparkDF:
            return self.entity.toPandas()
        else:
            return self.entity
class AdapterGroupBy(object):
    """Wrap a pandas GroupBy object behind Spark's grouped-data API."""

    # Spark aggregation keyword -> pandas GroupBy method name.
    _AGG_METHODS = {"SUM": "sum", "AVG": "mean", "MIN": "min",
                    "MAX": "max", "COUNT": "count"}

    def __init__(self, group):
        self.group = group

    def count(self):
        """Spark-style count(): one row per group plus a "count" column."""
        counts = self.group.size().reset_index(name="count")
        return PandasDataFrameAdapter(counts)

    def agg(self, exp):
        """Evaluate a SQL-ish aggregation expression like "SUM(sales) as total"."""
        match = re.search(r"(\w+?)\((.+?)\)(?:.+?(?:as\s+(\w*))|$)",
                          str(exp), re.IGNORECASE)
        if match is None:
            raise AttributeError(
                "call to agg with not supported expression: {0}".format(str(exp)))
        func_name = match.group(1).upper()
        column = match.group(2)
        alias = match.group(3) or "agg"
        method = self._AGG_METHODS.get(func_name)
        if method is None:
            raise AttributeError(
                "Unsupported aggregation function {0}".format(func_name))
        series = getattr(self.group[column], method)()
        return PandasDataFrameAdapter(series.reset_index(name=alias))
raise AttributeError("Unsupported aggregation function {0}".format(funcName)) | [
"david_taieb@us.ibm.com"
] | david_taieb@us.ibm.com |
34f18b43569133bf9e8e85b99fd90377e982c9b2 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/Facebook/Publishing/PublishNote.py | 5cb13e2c67897b548f536ca97016a58a44a8c195 | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# PublishNote
# Publishes a note on a given profile.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class PublishNote(Choreography):
    """Choreo that publishes a note on a given Facebook profile."""

    def __init__(self, temboo_session):
        """
        Build a PublishNote Choreo bound to *temboo_session*, which must be a
        TembooSession carrying valid Temboo credentials.
        """
        Choreography.__init__(self, temboo_session, '/Library/Facebook/Publishing/PublishNote')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return PublishNoteInputSet()

    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution in a typed execution handle."""
        return PublishNoteChoreographyExecution(session, exec_id, path)

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in a typed result set."""
        return PublishNoteResultSet(result, path)
class PublishNoteInputSet(InputSet):
    """
    InputSet for the PublishNote Choreo: provides typed setters for each
    input parameter used when executing the Choreo.
    """

    def set_AccessToken(self, value):
        """
        AccessToken (required, string): the access token obtained from the
        final step of the OAuth process.
        """
        self._set_input('AccessToken', value)

    def set_Message(self, value):
        """
        Message (required, string): the body of the note.
        """
        self._set_input('Message', value)

    def set_ProfileID(self, value):
        """
        ProfileID (optional, string): id of the profile the note is published
        to; defaults to "me", i.e. the authenticated user.
        """
        self._set_input('ProfileID', value)

    def set_ResponseFormat(self, value):
        """
        ResponseFormat (optional, string): "xml" or "json"; defaults to json.
        """
        self._set_input('ResponseFormat', value)

    def set_Subject(self, value):
        """
        Subject (required, string): subject line for the note being created.
        """
        self._set_input('Subject', value)
class PublishNoteResultSet(ResultSet):
    """
    ResultSet tailored to the values returned by the PublishNote Choreo;
    used to retrieve the outputs of a Choreo execution.
    """

    def getJSONFromString(self, str):
        """Decode a JSON document string into Python objects."""
        return json.loads(str)

    def get_Response(self):
        """
        "Response" output: the raw reply from Facebook, formatted per the
        ResponseFormat input (JSON by default).
        """
        return self._output.get('Response', None)

    def getFacebookObjectId(self):
        """
        Return the id Facebook assigned to the newly created note.
        """
        parsed = self.getJSONFromString(self._output.get('Response', []))
        return parsed.get("id", [])
class PublishNoteChoreographyExecution(ChoreographyExecution):
    # Binds asynchronous execution results to the PublishNote result set type.
    def _make_result_set(self, response, path):
        """Wrap the raw response in a PublishNoteResultSet."""
        return PublishNoteResultSet(response, path)
| [
"cedric.warny@gmail.com"
] | cedric.warny@gmail.com |
b4724b82e389ae6ab9ab29dfe11c7ad6f6d3f090 | ea4e262f3dc18a089895fef08bedefc60b66e373 | /unsupervised_learning/0x04-autoencoders/3-variational.py | e87abd498de0857be678fa67f68543ab58e9abff | [] | no_license | d1sd41n/holbertonschool-machine_learning | 777899d4914e315883ba0c887d891c0c8ab01c8a | 5f86dee95f4d1c32014d0d74a368f342ff3ce6f7 | refs/heads/main | 2023-07-17T09:22:36.257702 | 2021-08-27T03:44:24 | 2021-08-27T03:44:24 | 317,399,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,502 | py | #!/usr/bin/env python3
"""[summary]
Returns:
[type]: [description]
"""
import tensorflow.keras as keras
def autoencoder(input_dims, hidden_layers, latent_dims):
    """Build and compile a variational autoencoder.

    Args:
        input_dims: number of features in the model input.
        hidden_layers: node counts for each encoder hidden layer; the
            decoder mirrors them in reverse order.
        latent_dims: dimensionality of the latent space.

    Returns:
        (encoder, decoder, auto) keras models, where `auto` is compiled
        with Adam and the VAE loss (binary cross-entropy + KL divergence).
    """
    K = keras.backend

    def reparameterize(args):
        # Sample z = mu + exp(log_sigma) * eps, with eps ~ N(0, 0.1).
        mu, log_sigma = args
        batch = K.shape(mu)[0]
        eps = K.random_normal(shape=(batch, latent_dims), mean=0.0, stddev=0.1)
        return mu + K.exp(log_sigma) * eps

    # Encoder: stack of relu layers down to the latent parameters.
    inputs = keras.Input(shape=(input_dims,))
    hidden = inputs
    for units in hidden_layers:
        hidden = keras.layers.Dense(units, activation='relu')(hidden)
    z_mean = keras.layers.Dense(latent_dims)(hidden)
    z_log_sigma = keras.layers.Dense(latent_dims)(hidden)
    z = keras.layers.Lambda(reparameterize)([z_mean, z_log_sigma])

    # Decoder: mirror of the encoder, sigmoid output in [0, 1].
    latent_inputs = keras.Input(shape=(latent_dims,))
    reconstructed = latent_inputs
    for units in hidden_layers[::-1]:
        reconstructed = keras.layers.Dense(units, activation='relu')(reconstructed)
    reconstructed = keras.layers.Dense(input_dims, activation='sigmoid')(reconstructed)

    encoder = keras.Model(inputs, [z, z_mean, z_log_sigma])
    decoder = keras.Model(latent_inputs, reconstructed)
    outputs = decoder(encoder(inputs))
    auto = keras.Model(inputs, outputs)

    def vae_loss(y_true, y_pred):
        # Loss is built from the graph tensors captured above; the arguments
        # keras passes in are intentionally unused.
        reconstruction = keras.losses.binary_crossentropy(inputs, outputs)
        reconstruction = reconstruction * input_dims
        kl = 1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma)
        kl = K.sum(kl, axis=-1) * -0.5
        return K.mean(reconstruction + kl)

    auto.compile(optimizer='adam', loss=vae_loss)
    return encoder, decoder, auto
| [
"1498@holbertonschool.com"
] | 1498@holbertonschool.com |
59916d167f39c2a441b8c66f11211e9e2010d2c8 | 60cdd2f763e8ebd19eae1392a1533ce889123ba2 | /main.py | 10661e422cb540c411bf2c3e0779cff557d333e9 | [] | no_license | shohei/logic_analyzer | 79fd7d88be421cac7989369ef640e51e548b9f1a | 2d662d9e81aacd1c81b0a5d389c891b7ebbca466 | refs/heads/master | 2020-05-21T10:11:58.469328 | 2017-04-05T14:03:42 | 2017-04-05T14:03:42 | 69,441,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,718 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from ctypes import *
from numpy import *
from dwfconstants import *
import math
import sys
import matplotlib.pyplot as plt
import pdb
from decoder import decodemap
if __name__=="__main__":
f = open("record.csv", "w")
if sys.platform.startswith("win"):
dwf = cdll.dwf
elif sys.platform.startswith("darwin"):
dwf = cdll.LoadLibrary("/Library/Frameworks/dwf.framework/dwf")
else:
dwf = cdll.LoadLibrary("libdwf.so")
#declare ctype variables
hdwf = c_int()
sts = c_byte()
#print DWF version
version = create_string_buffer(16)
dwf.FDwfGetVersion(version)
print ("DWF Version: "+version.value)
#open device
print ("Opening first device")
dwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))
if hdwf.value == hdwfNone.value:
print ("failed to open device")
quit()
print ("Configuring Digital Out / In...")
# generate counter
# generate on DIO-0 1MHz pulse (100MHz/25/(3+1)), 25% duty (3low 1high)
#1kHz
# dwf.FDwfDigitalOutEnableSet(hdwf, c_int(i), c_int(1))
# dwf.FDwfDigitalOutDividerSet(hdwf, c_int(i), c_int(25))
# dwf.FDwfDigitalOutCounterSet(hdwf, c_int(i), c_int(3), c_int(1))
# for i in range(0, 16):
for i in range(0, 1):
dwf.FDwfDigitalOutEnableSet(hdwf, c_int(i), c_int(1))
dwf.FDwfDigitalOutDividerSet(hdwf, c_int(i), c_int(25*1000)) #1MHz -> 1kHz
dwf.FDwfDigitalOutCounterSet(hdwf, c_int(i), c_int(3), c_int(1))
for i in range(2, 15):
dwf.FDwfDigitalOutEnableSet(hdwf, c_int(i), c_int(1))
dwf.FDwfDigitalOutDividerSet(hdwf, c_int(i), c_int(25*1000)) #1MHz -> 1kHz
dwf.FDwfDigitalOutCounterSet(hdwf, c_int(i), c_int(4), c_int(0))
dwf.FDwfDigitalOutConfigure(hdwf, c_int(1))
# set number of sample to acquire
nSamples = 1000
# nSamples = 1000
rgwSamples = (c_uint16*nSamples)()
cAvailable = c_int()
cLost = c_int()
cCorrupted = c_int()
cSamples = 0
fLost = 0
fCorrupted = 0
# in record mode samples after trigger are acquired only
# dwf.FDwfDigitalInAcquisitionModeSet(hdwf, acqmodeRecord)
dwf.FDwfDigitalInAcquisitionModeSet(hdwf, acqmodeScanScreen)
# sample rate = system frequency / divider, 100MHz/1000 = 100kHz
dwf.FDwfDigitalInDividerSet(hdwf, c_int(1*100*10)) #10kHz
# 16bit per sample format
dwf.FDwfDigitalInSampleFormatSet(hdwf, c_int(16))
# number of samples after trigger
# dwf.FDwfDigitalInTriggerPositionSet(hdwf, c_int(nSamples))
# trigger when all digital pins are low
# dwf.FDwfDigitalInTriggerSourceSet(hdwf, trigsrcDetectorDigitalIn)
# trigger detector mask: low & hight & ( rising | falling )
# dwf.FDwfDigitalInTriggerSet(hdwf, c_int(0xFFFF), c_int(0), c_int(0), c_int(0))
# 16個のピン全てでローボルテージトリガをかける
# dwf.FDwfDigitalInTriggerSet(hdwf, c_int(0xFFFF), c_int(0), c_int(0), c_int(0))
# begin acquisition
dwf.FDwfDigitalInConfigure(hdwf, c_bool(0), c_bool(1))
print ("Starting record")
plt.ion()
fig = plt.figure() # Create figure
axes = fig.add_subplot(111) # Add subplot (dont worry only one plot appears)
axes.set_autoscale_on(True) # enable autoscale
axes.autoscale_view(True,True,True)
# axes.autoscale_view(True,True,True)
hl, = plt.plot([], [])
hl.set_xdata(range(0,len(rgwSamples)))
# current_range = 0
# while cSamples < nSamples:
x = 0
y = 0
z = 0
while True:
if(cSamples == nSamples):
# current_range += len(rgwSamples)
# hl.set_xdata(range(current_range,current_range+nSamples))
# axes.relim() # Recalculate limits
# axes.autoscale_view(True,True,True) #Autoscale
# plt.draw()
# plt.pause(0.01)
for v in rgwSamples:
hexa = int(v)
x += decodemap.ix[hexa,"x"]
y += decodemap.ix[hexa,"y"]
z += decodemap.ix[hexa,"z"]
f.write("%d %d %d\n" % (x,y,z))
rgwSamples = (c_uint16*nSamples)()
cSamples = 0
dwf.FDwfDigitalInStatus(hdwf, c_int(1), byref(sts))
if cSamples == 0 and (sts == DwfStateConfig or sts == DwfStatePrefill or sts == DwfStateArmed) :
# acquisition not yet started.
continue
dwf.FDwfDigitalInStatusRecord(hdwf, byref(cAvailable), byref(cLost), byref(cCorrupted))
cSamples += cLost.value
if cLost.value:
fLost = 1
print ("Samples were lost! Reduce sample rate")
if cCorrupted.value:
print ("Samples could be corrupted! Reduce sample rate")
fCorrupted = 1
if cAvailable.value==0 :
continue
if cSamples+cAvailable.value > nSamples :
cAvailable = c_int(nSamples-cSamples)
dwf.FDwfDigitalInStatusData(hdwf, byref(rgwSamples, 2*cSamples), c_int(2*cAvailable.value))
# print cAvailable.value
cSamples += cAvailable.value
# total_pulse += len((nonzero(rgwSamples))[0])
# hl.set_ydata(rgwSamples)
# axes.relim() # Recalculate limits
# axes.autoscale_view(True,True,True) #Autoscale
# plt.draw()
# plt.pause(0.01)
#never reached
dwf.FDwfDeviceClose(hdwf)
f.close()
| [
"shoaok@gmail.com"
] | shoaok@gmail.com |
2505e320f99f41f3496e7a095506b46a630e9e81 | e1a7d00dbe27403427078c627ccebe1562a6049d | /mercury/plugin/client/activity_window.py | 800a6df0ee4cca37dbc125cb1e0e71b51922cb5f | [
"Apache-2.0"
] | permissive | greenlsi/mercury_mso_framework | f24fc167230057bb07b7de5dc9fbb10490293fee | cb425605de3341d27ce43fb326b300cb8ac781f6 | refs/heads/master | 2023-04-28T02:18:16.362823 | 2023-04-18T12:03:23 | 2023-04-18T12:03:23 | 212,610,400 | 2 | 1 | Apache-2.0 | 2023-03-02T14:36:56 | 2019-10-03T15:12:32 | Python | UTF-8 | Python | false | false | 731 | py | from ..common.event_generator import *
class SrvActivityWindowGenerator(EventGenerator[None], ABC):
    """Abstract base class for service activity-window generators."""
    pass
class ConstantSrvWindowGenerator(SrvActivityWindowGenerator, PeriodicGenerator[None]):
    """Activity windows of fixed length: the 'length' keyword is forwarded to
    the periodic generator as its period."""
    def __init__(self, **kwargs):
        super().__init__(**{**kwargs, 'period': kwargs['length']})
class UniformSrvWindowGenerator(SrvActivityWindowGenerator, UniformDistributionGenerator[None]):
    """Window lengths drawn from a uniform distribution."""
    pass
class GaussianSrvWindowGenerator(SrvActivityWindowGenerator, GaussianDistributionGenerator[None]):
    """Window lengths drawn from a Gaussian distribution."""
    pass
class ExponentialSrvWindowGenerator(SrvActivityWindowGenerator, ExponentialDistributionGenerator[None]):
    """Window lengths drawn from an exponential distribution."""
    pass
class LambdaSrvSessionDuration(LambdaDrivenGenerator[None]):
    """Session durations produced by a user-supplied lambda."""
    pass
| [
"rcardenas.rod@gmail.com"
] | rcardenas.rod@gmail.com |
5cb11f9692234a96b5d785b845b15d74b1f63c91 | 52b6508d4f6f38f068b27c414970aa21460a7b25 | /terraform_validator/custom_rules/ManagedPolicyOnUserRule.py | 713d17af796bee5e9815401961f5155ef3e1b3c0 | [
"MIT"
] | permissive | rubelw/terraform-validator | 97698751ed828e54b4257a378c2dd21c5ec9bf24 | a9d0335a532acdb4070e5537155b03b34915b73e | refs/heads/master | 2020-03-31T04:09:22.435690 | 2018-10-22T03:24:40 | 2018-10-22T03:24:40 | 151,893,251 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | from __future__ import absolute_import, division, print_function
import inspect
import sys
from builtins import (str)
from terraform_validator.custom_rules.BaseRule import BaseRule
def lineno():
    """Return a debug tag containing this module's name, the caller's
    function name and the caller's line number."""
    caller_name = inspect.stack()[1][3]
    caller_line = inspect.currentframe().f_back.f_lineno
    return (' - ManagedPolicyOnUserRule - caller: ' + str(caller_name)
            + ' - line number: ' + str(caller_line))
class ManagedPolicyOnUserRule(BaseRule):
    """Flags IAM managed policies that are attached directly to users
    instead of to groups."""

    def __init__(self, cfn_model=None, debug=None):
        """Initialize the rule with the parsed CloudFormation model."""
        BaseRule.__init__(self, cfn_model, debug=debug)

    def rule_text(self):
        """Return the human-readable description of this rule."""
        if self.debug:
            print('rule_text'+lineno())
        return 'IAM managed policy should not apply directly to users. Should be on group'

    def rule_type(self):
        """Return (and record on the instance) the violation severity."""
        self.type = 'VIOLATION::FAILING_VIOLATION'
        return self.type

    def rule_id(self):
        """Return (and record on the instance) the rule identifier."""
        if self.debug:
            print('rule_id'+lineno())
        self.id = 'F12'
        return self.id

    def audit_impl(self):
        """Collect the logical ids of managed policies that list any users.

        :return: list of violating logical resource ids
        """
        if self.debug:
            print('ManagedPolicyOnUserRule - audit_impl'+lineno())
        violating_policies = []
        resources = self.cfn_model.resources_by_type('AWS::IAM::ManagedPolicy')
        if not resources:
            if self.debug:
                print('no violating_policies' + lineno())
            return violating_policies
        for resource in resources:
            if self.debug:
                print('resource: '+str(resource)+lineno())
            if not hasattr(resource, 'users') or not resource.users:
                continue
            if self.debug:
                print('users: '+str(resource.users))
            if len(resource.users) > 0:
                violating_policies.append(str(resource.logical_resource_id))
        return violating_policies
"rubelwi@Wills-MacBook-Pro.local"
] | rubelwi@Wills-MacBook-Pro.local |
b318fa6c4d7fb0cbd18c7f80899aac561a1d4362 | be50b4dd0b5b8c3813b8c3158332b1154fe8fe62 | /Backtracking/Python/MaximalString.py | 72a24b627e0102f42fd6027473225816f4bd5698 | [] | no_license | Zimmermann25/InterviewBit | a8d89e090068d9644e28085625963c8ce75d3dff | 6d2138e740bd5ba8eab992d9bf090977e077bfc5 | refs/heads/main | 2023-03-24T18:12:48.244950 | 2021-03-24T14:36:48 | 2021-03-24T14:36:48 | 350,835,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | class Solution:
    # @param A : string
    # @param B : integer
    # @return a string
    # Backtracking forced awkwardly; in C++ the program crashes for the data
    # below, while this iterative version runs fine yet the answer is still
    # not accepted:
    # 10343456789765432457689065543876
    # 10
    # The C++ backtracking version from the comments is accepted, although it
    # already blows up for |A| of a dozen-plus; here it runs correctly but is
    # not accepted :(
def solve(self, A, B):
arr = list(A)
counter = 0
i = 0
while i < len(arr):# tak jak w selection sort, ale tutaj maks B swapów
maxIndex = i
k = i+1
#print("A[maxIndex]: ", arr[i], "arr: ", arr, "i: ", i)
while k < len(arr):
if arr[k] > arr[maxIndex]: # co z >=??
maxIndex = k
if arr[k]==9:break
k+=1
if maxIndex!=i:#sprawdz, czy wykonac swap
arr[i], arr[maxIndex] = arr[maxIndex], arr[i]
counter +=1
if counter ==B:break
i +=1
return "".join(arr) | [
"noreply@github.com"
] | Zimmermann25.noreply@github.com |
187c493ffee7eea9d2b9be959e9c3f10767c80e0 | b8755b5f0b5a3c1bba1270cc8f20dc172abb0634 | /django_data/jobs/forms.py | ac34414120aaf22d30e0ee7da22da786f158b801 | [
"Apache-2.0"
] | permissive | AndersonHJB/Django_Leraning | bf44af05b0e604342fd97cb8699385461cbbb965 | 95c34057f643b234478e72665c6454ebd99cb6cd | refs/heads/main | 2023-07-12T02:47:34.289089 | 2021-08-15T10:17:32 | 2021-08-15T10:17:32 | 367,765,444 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | from django.forms import ModelForm
from .models import Resume
class ResumeForm(ModelForm):
    """ModelForm exposing the editable candidate fields of the Resume model."""
    class Meta:
        model = Resume
        # Field order groups: identity/contact, application basics,
        # education, then free-text narrative sections.
        fields = ["username", "city", "phone",
                  "email", "apply_position", "born_address", "gender", "picture", "attachment",
                  "bachelor_school", "master_school", "major", "degree",
                  "candidate_introduction", "work_experience", "project_experience"]
"1432803776@qq.com"
] | 1432803776@qq.com |
5d53cc8f424a2e4af644b6cfad96ee5e9faf1337 | e831c22c8834030c22c54b63034e655e395d4efe | /Array/36-ValidSudoku.py | 021ad548ba09cc2b8417a947f7488b2aedd40882 | [] | no_license | szhmery/leetcode | a5eb1a393422b21f9fd4304b3bdc4a9db557858c | 9fcd1ec0686db45d24e2c52a7987d58c6ef545a0 | refs/heads/master | 2023-08-16T00:27:56.866626 | 2021-10-23T07:35:37 | 2021-10-23T07:35:37 | 331,875,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | from typing import List
class Solution():
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True if no digit repeats within any row, column or 3x3 box.

        Empty cells are marked '.' and are ignored; the grid does not need
        to be solvable, only free of conflicts.
        """
        seen = set()
        for r in range(9):
            for c in range(9):
                cell = board[r][c]
                if cell == '.':
                    continue
                digit = int(cell)
                marks = (('row', r, digit),
                         ('col', c, digit),
                         ('box', r // 3, c // 3, digit))
                if any(mark in seen for mark in marks):
                    return False
                seen.update(marks)
        return True
if __name__ == '__main__':
    solver = Solution()
    # A conflict-free grid: no digit repeats in any row, column or 3x3 box.
    valid_board = [["5", "3", ".", ".", "7", ".", ".", ".", "."],
                   ["6", ".", ".", "1", "9", "5", ".", ".", "."],
                   [".", "9", "8", ".", ".", ".", ".", "6", "."],
                   ["8", ".", ".", ".", "6", ".", ".", ".", "3"],
                   ["4", ".", ".", "8", ".", "3", ".", ".", "1"],
                   ["7", ".", ".", ".", "2", ".", ".", ".", "6"],
                   [".", "6", ".", ".", ".", ".", "2", "8", "."],
                   [".", ".", ".", "4", "1", "9", ".", ".", "5"],
                   [".", ".", ".", ".", "8", ".", ".", "7", "9"]]
    # Same grid except the corner becomes "8", clashing with the "8"
    # already present in column 0 (row 3), so it must be rejected.
    invalid_board = [row[:] for row in valid_board]
    invalid_board[0][0] = "8"
    for grid in (valid_board, invalid_board):
        print("Is valid? -> {}".format(solver.isValidSudoku(grid)))
| [
"szhmery@gmail.com"
] | szhmery@gmail.com |
d7278abc6eb7faec64da695befeefb216e2a9c29 | ed12b604e0626c1393406d3495ef5bbaef136e8a | /Iniciante/Python/exercises from 1000 to 1099/exercise_1017.py | b2aa81cfeb034d2763bee49764fc7b6004986d30 | [] | no_license | NikolasMatias/urionlinejudge-exercises | 70200edfd2f9fc3889e024dface2579b7531ba65 | ca658ee8b2100e2b687c3a081555fa0770b86198 | refs/heads/main | 2023-09-01T20:33:53.150414 | 2023-08-21T07:07:32 | 2023-08-21T07:07:32 | 361,160,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def calculateLitros(horasViagem, velocidadeMedia):
distancia = velocidadeMedia*horasViagem
qtdeLitros = distancia / 12.0
print("{:.3f}".format(qtdeLitros))
# Read trip duration (hours) then average speed (km/h) from stdin.
calculateLitros(int(input()), int(input()))
"nikolas.matias500@gmail.com"
] | nikolas.matias500@gmail.com |
235f90dbc5a2bfaf134a72fb2b5c6503e62e0fcc | 4cc285b0c585241ff4404087e6fbb901195639be | /NeuralNetworkNumbers/venv/Lib/site-packages/sklearn/covariance/_shrunk_covariance.py | 38d8f9e44a41fcd0a6b9e914976237455456d8e5 | [] | no_license | strazhg/NeuralNetworksPython | 815542f4ddbb86e918e657f783158f8c078de514 | 15038e44a5a6c342336c119cdd2abdeffd84b5b1 | refs/heads/main | 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:7eb0d2a7c9800d929396246a06bff3d5f7db239c9aa22020464f240899bb46d2
size 21096
| [
"golubstrazh@gmail.com"
] | golubstrazh@gmail.com |
0a4ecd1ec7cc3e33c5458bf9b06e84e770f48c95 | cad46af6291d48f3b3d7cc4fdf4357cae7243803 | /SDscript_Butterworth_GD_edit.py | 80e98c5a4e8ae15238fc2d74482d39e3f8ff4589 | [] | no_license | gddickinson/flika_scripts | f0977ff8911ba445a38db834a69cd7171c9bacf8 | 429de8bafc64e7a1e4a9f7828f7db38661cfe6d2 | refs/heads/master | 2023-06-23T16:24:13.843686 | 2023-06-15T21:34:59 | 2023-06-15T21:34:59 | 173,676,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,065 | py | # Instructions:
# First, open a window.
# Trim the window in time.
# Then black level subtract: 90 for LLS, 520 for TIRF.
# Set parameters.
# Run script.
####################################
### Set post-filter type ####
####################################
#postFilterType = 'butterworth'
postFilterType = 'savgol'
################################
#### Parameters ############
################################
sigma = 2 # Change this number if you want to vary the sigma of the gaussian blur
sampling_interval = 10 # frame duration in ms
q = 0.06 # for LLS: 10ms frame duration, q=0.02; 20ms frame duration, q=0.05
# for TIRF: 10ms frame duration, q=0.1902
#Butterworth filter options
low_cutoff = 1 # Hz
high_cutoff = 20 # Hz
filter_order = 3 # Increasing filter order increases the steepness of the filter rolloff
#Sav-Gol filter options
window_length = 21 # The length of the filter window (i.e. the number of coefficients). Must be a positive odd integer.
polyorder = 5 # The order of the polynomial used to fit the samples. polyorder must be less than window_length.
#Convolution filter options
boxcar_width = 150 # boxcar width in terms of ms
#######################################
## Run after specifying parameters ###
#######################################
from scipy.ndimage.filters import convolve
sampling_rate = 1/(sampling_interval/1000) # in Hz
try:
assert high_cutoff <= .5 * sampling_rate
except AssertionError:
print('High Frequency Cutoff is above the Nyquist frequency. Lower your high frequency cutoff')
high_cutoff_scaled = high_cutoff / (sampling_rate/2)
low_cutoff_scaled = low_cutoff / (sampling_rate/2)
boxcar_frames = int(np.round(boxcar_width / sampling_interval))
#For testing
#A = np.sqrt(10) * np.random.randn(10000, 10,10) + 10
#Window(A, 'original image')
nFrames = g.win.mt
prefilter = gaussian_blur(sigma, keepSourceWindow=True)
A = prefilter.image
if postFilterType == 'butterworth':
postfilter = butterworth_filter(filter_order, low_cutoff_scaled, high_cutoff_scaled, keepSourceWindow=True)
B = postfilter.image
prefilter.close()
#postfilter.close()
Window(A, 'original image -> gaussian blur')
if postFilterType == 'savgol':
if window_length % 2 != 1 or window_length < 1:
raise TypeError("window_length size must be a positive odd number")
if window_length < polyorder + 2:
raise TypeError("window_length is too small for the polynomials order")
B = scipy.signal.savgol_filter(A, window_length, polyorder, axis=0)
Window(B, 'original image -> gaussian blur -> savgol filtered')
mean_A = convolve(A, weights=np.full((boxcar_frames,1,1),1.0/boxcar_frames))
mean_B = convolve(B, weights=np.full((boxcar_frames,1,1),1.0/boxcar_frames))
B2 = B**2 # B squared
mean_B2 = convolve(B2, weights=np.full((boxcar_frames,1,1),1.0/boxcar_frames))
variance_B = mean_B2 - mean_B**2
stdev_B = np.sqrt(variance_B)
mean_A[mean_A<0] = 0 #removes negative values
Window(stdev_B - np.sqrt(q*mean_A), 'stdev minus sqrt mean') | [
"george.dickinson@gmail.com"
] | george.dickinson@gmail.com |
c8f3b5602a77ff20e816318a011523b2773f2071 | 02a68279e0d04340de4f87f7737b351cd6da1420 | /run_auxvae.py | 9fbf2250de6204d7cac36c03e1d5a7dc69e55abb | [
"MIT"
] | permissive | lim0606/AdversarialVariationalBayes | ffdbb875bae666f06913503b1bcd417c1b4e948f | 93487ca64007c8381e1ed5fc3d131b5da751ba47 | refs/heads/master | 2020-07-25T16:06:22.420746 | 2019-09-13T21:32:57 | 2019-09-13T21:32:57 | 208,349,706 | 0 | 0 | MIT | 2019-09-13T21:31:01 | 2019-09-13T21:31:01 | null | UTF-8 | Python | false | false | 4,363 | py | import os
import scipy.misc
import numpy as np
import argparse
from avb.utils import pp
from avb import inputs
from avb.auxvae.train import train
from avb.auxvae.test import test
from avb.decoders import get_decoder
from avb.auxvae.models import get_encoder, get_encoder_aux, get_decoder_aux
import tensorflow as tf
parser = argparse.ArgumentParser(description='Train and run a avae.')
# --- optimization / training schedule ---
parser.add_argument("--nsteps", default=200000, type=int, help="Iterations to train.")
parser.add_argument("--learning-rate", default=1e-4, type=float, help="Learning rate of for adam.")
parser.add_argument("--ntest", default=100, type=int, help="How often to run test code.")
parser.add_argument("--batch-size", default=64, type=int, help="The size of batch images.")
# --- image geometry ---
parser.add_argument("--image-size", default=108, type=int, help="The size of image to use (will be center cropped).")
parser.add_argument("--output-size", default=64, type=int, help="The size of the output images to produce.")
# --- network architectures ---
parser.add_argument("--encoder", default="conv0", type=str, help="Architecture to use.")
parser.add_argument("--decoder", default="conv0", type=str, help="Architecture to use.")
parser.add_argument("--adversary", default="conv0", type=str, help="Architecture to use.")
# --- latent space / distributions ---
parser.add_argument("--c-dim", default=3, type=int, help="Dimension of image color. ")
parser.add_argument("--z-dim", default=100, type=int, help="Dimension of latent space.")
parser.add_argument("--a-dim", default=100, type=int, help="Dimension for auxiliary variables.")
parser.add_argument("--z-dist", default="gauss", type=str, help="Prior distribution of latent space.")
parser.add_argument("--cond-dist", default="gauss", type=str, help="Conditional distribution.")
# --- annealing (string default "0" is still converted by type=int) ---
parser.add_argument("--anneal-steps", default="0", type=int, help="How many steps to use for annealing.")
parser.add_argument("--is-anneal", default=False, action='store_true', help="True for training, False for testing.")
# --- data and output locations ---
parser.add_argument("--dataset", default="celebA", type=str, help="The name of dataset.")
parser.add_argument("--data-dir", default="data", type=str, help="Path to the data directory.")
parser.add_argument('--split-dir', default='data/splits', type=str, help='Folder where splits are found')
parser.add_argument("--log-dir", default="tf_logs", type=str, help="Directory name to save the checkpoints.")
parser.add_argument("--sample-dir", default="samples", type=str, help="Directory name to save the image samples.")
parser.add_argument("--eval-dir", default="eval", type=str, help="Directory where to save logs.")
# --- run mode ---
parser.add_argument("--is-train", default=False, action='store_true', help="True for training, False for testing.")
parser.add_argument("--is-01-range", default=False, action='store_true', help="If image is constrained to values between 0 and 1.")
# --- evaluation (ITE / AIS) options ---
parser.add_argument("--test-nite", default=0, type=int, help="Number of iterations of ite.")
parser.add_argument("--test-nais", default=10, type=int, help="Number of iterations of ais.")
parser.add_argument("--test-ais-nchains", default=16, type=int, help="Number of chains for ais.")
parser.add_argument("--test-ais-nsteps", default=100, type=int, help="Number of annealing steps for ais.")
parser.add_argument("--test-ais-eps", default=1e-2, type=float, help="Stepsize for AIS.")
parser.add_argument("--test-is-center-posterior", default=False, action='store_true', help="Wether to center posterior plots.")
def main():
    """Parse CLI options, build the model components, and run training
    or evaluation of the auxiliary VAE."""
    args = parser.parse_args()
    config = vars(args)
    # Fixed options that are not exposed on the command line.
    config['gf_dim'] = 64
    config['df_dim'] = 64
    config['test_is_adaptive_eps'] = False
    pp.pprint(config)
    for required_dir in (args.log_dir, args.sample_dir):
        if not os.path.exists(required_dir):
            os.makedirs(required_dir)
    decoder = get_decoder(args.decoder, config)
    encoder = get_encoder(args.encoder, config)
    # NOTE(review): both auxiliary networks are selected by the *encoder*
    # architecture name, mirroring the original code — confirm intended.
    decoder_aux = get_decoder_aux(args.encoder, config)
    encoder_aux = get_encoder_aux(args.encoder, config)
    if args.is_train:
        x_train = inputs.get_inputs('train', config)
        x_val = inputs.get_inputs('val', config)
        train(encoder, decoder, encoder_aux, decoder_aux, x_train, x_val, config)
    else:
        x_test = inputs.get_inputs('test', config)
        test(encoder, decoder, encoder_aux, decoder_aux, x_test, config)
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"lars.mescheder@tuebingen.mpg.de"
] | lars.mescheder@tuebingen.mpg.de |
a12e92b19c21b0082dfaee4fd0e55de9baa0a579 | 963b4cf9fe1de845d994d0c8d3c9bb3def326b5b | /SomeProgs/Python Stuff/Coding Assignments/MeanMedianMode.py | 227d8e60f3814bd424ed898824903f4285954e58 | [] | no_license | lws803/cs1010_A0167 | aa727bdf029168238674d84ea6ce9c75905b8971 | 5759332364909ee1d2eb9c26b0d95d4dc153656f | refs/heads/master | 2022-03-13T02:52:26.488846 | 2019-11-14T20:53:15 | 2019-11-14T20:53:15 | 105,607,027 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # Name: Tan Tze Guang
# Class: 13S28
# Date: 26 March 2013
# This program finds the mean, median and mode of a list of numbers
def mean(List):
    """Return the arithmetic mean of the numbers in List.

    Bug fix: the old loop iterated `range(len(List))` and summed the loop
    indices 0..n-1 instead of the list elements, so the result was wrong
    for any data.
    """
    return sum(List) / len(List)
def median(List):
    """Return the median of the numbers in List.

    Sorts List in place (preserving the original function's side effect).

    Bug fix: for an even count the old code averaged the elements at
    indices n//2 and n//2 + 1; the correct pair is n//2 - 1 and n//2
    (the two middle elements of the sorted list).
    """
    List.sort()  # sort into numerical order, in place as before
    n = len(List)
    if n % 2 == 0:
        return (List[n // 2 - 1] + List[n // 2]) / 2
    return List[n // 2]
def mode(List):
    """Return the most frequent value in List.

    When several values are tied for the highest frequency, the largest
    value is returned — matching the original implementation, which
    "removed the smaller mode" on ties.

    Fixes over the original: the input list is no longer destroyed (the
    old code popped it down to a single element), the debug prints are
    removed, and the O(n^2) pop loop is replaced with a single counting
    pass.
    """
    from collections import Counter
    counts = Counter(List)
    # Rank by frequency first, then by value, so ties keep the larger value.
    best_value, _ = max(counts.items(), key=lambda kv: (kv[1], kv[0]))
    return best_value
def main():
    """Print the mean, median and mode of a fixed demonstration list."""
    print("This program finds the mean,median and mode of a list of numbers.")
    print("Currently, the program is only able to find 1 mode.")
    print("In the case of multiple modes, the smaller mode will be removed.")
    print("")
    data = [8, 6, 7, 9, 9, 6, 4, 4, 6, 8, 9, 9, 9, 8, 7, 7, 6]
    print("The list has", len(data), "numbers")
    print()
    print("The mean of this list of numbers is", mean(data))
    print()
    print("The median of this list of numbers is", median(data))
    print()
    print("The mode of this list of numbers is", mode(data))
main()
| [
"omnikron96@gmail.com"
] | omnikron96@gmail.com |
7d2c62c8741ade915952b7fbf0f4c30ee5fa5b0f | ce90676fd0867aced31b86cb6b05db1b5f0f3828 | /random/tst234.py | cac1d82fe7ef0ebacd1700f1777d040e9c14b528 | [] | no_license | KoliosterNikolayIliev/Python_101_NI_Solutions | 9733c1e10967468fe48fc752532bcefacdebcfa9 | 2b878796bbdeff33590036560c0188c72f8fdb3f | refs/heads/main | 2023-08-11T01:08:19.482456 | 2021-10-03T17:33:32 | 2021-10-03T17:33:32 | 369,302,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,351 | py | import requests
import uuid
# SECURITY: a live-looking API token is hard-coded and committed in source.
# Move it to an environment variable or secrets store and rotate the
# exposed credential.
headers = {'Authorization': 'Token 201ad808f1e2dd3136777f56db2568a08fbfc219'}
def get_banks_by_country(country):
    """Return the JSON list of all banks available in the given country."""
    url = f'https://ob.nordigen.com/api/aspsps/?country={country}'
    return requests.get(url, headers=headers).json()


def get_bank_by_id(bank_id):
    """Return the JSON description of the bank with the given id."""
    url = f'https://ob.nordigen.com/api/aspsps/{bank_id}'
    return requests.get(url, headers=headers).json()
def create_end_user_agreement(max_historical_days, enduser_id, aspsp_id):
    """Create an end-user agreement.

    Only needed when you want more than the default 90 days of transaction
    history; skip this step otherwise.

    :param max_historical_days: length of the transaction history to retrieve
    :param enduser_id: unique end-user ID of someone using your services (usually a UUID)
    :param aspsp_id: id of the bank
    """
    payload = {
        'max_historical_days': max_historical_days,
        'enduser_id': enduser_id,
        'aspsp_id': aspsp_id,
    }
    return requests.post('https://ob.nordigen.com/api/agreements/enduser/',
                         headers=headers, data=payload).json()
def create_requisition(enduser_id, reference, redirect, agreements, user_language=''):
    """Create a requisition: a collection of inputs for creating links and
    retrieving accounts.

    :param enduser_id: if you made a user agreement, this must match the agreement's id
    :param reference: additional layer of unique ID defined by you
    :param redirect: URL where the end user will be redirected after finishing authentication in ASPSP
    :param agreements: array of user-agreement ID(s), or an empty array if none was created
    :param user_language: optional
    """
    payload = {
        'enduser_id': enduser_id,
        'reference': reference,
        'redirect': redirect,
        'agreements': agreements,
        'user_language': user_language,
    }
    return requests.post('https://ob.nordigen.com/api/requisitions/',
                         headers=headers, data=payload).json()
def build_link(requisition_id, aspsp_id):
    """Build the link the end user follows to authenticate with the bank (ASPSP)."""
    url = f'https://ob.nordigen.com/api/requisitions/{requisition_id}/links/'
    return requests.post(url, headers=headers,
                         data={'aspsp_id': aspsp_id}).json()
def list_accounts(requisition_id):
    """List the user's bank accounts attached to the given requisition ID."""
    url = f'https://ob.nordigen.com/api/requisitions/{requisition_id}/'
    return requests.get(url, headers=headers).json()
"""
How to use nordigen api:
step 1: Get Access Token - https://ob.nordigen.com/
step 2: Choose a Bank - use get_banks_by_country() function to chose available banks.
step 3: Create an end-user agreement - (optional) if you want more than 90 transaction history days
use create_end_user_agreement() function
step 4: Create a requisition - user create_requisition function
step 5: Build a Link - when you created requisition you can build a link for authentication in ASPSP use
build_link() function
step 6: Access accounts - when you connected an account when you use list_accounts() function with the requisition_id
that you created you should see a accounts id's
step 7: Now when you connected an bank account you can use the functions bellow to get the data you need.
"""
def get_account_metadata(account_id):
    """Return the metadata of the connected account."""
    url = f'https://ob.nordigen.com/api/accounts/{account_id}/'
    return requests.get(url, headers=headers).json()


def get_account_balances(account_id):
    """Return the balances of the connected account."""
    url = f'https://ob.nordigen.com/api/accounts/{account_id}/balances/'
    return requests.get(url, headers=headers).json()


def get_account_details(account_id):
    """Return the details of the connected account."""
    url = f'https://ob.nordigen.com/api/accounts/{account_id}/details/'
    return requests.get(url, headers=headers).json()


def get_account_transactions(account_id):
    """Return the transactions of the connected account."""
    url = f'https://ob.nordigen.com/api/accounts/{account_id}/transactions/'
    return requests.get(url, headers=headers).json()
"65191727+KoliosterNikolayIliev@users.noreply.github.com"
] | 65191727+KoliosterNikolayIliev@users.noreply.github.com |
0e508934c548e7e866977dbb13a82672b6180f94 | 024316d7672c7c2b2c558003c586df5116c73731 | /wavesynlib/fileutils/__init__.py | a476a38b6e9e90ed2515e926198448c35366f8d1 | [] | no_license | kaizhongkaizhong/WaveSyn | c305b03e44a961892a792a49601f625a90ae4f70 | b7918af5f66dba8c0d63cbb986465febe075cec2 | refs/heads/master | 2023-04-23T00:20:44.134876 | 2021-05-03T07:43:10 | 2021-05-03T07:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 28 00:38:46 2016
@author: Feng-cong Li
"""
import os
from os.path import dirname, join
import hy
# Prefer the already-compiled Hy module; if Hy raises a compiler error while
# (re)compiling it on import, byte-compile `hyutils.hy` explicitly with the
# `hyc` command-line tool and retry the import once.
try:
    from wavesynlib.fileutils.hyutils import *
except hy.errors.HyCompilerError:
    # NOTE(review): assumes `hyc` is on PATH and that the second import will
    # pick up the freshly produced bytecode — confirm on target platforms.
    utils_path = join(dirname(__file__), 'hyutils.hy')
    os.system(f'hyc {utils_path}')
    from wavesynlib.fileutils.hyutils import *
| [
"xialulee@live.cn"
] | xialulee@live.cn |
f27d0a091eba208fe96474ee4959effa93451745 | a85ce270c8c67ab8a8c1bea577c4f8a0a054f8bf | /.venv/bin/jupyter-nbconvert | b7eb233280e1039b38407259910ad9d30b5b112b | [] | no_license | MohammedGhafri/data_visualization | afe2496100a5d204dcae3a8dd13bea51fe8f3c7c | c10c5dd8d1c687c5cf1f402b48178a3413c5abe1 | refs/heads/master | 2022-12-10T16:55:16.629174 | 2020-09-02T19:39:33 | 2020-09-02T19:39:33 | 292,341,273 | 0 | 0 | null | 2020-09-02T19:39:34 | 2020-09-02T16:52:04 | Python | UTF-8 | Python | false | false | 254 | #!/home/ghafri/data_visualization/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from nbconvert.nbconvertapp import main
if __name__ == '__main__':
    # setuptools console-script shim: strip the Windows launcher suffix
    # ('-script.pyw' or '.exe') from argv[0] so nbconvert reports a clean
    # program name, then exit with nbconvert's own return code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"eng.m.ghafri@gmail.com"
] | eng.m.ghafri@gmail.com | |
e239870c1886ba30562c9a92b0c9771ad29f59c4 | 76f4c947d5259bd8b3060fdb559b98720c670cae | /django_custom_user_model/django_custom_user_model/settings.py | ad8915fcc0ed18c9e600b175190cc90aab81ea63 | [
"MIT"
] | permissive | zkan/django-custom-user-model | 49de78f25af6c9324313aeea067a361778b5b225 | 3cb5937444b2c7a4f9f8a30621e8f2ed680dca1f | refs/heads/master | 2020-03-22T04:51:03.362817 | 2018-07-03T07:29:27 | 2018-07-03T07:29:27 | 139,525,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | """
Django settings for django_custom_user_model project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed here for local development only; load it
# from the environment before deploying.
SECRET_KEY = 'd6oy1n7!7v#7y!asiy7l1wujuhz8n)_4b+k_v*x*4d$pcr6u&n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True; must be filled with served hostnames in production.
ALLOWED_HOSTS = []
# Application definition
# Stock Django contrib apps only; project apps are added as they are created.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Default middleware stack from `startproject`, order matters.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_custom_user_model.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_custom_user_model.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite file in the project root — convenient for development, not for
# concurrent production use.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"kan@prontomarketing.com"
] | kan@prontomarketing.com |
abeffd04fd1e1951cd2f585cfac2f1bbae933fdc | f8c3c677ba536fbf5a37ac4343c1f3f3acd4d9b6 | /ICA_SDK/ICA_SDK/models/object_store_access.py | 2ed6681b5101d31413bd507ed8012a758430dcd6 | [] | no_license | jsialar/integrated_IAP_SDK | 5e6999b0a9beabe4dfc4f2b6c8b0f45b1b2f33eb | c9ff7685ef0a27dc4af512adcff914f55ead0edd | refs/heads/main | 2023-08-25T04:16:27.219027 | 2021-10-26T16:06:09 | 2021-10-26T16:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,883 | py | # coding: utf-8
"""
IAP Services
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ICA_SDK.configuration import Configuration
class ObjectStoreAccess(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type; iterated by to_dict() below.
    openapi_types = {
        'aws_s3_temporary_upload_credentials': 'AwsS3TemporaryUploadCredentials',
        'direct_upload_credentials': 'DirectUploadCredentials',
        'session_id': 'str'
    }
    # Pythonic attribute name -> JSON key used on the wire.
    attribute_map = {
        'aws_s3_temporary_upload_credentials': 'awsS3TemporaryUploadCredentials',
        'direct_upload_credentials': 'directUploadCredentials',
        'session_id': 'sessionId'
    }
    def __init__(self, aws_s3_temporary_upload_credentials=None, direct_upload_credentials=None, session_id=None, local_vars_configuration=None):  # noqa: E501
        """ObjectStoreAccess - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._aws_s3_temporary_upload_credentials = None
        self._direct_upload_credentials = None
        self._session_id = None
        self.discriminator = None
        # Only assign through the property setters when a value was supplied,
        # so unset fields stay None rather than triggering validation.
        if aws_s3_temporary_upload_credentials is not None:
            self.aws_s3_temporary_upload_credentials = aws_s3_temporary_upload_credentials
        if direct_upload_credentials is not None:
            self.direct_upload_credentials = direct_upload_credentials
        if session_id is not None:
            self.session_id = session_id
    @property
    def aws_s3_temporary_upload_credentials(self):
        """Gets the aws_s3_temporary_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :return: The aws_s3_temporary_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :rtype: AwsS3TemporaryUploadCredentials
        """
        return self._aws_s3_temporary_upload_credentials
    @aws_s3_temporary_upload_credentials.setter
    def aws_s3_temporary_upload_credentials(self, aws_s3_temporary_upload_credentials):
        """Sets the aws_s3_temporary_upload_credentials of this ObjectStoreAccess.
        :param aws_s3_temporary_upload_credentials: The aws_s3_temporary_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :type: AwsS3TemporaryUploadCredentials
        """
        self._aws_s3_temporary_upload_credentials = aws_s3_temporary_upload_credentials
    @property
    def direct_upload_credentials(self):
        """Gets the direct_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :return: The direct_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :rtype: DirectUploadCredentials
        """
        return self._direct_upload_credentials
    @direct_upload_credentials.setter
    def direct_upload_credentials(self, direct_upload_credentials):
        """Sets the direct_upload_credentials of this ObjectStoreAccess.
        :param direct_upload_credentials: The direct_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :type: DirectUploadCredentials
        """
        self._direct_upload_credentials = direct_upload_credentials
    @property
    def session_id(self):
        """Gets the session_id of this ObjectStoreAccess.  # noqa: E501
        The id of the upload session  # noqa: E501
        :return: The session_id of this ObjectStoreAccess.  # noqa: E501
        :rtype: str
        """
        return self._session_id
    @session_id.setter
    def session_id(self, session_id):
        """Sets the session_id of this ObjectStoreAccess.
        The id of the upload session  # noqa: E501
        :param session_id: The session_id of this ObjectStoreAccess.  # noqa: E501
        :type: str
        """
        self._session_id = session_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model items inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ObjectStoreAccess):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ObjectStoreAccess):
            return True
        return self.to_dict() != other.to_dict()
| [
"siajunren@gmail.com"
] | siajunren@gmail.com |
c066233bb8db9f3c120392b1b16cc9ce30cc4375 | fa5c0d0bc7dc3a18be3350f1b7da2068e0362afb | /duanping/save.py | 0f13959f99a200b0783a794d2a1b570a9305f1a4 | [] | no_license | earthloong/Douban_spiders | f2f1a368e97364456b226b0382817768248fabfc | 58af80e7a84d5ab970822c1ae68fbd6772e03084 | refs/heads/master | 2020-04-28T16:25:34.778998 | 2019-02-02T04:02:17 | 2019-02-02T04:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # -*- coding: utf-8 -*-
# 此程序用来抓取 的数据
import os
import csv
import json
import sys
from save_data import database
class Spider(object):
    """Loads scraped Douban short-comment rows from local CSV exports and
    writes them into the database through the ``database`` helper.

    Python 2 code (print statements, unicode literals elsewhere in the file).
    """
    def __init__(self):
        # Single shared DB handle; the caller closes it (see __main__ below).
        self.db = database()
    def get_data(self): # read every *data_DB.csv file in the working directory
        """Return a list of dicts, one per well-formed 11-column CSV row.

        Rows are decoded as GBK with errors ignored; rows with a different
        column count are reported and skipped.
        """
        results = []
        paths = os.listdir(os.getcwd())
        for path in paths:
            if 'data_DB.csv' in path:
                with open(path, 'rU') as f:
                    tmp = csv.reader(f)
                    for i in tmp:
                        # print 'i:',i
                        t = [x.decode('gbk', 'ignore') for x in i]
                        # print 't:',t
                        if len(t) == 11:
                            dict_item = {'product_number': t[0],
                                         'plat_number': t[1],
                                         'nick_name': t[2],
                                         'cmt_date': t[3],
                                         'cmt_time': t[4],
                                         'comments': t[5],
                                         'like_cnt': t[6],
                                         'cmt_reply_cnt': t[7],
                                         'long_comment': t[8],
                                         'last_modify_date': t[9],
                                         'src_url': t[10]}
                            results.append(dict_item)
                        else:
                            # Malformed row (wrong field count) — log and skip.
                            print '少字段>>>t:',t
        return results
    def save_sql(self, table_name): # persist all parsed rows into the table
        """Insert every row from get_data() into *table_name*, logging
        progress; failed inserts are reported and skipped.
        """
        items = self.get_data()
        all = len(items)
        count = 1
        for item in items:
            try:
                print 'count:%d | all:%d' % (count, all)
                count += 1
                self.db.up_data(table_name, item)
            except Exception as e:
                # Best-effort insert: report the DB error and continue.
                print '插入数据库错误>>>',e
                pass
if __name__ == "__main__":
spider = Spider()
spider = Spider()
print u'开始录入数据'
spider.save_sql('T_COMMENTS_PUB_MOVIE') # 手动输入库名
print u'录入完毕'
spider.db.db.close()
| [
"492741071@qq.com"
] | 492741071@qq.com |
59e14cb31e210da2525806ce609826785c4b60fd | f47d52330c2f53e8bc3086c23854d3022b802866 | /split_coco.py | dc773dd40c59242d23e4433ffcdf34a4a8f41edf | [] | no_license | Qidian213/GigaVersion | 45dd31d8209d79e3b77a891a19cd6c2bbb5e2683 | 2244e3a60be800b7fbe1fde28fb10be51890ce0a | refs/heads/main | 2023-06-30T18:16:23.232348 | 2021-08-02T01:08:01 | 2021-08-02T01:08:01 | 391,779,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py |
import pycocotools.coco as coco
import json
import numpy as np
import cv2
import shutil
import random
# Split VISIBLE_COCO.json into train/val COCO annotation files, using the
# image-name lists stored in train_val.json. Image and annotation ids are
# renumbered sequentially so each output file has consistent ids.
train_val_list = json.load(open('train_val.json', 'r'))
train_list = train_val_list['train']
val_list = train_val_list['val']
train_images_coco =[]
train_annotations =[]
val_images_coco =[]
val_annotations =[]
img_num = 0
ann_num = 0
coco_data = coco.COCO('VISIBLE_COCO.json')
categories = coco_data.dataset['categories']
print(categories)
images = coco_data.getImgIds()
for img_id in images:
    img_num += 1
    img_info = coco_data.loadImgs(ids=[img_id])[0]
    ann_ids = coco_data.getAnnIds(imgIds=[img_id])
    img_anns = coco_data.loadAnns(ids=ann_ids)
    # The part before '__' identifies the source frame/sequence used for the split.
    file_name = img_info['file_name'].split('__')[0]
    if(file_name in train_list):
        img_info['id'] = img_num
        # NOTE(review): self-assignment is a no-op — possibly a leftover from
        # a renaming step that was removed.
        img_info['file_name'] = img_info['file_name']
        train_images_coco.append(img_info)
        for ann in img_anns:
            ann['image_id'] = img_num
            ann['id'] = ann_num
            ann_num += 1
            train_annotations.append(ann)
    else:
        img_info['id'] = img_num
        img_info['file_name'] = img_info['file_name']
        val_images_coco.append(img_info)
        for ann in img_anns:
            ann['image_id'] = img_num
            ann['id'] = ann_num
            ann_num += 1
            val_annotations.append(ann)
train_data_coco={}
train_data_coco['images'] = train_images_coco
train_data_coco['categories'] = categories
train_data_coco['annotations']= train_annotations
json.dump(train_data_coco, open('visible_bbox_train.json', 'w'), indent=4)
val_data_coco={}
val_data_coco['images'] = val_images_coco
val_data_coco['categories'] = categories
val_data_coco['annotations']= val_annotations
json.dump(val_data_coco, open('visible_bbox_val.json', 'w'), indent=4)
| [
"xhx1247786632@gmail.com"
] | xhx1247786632@gmail.com |
c8666866110c40d9d6ea8c980dfedb9f87daa040 | 0444918f75705bdfa177b45fdf8b903c6b63ab88 | /examples/dymoscale_simpletest.py | c9808bdccd88959f6205aae602a08224eb1f3e49 | [
"MIT"
] | permissive | ntoll/Adafruit_CircuitPython_DymoScale | 80785aba4a67a5ab5e533b75d05c968a999d0d5e | c57e45659650bf4ffb2b33eaea7dc462f6c63cbf | refs/heads/master | 2020-07-17T16:26:21.503499 | 2019-09-03T10:48:55 | 2019-09-03T10:48:55 | 206,054,402 | 0 | 0 | MIT | 2019-09-03T10:47:33 | 2019-09-03T10:47:33 | null | UTF-8 | Python | false | false | 639 | py | import time
import board
import digitalio
import adafruit_dymoscale
# Initialize the DYMO scale: the units pin (D3) is driven as an output and is
# toggled periodically to keep the scale from auto-sleeping; D4 carries the
# scale's data stream.
units_pin = digitalio.DigitalInOut(board.D3)
units_pin.switch_to_output()
dymo = adafruit_dymoscale.DYMOScale(board.D4, units_pin)
# Remember when we last poked the units button.
time_stamp = time.monotonic()
while True:
    # Each read returns a reading object; `.weight` on it is grams.
    reading = dymo.weight
    text = "{} g".format(reading.weight)
    print(text)
    # to avoid sleep mode, toggle the units pin every 2 mins.
    if (time.monotonic() - time_stamp) > 120:
        print('toggling units button...')
        dymo.toggle_unit_button()
        # reset the time
        time_stamp = time.monotonic()
| [
"robots199@me.com"
] | robots199@me.com |
68002fbdfd606d9c99b4eaa029b17e29c4aed4f9 | 5f404180423f854df798ea907fd13094f1eccfae | /tests/test_tutorial_filter.py | 14e85a66403ea7c8c5236dd1e7e14af866f9428c | [
"MIT"
] | permissive | sdpython/td3a_cpp | e776bceb65285eca7f9f0400fb5f96cd8bf2393e | 1ca08907be03a09bb9cb89b2ca334b1fa5305648 | refs/heads/master | 2023-05-11T06:18:00.968817 | 2023-04-30T09:15:14 | 2023-04-30T09:15:14 | 226,640,683 | 1 | 7 | NOASSERTION | 2021-10-21T22:46:49 | 2019-12-08T09:05:21 | Cython | UTF-8 | Python | false | false | 2,262 | py | """
Unit tests for ``random_strategy``.
"""
import unittest
import numpy
from numpy.testing import assert_equal
from td3a_cpp.tutorial.experiment_cython import (
pyfilter_dmax, filter_dmax_cython,
filter_dmax_cython_optim,
cyfilter_dmax,
cfilter_dmax, cfilter_dmax2, cfilter_dmax16,
cfilter_dmax4
)
class TestTutorialFilter(unittest.TestCase):
    """Unit tests for the tutorial ``filter_dmax`` implementations.

    Every implementation under test must be equivalent, in place, to the
    NumPy reference ``v[v > 0] = 0``. The nine previous test methods all
    repeated the same five-line body; the shared logic now lives in
    :meth:`_check_filter` while every public test name is preserved.
    """

    def _check_filter(self, filter_fn):
        """Run *filter_fn* on random data and compare with the NumPy reference.

        :param filter_fn: callable ``filter_fn(values, 0)`` mutating *values*
            so that every element greater than 0 becomes 0.
        """
        values = numpy.random.randn(100).astype(numpy.float64)
        expected = values.copy()
        filter_fn(values, 0)
        expected[expected > 0] = 0
        assert_equal(values, expected)

    def test_pyfilter_dmax(self):
        self._check_filter(pyfilter_dmax)

    def test_filter_dmax_cython(self):
        self._check_filter(filter_dmax_cython)

    def test_filter_dmax_cython_optim(self):
        self._check_filter(filter_dmax_cython_optim)

    def test_filter_cyfilter_dmax(self):
        self._check_filter(cyfilter_dmax)

    def test_filter_cfilter_dmax(self):
        self._check_filter(cfilter_dmax)

    def test_filter_cfilter_dmax2(self):
        self._check_filter(cfilter_dmax2)

    def test_filter_cfilter_dmax16(self):
        self._check_filter(cfilter_dmax16)

    def test_filter_cfilter_dmax4(self):
        self._check_filter(cfilter_dmax4)

    # Kept for backward compatibility: duplicates test_filter_cfilter_dmax so
    # any existing selection of tests by name keeps working.
    def test_cfilter_dmax(self):
        self._check_filter(cfilter_dmax)
if __name__ == '__main__':
    # Allow running this test module directly: python test_tutorial_filter.py
    unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
fe8a4ba65c8e91aefd4f7691f21ea64334745863 | dd87194dee537c2291cf0c0de809e2b1bf81b5b2 | /k8sclient/models/v1beta1_scale_spec.py | 57cf26a08a700d346cc6e98651b1499c8acf638a | [
"Apache-2.0"
] | permissive | Arvinhub/client-python | 3ea52640ab02e4bf5677d0fd54fdb4503ecb7768 | d67df30f635231d68dc4c20b9b7e234c616c1e6a | refs/heads/master | 2023-08-31T03:25:57.823810 | 2016-11-02T22:44:36 | 2016-11-02T22:44:36 | 73,865,578 | 1 | 0 | Apache-2.0 | 2018-10-10T12:16:45 | 2016-11-15T23:47:17 | Python | UTF-8 | Python | false | false | 3,582 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1ScaleSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, replicas=None):
        """
        V1beta1ScaleSpec - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> declared Swagger type; iterated by to_dict().
        self.swagger_types = {
            'replicas': 'int'
        }
        # Pythonic attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'replicas': 'replicas'
        }
        self._replicas = replicas
    @property
    def replicas(self):
        """
        Gets the replicas of this V1beta1ScaleSpec.
        desired number of instances for the scaled object.
        :return: The replicas of this V1beta1ScaleSpec.
        :rtype: int
        """
        return self._replicas
    @replicas.setter
    def replicas(self, replicas):
        """
        Sets the replicas of this V1beta1ScaleSpec.
        desired number of instances for the scaled object.
        :param replicas: The replicas of this V1beta1ScaleSpec.
        :type: int
        """
        self._replicas = replicas
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model items inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): no isinstance check — comparing against an object
        # without __dict__ raises AttributeError; kept as generated.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
a3717d40f9bcfd31bd41b77ef503e38bca83308a | 4e26d797d72678a1c14ee59522964013eef3d551 | /usuarios/admin.py | 2517a9a6bcbd0522246cf577f8e40c0106d80d1a | [] | no_license | GomesMilla/SistemaDeControle | 3def1f47793b28317b2462dc61098145c6329588 | b9e2aad12bfaa8858ea45aa9adfc3c0f879a45e8 | refs/heads/main | 2023-03-15T05:25:23.606211 | 2021-03-23T19:19:10 | 2021-03-23T19:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from django.contrib import admin
from usuarios.models import Usuario
admin.site.register(Usuario)
# Register your models here.
| [
"camila.adriana.gomes@outlook.com"
] | camila.adriana.gomes@outlook.com |
375e994b7829514a5c1fcb79ec62436a0367d65f | e0b50a8ff40097b9896ad69d098cbbbbe4728531 | /dcmanager/api/app.py | fca85673325aade81b61debc60690625e0a23c64 | [
"Apache-2.0"
] | permissive | aleks-kozyrev/stx-distcloud | ccdd5c76dd358b8aa108c524138731aa2b0c8a53 | a4cebb85c45c8c5f1f0251fbdc436c461092171c | refs/heads/master | 2020-03-27T11:11:09.348241 | 2018-08-27T14:33:50 | 2018-08-27T14:33:50 | 146,470,708 | 0 | 0 | Apache-2.0 | 2018-08-28T15:47:12 | 2018-08-28T15:47:08 | Python | UTF-8 | Python | false | false | 2,808 | py | # Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import pecan
from keystonemiddleware import auth_token
from oslo_config import cfg
from oslo_middleware import request_id
from oslo_service import service
from dcmanager.common import context as ctx
from dcmanager.common.i18n import _
def setup_app(*args, **kwargs):
    """Build and return the pecan WSGI application for the dcmanager API.

    Server host/port and the debug/auth flags come from oslo.config
    (``cfg.CONF``); the app root controller is resolved by dotted path.
    """
    opts = cfg.CONF.pecan
    config = {
        'server': {
            'port': cfg.CONF.bind_port,
            'host': cfg.CONF.bind_host
        },
        'app': {
            'root': 'dcmanager.api.controllers.root.RootController',
            'modules': ['dcmanager.api'],
            "debug": opts.debug,
            "auth_enable": opts.auth_enable,
            'errors': {
                400: '/error',
                '__force_dict__': True
            }
        }
    }
    pecan_config = pecan.configuration.conf_from_dict(config)
    # app_hooks = [], hook collection will be put here later
    app = pecan.make_app(
        pecan_config.app.root,
        debug=False,
        wrap_app=_wrap_app,  # adds request-id and (optionally) keystone auth
        force_canonical=False,
        hooks=lambda: [ctx.AuthHook()],
        guess_content_type_from_ext=True
    )
    return app
def _wrap_app(app):
    """Wrap the pecan app with request-id and keystone auth middleware.

    Keystone's AuthProtocol is added only when pecan auth is enabled AND the
    configured auth strategy is 'keystone'; otherwise the app is returned
    with just the request-id middleware.
    """
    app = request_id.RequestId(app)
    if cfg.CONF.pecan.auth_enable and cfg.CONF.auth_strategy == 'keystone':
        conf = dict(cfg.CONF.keystone_authtoken)
        # Change auth decisions of requests to the app itself.
        conf.update({'delay_auth_decision': True})
        # NOTE: Policy enforcement works only if Keystone
        # authentication is enabled. No support for other authentication
        # types at this point.
        return auth_token.AuthProtocol(app, conf)
    else:
        return app
_launcher = None
def serve(api_service, conf, workers=1):
    """Launch *api_service* with oslo.service; may only be called once.

    Stores the launcher in the module-level ``_launcher`` so wait() can
    block on it; a second call raises RuntimeError.
    """
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))
    _launcher = service.launch(conf, api_service, workers=workers)
def wait():
    # Block until the service launched by serve() exits.
    # NOTE(review): calling wait() before serve() would fail on None —
    # the expected call order is serve() then wait().
    _launcher.wait()
| [
"scott.little@windriver.com"
] | scott.little@windriver.com |
54256491ebbf1cf309cd445c078bf5fcd3c63642 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/0278-First-Bad-Version/0278.py | ab08d44570fbedd547f7a90eca4cb6bba6e4ad05 | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 307 | py | class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
l, r = 0, n
while l < r:
mid = (l + r) >> 1
if isBadVersion(mid):
r = mid
else:
l = mid + 1
return l | [
"luliyucoordinate@outlook.com"
] | luliyucoordinate@outlook.com |
9f0de9c4bfa3b96834d1c14d7260d39cedcaddd5 | 391decb17414b32941bf43380de4d1474334c29c | /.history/Function_Login_20210907201238.py | 18e67d522775604c3f0cbd51e45425cce2f964bc | [] | no_license | leonardin999/Restaurant-Management-Systems-GUI-RMS- | b17cf910ce0955b370ab51d00d161f96a2fb5ccd | 531726a378ced78de079bfffb68a0a304cfbc328 | refs/heads/main | 2023-08-12T09:21:46.196684 | 2021-09-08T14:58:48 | 2021-09-08T14:58:48 | 403,201,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | ################################################################################
##
## BY: PHUNG HUNG BINH
## This project can be used freely for all uses, as long as they maintain the
## respective credits only in the Python scripts, any information in the visual
## interface (GUI) can be modified without any implication.
##
## There are limitations on Qt licenses if you want to use your products
## commercially, I recommend reading them on the official website:
## https://doc.qt.io/qtforpython/licenses.html
##
from main import *
## ==> GLOBALS
GLOBAL_STATE = 0
GLOBAL_TITLE_BAR = True
## ==> COUT INITIAL MENU
count = 1
class Functions_Login(Login_Windown):
    """UI helpers mixed into the login window: title-bar toggle flag and
    drop-shadow / button wiring for the frameless Qt window.
    """
    # NOTE(review): missing ``self`` — as written this behaves like an
    # implicit static method where the instance is passed as ``status``;
    # confirm how callers invoke it.
    def removeTitleBar(status):
        global GLOBAL_TITLE_BAR
        GLOBAL_TITLE_BAR = status
    def uiDefinitions(self):
        """Apply drop shadows to the main frames and hook up the window
        minimize/close buttons."""
        ## SHOW ==> DROP SHADOW
        self.shadow = QGraphicsDropShadowEffect(self)
        self.shadow.setBlurRadius(17)
        self.shadow.setXOffset(0)
        self.shadow.setYOffset(0)
        self.shadow.setColor(QColor(0, 0, 0, 150))
        self.ui.frame.setGraphicsEffect(self.shadow)
        self.shadow1 = QGraphicsDropShadowEffect(self)
        self.shadow1.setBlurRadius(17)
        self.shadow1.setXOffset(0)
        self.shadow1.setYOffset(0)
        self.shadow1.setColor(QColor(0, 0, 0, 150))
        self.ui.login_area.setGraphicsEffect(self.shadow1)
        ## SHOW ==> DROP SHADOW
        # NOTE(review): self.shadow is rebound here; each widget keeps its
        # own effect object, so the earlier frame shadow is unaffected.
        self.shadow = QGraphicsDropShadowEffect(self)
        self.shadow.setBlurRadius(17)
        self.shadow.setXOffset(0)
        self.shadow.setYOffset(0)
        self.shadow.setColor(QColor(0, 0, 0, 150))
        self.ui.frame_main.setGraphicsEffect(self.shadow)
        ### ==> MINIMIZE
        self.ui.btn_minimize.clicked.connect(lambda: self.showMinimized())
        self.ui.btn_close.clicked.connect(lambda: self.close())
| [
"89053434+leonardin999@users.noreply.github.com"
] | 89053434+leonardin999@users.noreply.github.com |
b5b4b09ec0510470ba447b1e2cd4ec172a5f9bf3 | ec551303265c269bf1855fe1a30fdffe9bc894b6 | /old/t20191017_intersection/intersection.py | c3a5a88436acc04574efeb610a63177c156cb504 | [] | no_license | GongFuXiong/leetcode | 27dbda7a5ced630ae2ae65e19d418ebbc65ae167 | f831fd9603592ae5bee3679924f962a3ebce381c | refs/heads/master | 2023-06-25T01:05:45.683510 | 2021-07-26T10:05:25 | 2021-07-26T10:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: KM
@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
@contact: yangkm601@gmail.com
@software: garner
@time: 2019/10/17
@url:https://leetcode-cn.com/problems/intersection-of-two-arrays/
@desc:
349. 两个数组的交集
给定两个数组,编写一个函数来计算它们的交集。
示例 1:
输入: nums1 = [1,2,2,1], nums2 = [2,2]
输出: [2]
示例 2:
输入: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
输出: [9,4]
说明:
输出结果中的每个元素一定是唯一的。
我们可以不考虑输出结果的顺序。
'''
import math
class Solution:
    def intersection(self, nums1, nums2):
        """Return the unique elements common to both sequences (LeetCode 349).

        Uses the built-in set intersection instead of the previous manual
        membership loop — same O(len(nums1) + len(nums2)) behavior, less code.
        Element order in the result is unspecified, as the problem allows.

        :param nums1: first sequence of values (duplicates allowed)
        :param nums2: second sequence of values (duplicates allowed)
        :return: list of distinct values present in both inputs
        """
        return list(set(nums1) & set(nums2))
if __name__ == "__main__":
    # Ad-hoc smoke test mirroring the two examples in the problem statement.
    solution = Solution()
    print("--------1-------")
    nums1 = [1,2,2,1]
    nums2 = [2,2]
    res=solution.intersection(nums1,nums2)
    print("res:{0}".format(res))  # expected: [2]
    print("--------2-------")
    nums1 = [4,9,5]
    nums2 = [9,4,9,8,4]
    res=solution.intersection(nums1,nums2)
    print("res:{0}".format(res))  # expected: 9 and 4, order unspecified
| [
"958747457@qq.com"
] | 958747457@qq.com |
f0b58de91c55f71530c50df047a31bbb1fe13f48 | 7de954bcc14cce38758463f0b160c3c1c0f7df3f | /cmsplugin_cascade/cms_plugins.py | 5910ca915a921223057b73e18cdafecb596be46f | [
"MIT"
] | permissive | pmutale/djangocms-cascade | c66210a0afad0d2783c2904732972fb8890d7614 | 066b8a1ca97d3afd8b79968a7f5af506a265095c | refs/heads/master | 2022-10-28T12:34:18.610777 | 2015-08-21T20:58:03 | 2015-08-21T20:58:03 | 41,476,721 | 0 | 0 | MIT | 2022-10-23T19:43:13 | 2015-08-27T09:05:23 | Python | UTF-8 | Python | false | false | 1,098 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from .settings import CASCADE_PLUGINS
for module in CASCADE_PLUGINS:
try:
# if a module was specified, load all plugins in module settings
module_settings = import_module('{}.settings'.format(module))
module_plugins = getattr(module_settings, 'CASCADE_PLUGINS', [])
for p in module_plugins:
try:
import_module('{}.{}'.format(module, p))
except ImportError as err:
msg = "Plugin {} as specified in {}.settings.CMSPLUGIN_CASCADE_PLUGINS could not be loaded: {}"
raise ImproperlyConfigured(msg.format(p, module, err.message))
except ImportError:
try:
# otherwise try with cms_plugins in the named module
import_module('{}.cms_plugins'.format(module))
except ImportError:
# otherwise just use the named module as plugin
import_module('{}'.format(module))
| [
"jacob.rief@gmail.com"
] | jacob.rief@gmail.com |
a36a97b7755caecb44009eb13acd970f356a1e1d | b5ef3b9da130f604f111bd469128b73e78d6ba9d | /bt5/erp5_crm/SkinTemplateItem/portal_skins/erp5_crm/Event_setTextContentFromNotificationMessage.py | f1a3d8ee8cc3c8743764af664b9b86970b517a98 | [] | no_license | soediro/erp5 | 154bb2057c4cd12c14018c1ab2a09a78b2d2386a | 3d1a8811007a363b7a43df4b295b5e0965c2d125 | refs/heads/master | 2021-01-11T00:31:05.445267 | 2016-10-05T09:28:05 | 2016-10-07T02:59:00 | 70,526,968 | 1 | 0 | null | 2016-10-10T20:40:41 | 2016-10-10T20:40:40 | null | UTF-8 | Python | false | false | 1,330 | py | portal = context.getPortalObject()
if not language:
language = context.getLanguage()
if not language:
language = portal.portal_preferences.getPreferredCustomerRelationLanguage()
notification_message = portal.portal_notifications.getDocumentValue(
language=language,
reference=reference)
if substitution_method_parameter_dict is None:
substitution_method_parameter_dict = {}
# Notification method will receive the current event under "event_value" key.
# This way notification method can return properties from recipient or follow up of the event.
substitution_method_parameter_dict.setdefault('event_value', context)
if notification_message is not None:
context.setContentType(notification_message.getContentType())
target_format = "txt"
if context.getContentType() == 'text/html':
target_format = "html"
mime, text_content = notification_message.convert(target_format,
substitution_method_parameter_dict=substitution_method_parameter_dict)
context.setTextContent(text_content)
context.setAggregateList(notification_message.getProperty('aggregate_list', []))
if not context.hasTitle():
context.setTitle(notification_message.asSubjectText(
substitution_method_parameter_dict=substitution_method_parameter_dict))
| [
"georgios.dagkakis@nexedi.com"
] | georgios.dagkakis@nexedi.com |
a0dd12ad29f566a0c62075e3ac57d306a8d68e30 | b5811a11a7d22414a5690a681cdbb6ab95e08e06 | /backend/employee/admin.py | bed287a77df0de1ac2ec6fccebbe4036c36d5f07 | [] | no_license | kausko/PULSE-X | fa2d2fe913c0d091d807b245d4946922536c699c | 88a89d3fcd20289679e3b68aa36561b24ae9ea4e | refs/heads/master | 2023-05-09T12:47:44.860620 | 2021-04-21T14:07:43 | 2021-04-21T14:07:43 | 303,328,807 | 0 | 0 | null | 2021-06-05T00:10:07 | 2020-10-12T08:37:08 | Jupyter Notebook | UTF-8 | Python | false | false | 272 | py | from django.contrib import admin
from .models import Review
@admin.register(Review)
class ReviewAdmin(admin.ModelAdmin):
    """Admin configuration for Review objects."""
    # Columns shown on the Review change-list page.
    list_display = ['user', 'sentiment', 'flag', 'visited', 'sarcasm', 'helpfulness', 'is_twitter', ]
    # Sidebar filters; presumably boolean fields on Review — confirm in models.py.
    list_filter = ['visited', 'is_twitter', ]
| [
"tanmaypardeshi@gmail.com"
] | tanmaypardeshi@gmail.com |
63c96727ba5c7934d8f0c298575bb0199dc6bd74 | 5f10ca2439551040b0af336fd7e07dcc935fc77d | /Binary tree/二叉树性质相关题目/110. Balanced Binary Tree.py | b38c2eadc37c9e14c688c3837e5efefee9db1c50 | [] | no_license | catdog001/leetcode2.0 | 2715797a303907188943bf735320e976d574f11f | d7c96cd9a1baa543f9dab28750be96c3ac4dc731 | refs/heads/master | 2021-06-02T10:33:41.552786 | 2020-04-08T04:18:04 | 2020-04-08T04:18:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,477 | py | # -*- coding: utf-8 -*-
# @Time : 2/11/2020 8:28 PM
# @Author : LI Dongdong
# @FileName: 110. Balanced Binary Tree.py
''''''
'''
题目分析
1.要求:Given a binary tree, determine if it is height-balanced.
For this problem, a height-balanced binary tree is defined as:
a binary tree in which the left and right subtrees of every node differ in height by no more than 1.
Example 1:
Given the following tree [3,9,20,null,null,15,7]:
3
/ \
9 20
/ \
15 7
Return true.
Example 2:
Given the following tree [1,2,2,3,3,null,null,4,4]:
1
/ \
2 2
/ \
3 3
/ \
4 4
Return false.
2.理解:left and right node's subtree height difference is no more than 1
3.类型:character of tree
4.确认输入输出及边界条件:
input: root with definition, no range, repeated? Y order? N
output: True/False
corner case: None -> True Only one-> True
4.方法及方法分析:top-down-dfs bottom-up-dfs
time complexity order: top-down-dfs O(N) < brute force-dfs O(NlogN)
space complexity order: top-down-dfs O(N) = brute force-dfs O(N)
'''
from collections import deque
def constructTree(nodeList):
    """Build a binary tree from a level-order (BFS) value list; return the root.

    ``None`` marks a missing child. Children of a ``None`` entry are simply
    absent from the list (LeetCode convention), so deeper trees with explicit
    nulls also work, e.g. ``[3, 9, 20, None, None, 15, 7]``.

    Fixes three defects of the original:
      * falsy-but-valid values (e.g. 0) were turned into missing nodes
        (``if elem:`` instead of ``if elem is not None``);
      * an empty input list raised IndexError on ``new_node[0]``;
      * a ``None`` popped before the end of the list raised AttributeError.
    """
    if not nodeList:
        return None
    # Materialise nodes first; None stays None (only None means "no node").
    nodes = [TreeNode(val) if val is not None else None for val in nodeList]
    root = nodes[0]
    queue = deque([root])
    i = 1
    while queue and i < len(nodes):
        head = queue.popleft()
        if head is None:
            continue  # a missing node contributes no child slots
        head.left = nodes[i]
        queue.append(head.left)
        i += 1
        if i < len(nodes):
            head.right = nodes[i]
            queue.append(head.right)
            i += 1
    return root


'''
A. Top-down DFS.
   Idea: for every node, compare the max heights of its two subtrees.
   main: recursively check each node (DFS); helper: max height of a subtree.
   Time: worst case O(N^2) on paper, but the check aborts at the first
   imbalanced node; average O(N log N) (height costs O(log N) per node).
   Space: O(N) recursion stack for a skewed tree.
   Pitfall: getting the height helper right.
'''


class TreeNode:
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Top-down check: a tree is balanced iff, at every node, the two
    subtree heights differ by at most one."""

    def isBalanced(self, root: TreeNode) -> bool:
        """Return True when *root* is height-balanced (empty tree is balanced)."""
        if root is None:
            return True
        left_height = self.depth(root.left)
        right_height = self.depth(root.right)
        if abs(left_height - right_height) > 1:
            return False
        # Both subtrees must themselves be balanced as well.
        return self.isBalanced(root.left) and self.isBalanced(root.right)

    def depth(self, root):
        """Height of the subtree rooted at *root*; 0 for an empty subtree."""
        if root is None:
            return 0
        if root.left is None and root.right is None:
            return 1
        return max(self.depth(root.left), self.depth(root.right)) + 1
# Smoke test: the example tree from the problem statement is balanced -> True.
root = constructTree([3,9,20,None, None, 15,7])
X = Solution()
print(X.isBalanced(root))
'''
My own hand-written variant of the same top-down approach.
'''
class Solution:
    # Same top-down idea as above, but the height helper threads an explicit
    # accumulator (`numb` = depth already accumulated above `root`) instead of
    # adding 1 on the way back up.
    def isBalanced(self, root: TreeNode) -> bool:
        if not root:
            return True
        if abs(self.depth(root.left, 0) - self.depth(root.right, 0)) > 1:
            return False
        return self.isBalanced(root.left) and self.isBalanced(root.right)

    def depth(self, root, numb):  # input: root and accumulated depth, output: max depth
        if not root:
            return numb
        if not root.left and root.right:
            # Only a right child: its subtree decides the height.
            return self.depth(root.right, numb + 1)
        if root.left and not root.right:
            # Only a left child: its subtree decides the height.
            return self.depth(root.left, numb + 1)
        return max(self.depth(root.left, numb + 1), self.depth(root.right, numb + 1))
'''
test code
input None - True, only one - True
input
3
/ \
9 20
/ \
15 7
root 3 9 20 15 7
root.left 9 None 15 None None
root.right 20 NOne 7 None None
abs(L-R) 1 0 9 0 0
'''
'''
B. Bottom-up DFS.
To report whether the tree is balanced we also need each subtree's height as
an intermediate value, so dfs returns two things: whether the subtree is
balanced, and its height. This adapts the max-depth template — a bottom-up
dfs may return several properties at once.
'''
class Solution:
    """Bottom-up (post-order) check.

    Each dfs call reports ``(balanced, height)`` for its subtree, so every
    node is visited exactly once: O(N) time, O(H) recursion stack.
    """

    def isBalanced(self, root: TreeNode) -> bool:
        def dfs(node):
            # Returns (is_balanced, height) for the subtree rooted at `node`.
            if node is None:
                return True, 0
            left_ok, left_h = dfs(node.left)
            right_ok, right_h = dfs(node.right)
            height = max(left_h, right_h) + 1
            balanced = left_ok and right_ok and abs(left_h - right_h) <= 1
            return balanced, height

        # The original wrapped this in a redundant `if ...: return True else
        # return False` and shadowed the method name with a local variable.
        balanced, _height = dfs(root)
        return balanced
'''
Approach: bottom-up, with an explicit stack simulating the recursion.
'''
class Solution:
    # Iterative bottom-up: an explicit stack drives a post-order traversal.
    # `depth` maps node -> subtree height, with -1 meaning "imbalance found
    # somewhere below"; {None: 0} seeds the heights of missing children.
    def isBalanced(self, root: TreeNode) -> bool:
        depth, stack = {None: 0}, [(root, False)]
        while stack:
            node, visited = stack.pop()
            if not node:
                continue
            if not visited:
                # First visit: re-push marked as visited, then both children,
                # so children are processed before the node itself.
                stack.append((node, True))
                stack.append((node.right, False))
                stack.append((node.left, False))
            else:
                # Second visit: both child heights are known by now.
                left, right = depth[node.left], depth[node.right]
                if left == -1 or right == -1 or abs(left-right) > 1:
                    depth[node] = -1  # propagate failure (could also return False here)
                else:
                    depth[node] = max(left, right) + 1
        return depth[root] != -1
'''
test code
input None - True, only one - True
input
3
/ \
9 20
/ \
15 7
root 3 9 20 15 7
root.left 9 15
root.right
depth left 0 0 0
depth right 0 0 0
abs 1 0 0 0
return 3 1 2 1 1
'''
| [
"lidongdongbuaa@gmail.com"
] | lidongdongbuaa@gmail.com |
1289da36f9af10997b469d227d42e7e76c5f609a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/insert_20200610210502.py | c8dce3ee0bda5a8dea0ebbfa3dee6e32c5927854 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # nums is a list
# Find the index at which n should be inserted into the sorted list nums:
# scan (or binary-search) the array, comparing n against each element, and
# stop at the first position whose value is greater than or equal to n.
def Insert(nums, n):
    """Return the index at which *n* should be inserted into sorted *nums*.

    If *n* is already present, the index of its first occurrence is returned
    (classic "search insert position"; equivalent to bisect_left). Handles
    an empty list. Replaces the original broken linear scan, which contained
    a syntax error (``return i+``) and stray debug prints, with an O(log n)
    binary search.
    """
    lo, hi = 0, len(nums)
    while lo < hi:
        mid = (lo + hi) // 2
        if nums[mid] < n:
            lo = mid + 1
        else:
            hi = mid
    return lo


Insert([1, 3, 4, 6], 5)
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
00b249fc818e6a25ba0fe40b06a35ccf0a5fb550 | 237162607427106ae9564670d47427a62356861f | /core/migrations/0141_planitem_realize_every_time.py | 46c28c415ce382c8aa4bb459be31cda393ee0fd7 | [] | no_license | pitipund/basecore | 8648c1f4fa37b6e6075fd710ca422fe159ba930e | a0c20cec1e17dd0eb6abcaaa7d2623e38b60318b | refs/heads/master | 2020-09-13T20:16:02.622903 | 2019-11-20T09:07:15 | 2019-11-20T09:07:15 | 221,885,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-06-07 18:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the boolean `realize_every_time`
    # flag (default False) to the existing `PlanItem` model.

    dependencies = [
        ('core', '0140_doctorgroup'),
    ]

    operations = [
        migrations.AddField(
            model_name='planitem',
            name='realize_every_time',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"longman_694@hotmail.com"
] | longman_694@hotmail.com |
3c1ab0bac6360d881bc4117a080e38bb0d5ced9e | 19d1a808c9bb3dfcbd4a5b852962e6f19d18f112 | /python/multiprocessing_lock.py | eeaf149caf3dc777cc922aeada80352bfebff0f6 | [] | no_license | dataAlgorithms/data | 7e3aab011a9a2442c6d3d54d8d4bfd4d1ce0a6d3 | 49c95a0e0d0c23d63be2ef095afff76e55d80f5d | refs/heads/master | 2020-04-15T12:45:34.734363 | 2018-04-21T10:23:48 | 2018-04-21T10:23:48 | 61,755,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | from multiprocessing import Process, Lock
def worker_with(lock, f):
    """Append one line to file *f* while holding *lock* (acquired via ``with``)."""
    with lock:
        # `with open(...)` guarantees the handle is closed even if the write
        # fails (the original leaked the handle on exception).
        with open(f, "a+") as fs:
            fs.write("Lock acquired via with\n")
def worker_no_with(lock, f):
    """Append one line to file *f*, managing *lock* manually.

    Deliberately mirrors ``worker_with`` without the ``with lock:`` sugar to
    demonstrate the equivalent acquire/try/finally pattern, so the explicit
    lock handling is kept; only the file handle now uses a context manager
    (the original leaked it if the write raised).
    """
    lock.acquire()
    try:
        with open(f, "a+") as fs:
            fs.write("Lock acquired directly\n")
    finally:
        lock.release()
if __name__ == "__main__":
    # Launch both variants concurrently against the same file; the shared
    # Lock (inherited by the child processes) serialises the two appends,
    # so each written line stays intact.
    f = "file.txt"
    lock = Lock()
    w = Process(target=worker_with, args=(lock, f))
    nw = Process(target=worker_no_with, args=(lock, f))
    w.start()
    nw.start()
    w.join()
    nw.join()
"noreply@github.com"
] | dataAlgorithms.noreply@github.com |
fbdf99d5569a466f8d2cc4657e6077b12baf4099 | 97884252481ff208519194ecd63dc3a79c250220 | /pyobs/events/roofopened.py | 35b412ca265e434dcd39f53c97dcd70ec21adcbf | [
"MIT"
] | permissive | pyobs/pyobs-core | a1f30137d7f991bad4e115de38f543e59a6e30d2 | 2d7a06e5485b61b6ca7e51d99b08651ea6021086 | refs/heads/master | 2023-09-01T20:49:07.610730 | 2023-08-29T09:20:05 | 2023-08-29T09:20:05 | 174,351,157 | 9 | 3 | NOASSERTION | 2023-09-14T20:39:48 | 2019-03-07T13:41:27 | Python | UTF-8 | Python | false | false | 185 | py | from .event import Event
class RoofOpenedEvent(Event):
    """Event to be sent when the roof has finished opening."""

    # Present the class as part of the public pyobs.events namespace rather
    # than this private submodule.
    __module__ = "pyobs.events"


__all__ = ["RoofOpenedEvent"]
| [
"thusser@uni-goettingen.de"
] | thusser@uni-goettingen.de |
b78fb673d40631f1eb4b0d2635d17b5e2ad390eb | 05032af4b4c522d4c3ee2d70e61ee1f30fa6abf3 | /12_Accepting_user_inputs_GUI.py | 288d5c19d7bd0aa4d05cdaf185b16feaa5489bf8 | [] | no_license | tayyabmalik4/python_GUI | c2db4bd6b4f2a153e5bced69073b17240126e7d0 | 608a7e43e17a27b90239a2ebae3338ce52d7b20d | refs/heads/main | 2023-07-28T01:31:41.840077 | 2021-09-11T17:45:23 | 2021-09-11T17:45:23 | 404,079,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | # (12)*************************Accepting User Inputs in new text file in tkinter form**************************
from tkinter import *

# Build a small travel-booking form; submissions are appended to a text file.
root = Tk()
root.geometry("644x344")

def getvals():
    # Print the current form values and append them, as a stringified tuple,
    # to the records file (one line per submission).
    print(f"{namevalue.get(),phonevalue.get(),gendervalue.get(),contactvalue.get(),paymentvalue.get(),foodservicevalue.get()}")
    with open('12_Accepting_user_inputs_records.txt','a') as f:
        f.write(f"{namevalue.get(),phonevalue.get(),gendervalue.get(),contactvalue.get(),paymentvalue.get(),foodservicevalue.get()}\n")

# -----Creating Labels (column 2 holds labels, column 3 the entry widgets)
# NOTE(review): "Emergency Contect" and "prebool" below look like typos for
# "Contact" / "prebook" — fix if this user-facing text matters.
Label(root, text="Welcome to Tayyab Travels",font='comixsansms 13 bold',pady=15).grid(row=0,column=3)
name = Label(root, text='Name')
phone = Label(root, text= "phone")
gender = Label(root, text= "Gender")
contact = Label(root, text="Emergency Contect")
payment = Label(root, text="Payment Mode")
name.grid(row=1, column=2)
phone.grid(row=2, column=2)
gender.grid(row=3, column=2)
contact.grid(row=4, column=2)
payment.grid(row=5, column=2)

# ----Now Creating the variable which we store the entries
namevalue = StringVar()
phonevalue = StringVar()
gendervalue = StringVar()
contactvalue = StringVar()
paymentvalue = StringVar()
foodservicevalue = IntVar()

# -----Now Creat a Entry using Entry class for our form
nameentry =Entry(root,textvariable=namevalue)
phoneentry = Entry(root, textvariable=phonevalue)
genderentry = Entry(root, textvariable=gendervalue)
contactentry = Entry(root, textvariable=contactvalue)
paymententry = Entry(root, textvariable=paymentvalue)

# ----Now packing the entries using grid class
nameentry.grid(row=1,column=3)
phoneentry.grid(row=2,column=3)
genderentry.grid(row=3,column=3)
contactentry.grid(row=4,column=3)
paymententry.grid(row=5,column=3)

# ---creating Checkbox (IntVar: 1 when checked, 0 otherwise)
foodservice = Checkbutton(text="Want to prebool your meals? ",variable= foodservicevalue)
foodservice.grid(row=6,column=3)

# ----Button and packing it and assigning it a command
Button(text="Submit to Tayyab Travels",command=getvals).grid(row=7,column=3)

root.mainloop()
| [
"mtayyabmalik99@gmail.com"
] | mtayyabmalik99@gmail.com |
fc29b47a813bb30aeb84be26305c0dd6d6477bca | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/Python_Hand-on_Solve_200_Problems/Section 17 Recursion/sum_of_list_solution.py | d50c69d3d31130293893aaab17cbfa9b1112274f | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 320 | py | # # To add a new cell, type '# %%'
# # To add a new markdown cell, type '# %% [markdown]'
# # %%
# # Write a Python program to calculate the sum of a list of numbers. (in recursion fashion)
#
# ___ list_sum num_List
# __ le. ? __ 1
# r_ ? 0
# ____
# r_ ? 0 + ? ? 1|
#
# print ? 2, 4, 5, 6, 7
#
#
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
cd2467a1fdcd3909917783859542b8cf97f59f5b | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/vm/VmSwitch.py | bcea418e58ba6b99d43c18c8a4e4364eee01b6ce | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,917 | py | """This module contains the general information for VmSwitch ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class VmSwitchConsts():
    # Generated string constants for the enumerated VmSwitch properties
    # (admin_state, int_id, manager, own, policy_owner, vendor).
    ADMIN_STATE_DISABLE = "disable"
    ADMIN_STATE_ENABLE = "enable"
    INT_ID_NONE = "none"
    MANAGER_RHEV_M = "rhev-m"
    MANAGER_SCVMM = "scvmm"
    MANAGER_UNMANAGED = "unmanaged"
    MANAGER_VCENTER = "vcenter"
    OWN_DISCOVERED = "discovered"
    OWN_MANAGED = "managed"
    POLICY_OWNER_LOCAL = "local"
    POLICY_OWNER_PENDING_POLICY = "pending-policy"
    POLICY_OWNER_POLICY = "policy"
    VENDOR_MICROSOFT = "microsoft"
    VENDOR_UNDETERMINED = "undetermined"
    VENDOR_VMWARE = "vmware"
class VmSwitch(ManagedObject):
"""This is VmSwitch class."""
consts = VmSwitchConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("VmSwitch", "vmSwitch", "switch-[name]", VersionMeta.Version111j, "InputOutput", 0xfff, [], ["admin", "ls-config", "ls-config-policy", "ls-network", "pn-policy"], [u'extvmmProvider', u'extvmmSwitchSet', u'vmOrg'], [u'extvmmUpLinkPP', u'vmVnicProfInst'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["disable", "enable"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x4, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"ext_key": MoPropertyMeta("ext_key", "extKey", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, 1, 33, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version221b, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version201m, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""[\-\.:_a-zA-Z0-9]{1,40}""", [], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"key_inst": MoPropertyMeta("key_inst", "keyInst", "ushort", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"manager": MoPropertyMeta("manager", "manager", "string", VersionMeta.Version201m, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["rhev-m", "scvmm", "unmanaged", "vcenter"], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, 0x80, None, None, r"""[ !#$%&\(\)\*\+,\-\.:;=\?@\[\]_\{\|\}~a-zA-Z0-9]{1,16}""", [], []),
"own": MoPropertyMeta("own", "own", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, ["discovered", "managed"], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["local", "pending-policy", "policy"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x200, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x400, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"uuid": MoPropertyMeta("uuid", "uuid", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F]){8}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){4}\-([0-9a-fA-F]){12})|0""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version221b, MoPropertyMeta.READ_WRITE, 0x800, None, None, None, ["microsoft", "undetermined", "vmware"], []),
}
prop_map = {
"adminState": "admin_state",
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"extKey": "ext_key",
"fltAggr": "flt_aggr",
"id": "id",
"intId": "int_id",
"keyInst": "key_inst",
"manager": "manager",
"name": "name",
"own": "own",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"uuid": "uuid",
"vendor": "vendor",
}
    def __init__(self, parent_mo_or_dn, name, **kwargs):
        """Create a VmSwitch managed object.

        `parent_mo_or_dn`: parent managed object or its DN string.
        `name`: naming property (appears in the RN "switch-[name]").
        Other writable properties default to None and may be supplied as
        keyword arguments — presumably applied by ManagedObject.__init__;
        confirm in ucsmo.
        """
        self._dirty_mask = 0  # bitmask of locally modified properties
        self.name = name
        self.admin_state = None
        self.child_action = None
        self.descr = None
        self.ext_key = None
        self.flt_aggr = None
        self.id = None
        self.int_id = None
        self.key_inst = None
        self.manager = None
        self.own = None
        self.policy_level = None
        self.policy_owner = None
        self.sacl = None
        self.status = None
        self.uuid = None
        self.vendor = None
        ManagedObject.__init__(self, "VmSwitch", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
f4f38c9c5d24372ddfff33125b42134aab81c2e2 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /QcswPnY2cAbrfwuWE_0.py | 7ec3b3b0c8567e7f036275c1cc969cd1b5fc4448 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | """
Create a function that filters out factorials from a list. A factorial is a
number that can be represented in the following manner:
n! = n * (n-1) * (n-2) * ... * 3 * 2 * 1
Recursively, this can be represented as:
n! = n * (n-1)!
### Examples
filter_factorials([1, 2, 3, 4, 5, 6, 7]) ➞ [1, 2, 6]
filter_factorials([1, 4, 120]) ➞ [1, 120]
filter_factorials([8, 9, 10]) ➞ []
### Notes
N/A
"""
def filter_factorials(numbers):
factorials=[1]
n=max(numbers)
temp=1
for i in range(1,n+1):
temp*=i
factorials.append(temp)
return [i for i in numbers if i in factorials]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
97e6d00f92412bbd534ab4503951218c1842d523 | 908b5e9f5246309b45cf14ea0f7f2cc39c3853f1 | /build/vrpn_client_ros/catkin_generated/pkg.installspace.context.pc.py | cab6a546b2ff257f05d4079c96aedd1f54e26bd6 | [] | no_license | crvogt/vicon_ws | 4a2cc0aa2d1403edcf9240b545a77ca9c1e038e8 | ab474b7eb127c12aefdde1d2055cc4cdce0db952 | refs/heads/master | 2021-07-15T11:43:31.987944 | 2018-03-05T17:35:25 | 2018-03-05T17:35:25 | 95,583,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Catkin-generated package context; do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# The *_DIRS / *_LIBRARIES values are semicolon-separated CMake lists; the
# guard avoids "".split(';') producing [''] for an empty list.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/carson/vicon_ws/install/include;/opt/ros/indigo/include".split(';') if "/home/carson/vicon_ws/install/include;/opt/ros/indigo/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lvrpn_client_ros;/opt/ros/indigo/lib/libvrpn.a".split(';') if "-lvrpn_client_ros;/opt/ros/indigo/lib/libvrpn.a" != "" else []
PROJECT_NAME = "vrpn_client_ros"
PROJECT_SPACE_DIR = "/home/carson/vicon_ws/install"
PROJECT_VERSION = "0.1.1"
| [
"crvogt26@gmail.com"
] | crvogt26@gmail.com |
fe420ba34f5b3c6a319b46161e88ec9faaf9962f | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scattergeo/marker/_colorscale.py | 0d3a158256a6b78a1a74b5260a7e1f03ecdff770 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 512 | py | import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Generated validator for the `scattergeo.marker.colorscale` property."""

    def __init__(
        self,
        plotly_name='colorscale',
        parent_name='scattergeo.marker',
        **kwargs
    ):
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing the colorscale triggers a full recalculation and
            # implies autocolorscale is switched off.
            edit_type='calc',
            implied_edits={'autocolorscale': False},
            role='style',
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
14a3817962cd3561dba203d203fc978f818f205a | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/mgmt/commerce/models/usage_management_client_enums.py | d21d71ad515dbba392389fc344bfc79a31bdc8fe | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 1,056 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class AggregationGranularity(Enum):
    """Time granularity at which usage data is aggregated."""

    daily = "Daily"
    hourly = "Hourly"
| [
"me@teopeurt.com"
] | me@teopeurt.com |
d6d7c0f0a9eaba7901aa642d5230cb6d2c6d8f1f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2692/59018/260917.py | 470f6781ca1279f4085c9ea366a973489def6d5f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | def shipWithinDays(self, weights):
n=len(weights)
left=max(weights)
right=sum(weights)
res=left
while left<=right:
mid=(left+right)//2
count=0
su=0
for i in range(n):
su+=weights[i]
if su>mid:
count+=1
su=weights[i]
count+=1
if count<=D:
res=mid
right=mid-1
else:
left=mid+1
return res
# Driver: read "[w1,w2,...]" and the day count D from stdin, print the answer.
info=input()[1:-1].split(',')
List=[int(y) for y in info]  # strip brackets, split on commas, convert to int
D=int(input())
print(shipWithinDays(List,D))
"1069583789@qq.com"
] | 1069583789@qq.com |
1e2ecbb223cef7769987b8657dce0290f53d0d56 | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /numerical_analysis_backup/small-scale-multiobj/pod150_milp/connections/runsimu3_connections.py | 04598461721bb936e4e2ec41fdd27a4e6aa999ee | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,438 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch4_decomposition import Arch4_decompose
from arch1 import ModelSDM_arch1
from arch2_decomposition import Arch2_decompose
from arch5_decomposition import Arch5_decompose
np.random.seed(2010)
num_cores=3
num_slots=80
n_sim = 1 # number of simulations
n_start = 3 # index of start
n_end = n_start+n_sim # index of end
time_limit_routing = 1000 # 1000
time_limit_sa = 18000
alpha = 1
beta = 0
result = np.zeros((n_sim, 15))
total_cnk = []
for i in range(n_start, n_end):
filename = 'traffic_matrix__matrix_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if idx>11:
row.pop()
row = [int(u) for u in row]
tm.append(row)
tm = np.array(tm)*25
total_cnk.append(tm.flatten().astype(bool).sum())
result[i-n_start, 14] = tm.flatten().astype(bool).sum()
print "\n"
print total_cnk
print "\n"
#%% arch4
print "Architecture 4"
m = Arch4_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
result[i-n_start, 0] = m.connections_lb
result[i-n_start, 1] = m.connections_ub
result[i-n_start, 2] = m.throughput_lb
result[i-n_start, 3] = m.throughput_ub
#%% arch1
print "Architecutre 1"
m = ModelSDM_arch1(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model(mipfocus=1, timelimit=time_limit_routing,mipgap=0.01)
result[i-n_start, 4] = m.connections
result[i-n_start, 5] = m.throughput
#%% arch2
print "Architecture 2"
m = Arch2_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
result[i-n_start, 6] = m.connections_lb
result[i-n_start, 7] = m.connections_ub
result[i-n_start, 8] = m.throughput_lb
result[i-n_start, 9] = m.throughput_ub
#%% arch5
print "Architecture 5"
m = Arch5_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1, timelimit=time_limit_routing, mipgap=0.01)
m.create_model_sa(mipfocus=1, timelimit=time_limit_sa)
result[i-n_start, 10] = m.connections_lb
result[i-n_start, 11] = m.connections_ub
result[i-n_start, 12] = m.throughput_lb
result[i-n_start, 13] = m.throughput_ub
file_name = "result_connections_{}to{}.csv".format(n_start, n_end)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['arch4_connections_lb', 'arch4_connections_ub',
'arch4_throughput_lb', 'arch4_throughput_ub',
'arch1_connections', 'arch1_throughput',
'arch2_connections_lb', 'arch2_connections_ub',
'arch2_throughput_lb', 'arch2_throughput_ub',
'arch5_connections_lb', 'arch5_connections_ub',
'arch5_throughput_lb', 'arch5_throughput_ub',
'total_cnk'])
writer.writerows(result) | [
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
59cccb9b036905a4dcb9b90f777018c6b23081c2 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oshow_progress.py | 9ff815774d5e57e9adbef2bcf8bb26e912fabca7 | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 1,905 | py | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
try:
    from StringIO import StringIO  # Python 2 (note: the original comments had 2 and 3 swapped)
except ImportError:
    from io import StringIO  # Python 3
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.utils.typechecks import assert_is_type
import inspect
def h2oshow_progress():
    """
    Python API test for h2o.show_progress().

    Progress bars are normally verified by eyeballing the pyunit output
    file; here stdout is captured instead, and the command is assumed to
    work if a progress bar (the word "progress" plus "100%") appears and
    no unexpected error is raised.
    """
    try:  # this branch only works on Python 3
        s = StringIO()
        sys.stdout = s  # redirect output
        h2o.show_progress()  # progress reporting is on by default
        training_data = h2o.upload_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
        Y = 3
        X = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
        model = H2OGeneralizedLinearEstimator(family="binomial", alpha=0, Lambda=1e-5)
        model.train(x=X, y=Y, training_frame=training_data)
        sys.stdout=sys.__stdout__ # restore old stdout
        # make sure the word progress is found and % is found. That is how progress is displayed.
        assert ("progress" in s.getvalue()) and ("100%" in s.getvalue()), "h2o.show_progress() command is not working."
    except Exception as e:  # expected to trip on Python 2
        sys.stdout=sys.__stdout__ # restore old stdout
        assert_is_type(e, AttributeError)  # the Python-2 failure mode
        assert "encoding" in e.args[0], "h2o.show_progress() command is not working."
    # show_progress() must not take any arguments.
    allargs = inspect.getargspec(h2o.show_progress)
    assert len(allargs.args)==0, "h2o.show_progress() should have no arguments!"


if __name__ == "__main__":
    # Standalone: run through the pyunit harness; when imported, run directly.
    pyunit_utils.standalone_test(h2oshow_progress)
else:
    h2oshow_progress()
| [
"noreply@github.com"
] | Winfredemalx54.noreply@github.com |
ec91569702a15fcfcce3a0a53e0befcdb08371a1 | 7e54d5449b511d06158cfc0e2c928b8656e15ac7 | /sortedm2m_tests/models.py | aeb5846d6754d17787b4d7b7a34694466e9107dd | [
"BSD-3-Clause"
] | permissive | jonny5532/django-sortedm2m | 1f326271ef665c4c26f1f5b631bb9f0b70daf853 | bff0707efcc3257e47355cb2e77ab1abe3c48320 | refs/heads/master | 2021-01-15T18:45:55.789077 | 2012-01-19T10:50:15 | 2012-01-19T10:50:15 | 2,563,529 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | # -*- coding: utf-8 -*-
from django.db import models
from sortedm2m.fields import SortedManyToManyField
class Shelf(models.Model):
    # Ordered M2M: the through table records each book's position on the shelf.
    books = SortedManyToManyField('Book', related_name='shelves')
class Book(models.Model):
    name = models.CharField(max_length=50)

    def __unicode__(self):
        # Python 2 style string representation (this test suite predates py3).
        return self.name
class Store(models.Model):
    # Same relation as Shelf, but the target is given as an app-qualified
    # label instead of a bare model name.
    books = SortedManyToManyField('sortedm2m_tests.Book', related_name='stores')
class MessyStore(models.Model):
    # sorted=False degrades the field to a plain (unordered) many-to-many.
    books = SortedManyToManyField('Book',
        sorted=False,
        related_name='messy_stores')
class SelfReference(models.Model):
    # Recursive relation; the '+' suffix suppresses the reverse accessor.
    me = SortedManyToManyField('self', related_name='hide+')

    def __unicode__(self):
        return unicode(self.pk)
| [
"gregor@muellegger.de"
] | gregor@muellegger.de |
b249087047ebb31723edc290464c8960c892c52c | 4138be36f76f33815360ca74a3c80dd1b99bee19 | /tests/m2m_and_gfk_through/models.py | 867abb221851ea093a1ffe67f725cc6ad1db04ec | [
"MIT"
] | permissive | mikewolfd/django-gm2m | e385385b56fb2faa2277e270884f65f60e62ab0c | a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771 | refs/heads/master | 2021-08-22T23:32:17.459805 | 2017-07-19T02:11:21 | 2017-07-19T02:11:21 | 112,767,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | """
Test case for issue #5
Django 1.8 migration problems with combined M2M and GM2M relations
"""
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
import gm2m
class GM2MLinks(models.Model):
    class Meta:
        app_label = 'm2m_and_gfk_through'
    # Generic many-to-many: may point at instances of arbitrary models.
    sources = gm2m.GM2MField()
class MembershipThrough(models.Model):
    # Explicit through model for Membership.many_link below.
    class Meta:
        app_label = 'm2m_and_gfk_through'
    possibly = models.ForeignKey('Membership')
    link = models.ForeignKey(GM2MLinks)
class Membership(models.Model):
    class Meta:
        app_label = 'm2m_and_gfk_through'
    # Ordinary M2M routed through MembershipThrough — the combination this
    # test case (issue #5 in the module docstring) exercises.
    many_link = models.ManyToManyField(GM2MLinks, through=MembershipThrough)
class RandomData(models.Model):
    """
    Even though this seems completely unrelated to any of the other models,
    just adding a GFK causes the problems to surface with an M2M-Through
    """
    class Meta:
        app_label = 'm2m_and_gfk_through'
    object_id = models.PositiveIntegerField()
    content_type = models.ForeignKey(ContentType)
    # Generic foreign key assembled from the two columns above.
    my_gfk = GenericForeignKey('content_type', 'object_id')
| [
"thomas@ksytek.com"
] | thomas@ksytek.com |
397ea1f63ccc85b05f6d84497893b4ac7c16d5bd | 4fc1c45a7e570cc1204d4b5f21150f0771d34ea5 | /tools/benchmark/statistics.py | 46ee420c2e156cd62301b2d0da0e667f9a4ca590 | [] | no_license | CN1Ember/feathernet_mine | 77d29576e4ecb4f85626b94e6ff5884216af3098 | ac0351f59a1ed30abecd1088a46c7af01afa29d5 | refs/heads/main | 2023-05-28T17:19:06.624448 | 2021-06-17T04:39:09 | 2021-06-17T04:39:09 | 374,603,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,962 | py | import torch
import torch.nn as nn
from collections import OrderedDict
from .model_hook import ModelHook
from .stat_tree import StatTree, StatNode
from .reporter import report_format
def get_parent_node(root_node, stat_node_name):
    """Walk down the dotted-name path and return the node that should own
    *stat_node_name* (i.e. the node matching its parent prefix)."""
    assert isinstance(root_node, StatNode)
    parts = stat_node_name.split('.')
    current = root_node
    # Descend one dotted component at a time, stopping one level short of
    # the full name so that the returned node is the parent.
    for depth in range(len(parts) - 1):
        prefix = '.'.join(parts[:depth + 1])
        child_index = current.find_child_index(prefix)
        assert child_index != -1
        current = current.children[child_index]
    return current
def convert_leaf_modules_to_stat_tree(leaf_modules):
    """Build a StatTree from hooked leaf modules.

    *leaf_modules* is an ``OrderedDict`` mapping dotted module names (e.g.
    ``features.0.conv``) to hooked modules.  For every name, one StatNode is
    created per dotted component and attached under its parent prefix; the
    deepest node receives the measurements recorded on the leaf module.
    """
    assert isinstance(leaf_modules, OrderedDict)
    # (The original kept an unused `create_index` counter here; removed.)
    root_node = StatNode(name='root', parent=None)
    for leaf_module_name, leaf_module in leaf_modules.items():
        names = leaf_module_name.split('.')
        for i in range(len(names)):
            stat_node_name = '.'.join(names[0:i + 1])
            parent_node = get_parent_node(root_node, stat_node_name)
            node = StatNode(name=stat_node_name, parent=parent_node)
            parent_node.add_child(node)
            if i == len(names) - 1:  # leaf module itself: copy its measurements
                node.input_shape = leaf_module.input_shape.numpy().tolist()
                node.output_shape = leaf_module.output_shape.numpy().tolist()
                node.parameter_quantity = leaf_module.parameter_quantity.numpy()[0]
                node.inference_memory = leaf_module.inference_memory.numpy()[0]
                node.MAdd = leaf_module.MAdd.numpy()[0]
                node.Flops = leaf_module.Flops.numpy()[0]
                node.ConvFlops = leaf_module.ConvFlops.numpy()[0]
                node.duration = leaf_module.duration.numpy()[0]
                node.Memory = leaf_module.Memory.numpy().tolist()
    return StatTree(root_node)
class ModelStat(object):
    """Collects and prints per-layer statistics (parameters, MAdd/Flops,
    memory, duration) for a PyTorch model via module hooks."""
    def __init__(self, model, input_size, query_granularity=1):
        # model: an nn.Module to analyze.
        # input_size: 4-element (tuple/list) input shape, enforced below.
        # query_granularity: depth at which stats are aggregated in the tree.
        assert isinstance(model, nn.Module)
        assert isinstance(input_size, (tuple, list)) and len(input_size) == 4
        self._model = model
        self._input_size = input_size
        self._query_granularity = query_granularity
    def _analyze_model(self):
        # Hook the model's leaf modules, convert the recorded measurements
        # into a stat tree, and collect nodes at the requested granularity.
        model_hook = ModelHook(self._model, self._input_size)
        leaf_modules = model_hook.retrieve_leaf_modules()
        stat_tree = convert_leaf_modules_to_stat_tree(leaf_modules)
        collected_nodes = stat_tree.get_collected_stat_nodes(self._query_granularity)
        return collected_nodes
    def show_report(self):
        """Format the collected statistics and print them to stdout."""
        collected_nodes = self._analyze_model()
        report = report_format(collected_nodes)
        print(report)
def stat(model, input_size, query_granularity=1):
    """Convenience wrapper: build a ModelStat and print its report."""
    ModelStat(model, input_size, query_granularity).show_report()
| [
"chenguo@gpu017.scut-smil.cn"
] | chenguo@gpu017.scut-smil.cn |
657299928bba96983fc8b5a5e462eea61359d6db | f4dd8aa4e5476ffde24e27273dd47913c7f9177a | /Dlv2_safe2/tests/parser/26-Hanoi-Tower.asp.test.py | 3ef549c01be148d1405e32e3ec93f938ad439a0b | [
"Apache-2.0"
] | permissive | dave90/Dlv_safe2 | e56071ec1b07c45defda571cb721852e2391abfb | f127f413e3f35d599554e64aaa918bc1629985bc | refs/heads/master | 2020-05-30T10:44:13.473537 | 2015-07-12T12:35:22 | 2015-07-12T12:35:22 | 38,256,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,009 | py | input = """
% The meaning of the time predicate is self-evident. As for the disk
% predicate, there are k disks 1,2,...,k. Disks 1, 2, 3, 4 denote pegs.
% Disks 5, ... are "movable". The larger the number of the disk,
% the "smaller" it is.
%
% The program uses additional predicates:
% on(T,N,M), which is true iff at time T, disk M is on disk N
% move(t,N), which is true iff at time T, it is disk N that will be
% moved
% where(T,N), which is true iff at time T, the disk to be moved is moved
% on top of the disk N.
% goal, which is true iff the goal state is reached at time t
% steps(T), which is the number of time steps T, required to reach the goal (provided part of Input data)
% Read in data
on(0,N1,N) :- on0(N,N1).
onG(K,N1,N) :- ongoal(N,N1), steps(K).
% Specify valid arrangements of disks
% Basic condition. Smaller disks are on larger ones
:- time(T), on(T,N1,N), N1>=N.
% Specify a valid move (only for T<t)
% pick a disk to move
move(T,N) | noMove(T,N) :- disk(N), time(T), steps(K), T<K.
:- move(T,N1), move(T,N2), N1 != N2.
:- time(T), steps(K), T<K, not diskMoved(T).
diskMoved(T) :- move(T,Fv1).
% pick a disk onto which to move
where(T,N) | noWhere(T,N) :- disk(N), time(T), steps(K), T<K.
:- where(T,N1), where(T,N2), N1 != N2.
:- time(T), steps(K), T<K, not diskWhere(T).
diskWhere(T) :- where(T,Fv1).
% pegs cannot be moved
:- move(T,N), N<5.
% only top disk can be moved
:- on(T,N,N1), move(T,N).
% a disk can be placed on top only.
:- on(T,N,N1), where(T,N).
% no disk is moved in two consecutive moves
:- move(T,N), move(TM1,N), TM1=T-1.
% Specify effects of a move
on(TP1,N1,N) :- move(T,N), where(T,N1), TP1=T+1.
on(TP1,N,N1) :- time(T), steps(K), T<K,
on(T,N,N1), not move(T,N1), TP1=T+1.
% Goal description
:- not on(K,N,N1), onG(K,N,N1), steps(K).
:- on(K,N,N1), not onG(K,N,N1),steps(K).
% Solution
put(T,M,N) :- move(T,N), where(T,M), steps(K), T<K.
"""
output = """
% The meaning of the time predicate is self-evident. As for the disk
% predicate, there are k disks 1,2,...,k. Disks 1, 2, 3, 4 denote pegs.
% Disks 5, ... are "movable". The larger the number of the disk,
% the "smaller" it is.
%
% The program uses additional predicates:
% on(T,N,M), which is true iff at time T, disk M is on disk N
% move(t,N), which is true iff at time T, it is disk N that will be
% moved
% where(T,N), which is true iff at time T, the disk to be moved is moved
% on top of the disk N.
% goal, which is true iff the goal state is reached at time t
% steps(T), which is the number of time steps T, required to reach the goal (provided part of Input data)
% Read in data
on(0,N1,N) :- on0(N,N1).
onG(K,N1,N) :- ongoal(N,N1), steps(K).
% Specify valid arrangements of disks
% Basic condition. Smaller disks are on larger ones
:- time(T), on(T,N1,N), N1>=N.
% Specify a valid move (only for T<t)
% pick a disk to move
move(T,N) | noMove(T,N) :- disk(N), time(T), steps(K), T<K.
:- move(T,N1), move(T,N2), N1 != N2.
:- time(T), steps(K), T<K, not diskMoved(T).
diskMoved(T) :- move(T,Fv1).
% pick a disk onto which to move
where(T,N) | noWhere(T,N) :- disk(N), time(T), steps(K), T<K.
:- where(T,N1), where(T,N2), N1 != N2.
:- time(T), steps(K), T<K, not diskWhere(T).
diskWhere(T) :- where(T,Fv1).
% pegs cannot be moved
:- move(T,N), N<5.
% only top disk can be moved
:- on(T,N,N1), move(T,N).
% a disk can be placed on top only.
:- on(T,N,N1), where(T,N).
% no disk is moved in two consecutive moves
:- move(T,N), move(TM1,N), TM1=T-1.
% Specify effects of a move
on(TP1,N1,N) :- move(T,N), where(T,N1), TP1=T+1.
on(TP1,N,N1) :- time(T), steps(K), T<K,
on(T,N,N1), not move(T,N1), TP1=T+1.
% Goal description
:- not on(K,N,N1), onG(K,N,N1), steps(K).
:- on(K,N,N1), not onG(K,N,N1),steps(K).
% Solution
put(T,M,N) :- move(T,N), where(T,M), steps(K), T<K.
"""
| [
"davide@davide-All-Series"
] | davide@davide-All-Series |
286a85a4bf2a2e961e3da5764726f6960e7aef3c | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/GodOfPython/P12_File/direct/num3.py | bb276e2864e6fd000091dd1be4546d3379e9146b | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | f = open('D:/02.Python/ch12/direct/num2.txt', 'r+')
# In-memory inventory: each record is [name, price, quantity] parsed from
# "name|price|quantity" lines of the already-opened file object `f`.
item_list = []
for line in f:
    item_list.append(line.strip().split('|'))
# Text-menu loop (prompts preserved verbatim).
while True:
    print('-------------------------------------')
    print('-- 01. 전체 물품 출력(a) ------------')
    print('-- 02. 기존 물품 수량 변경(b) -------')
    print('-- 03. 새로운 물품 등록(c) ----------')
    print('-- 04. 종료(q) ----------------------')
    print('-------------------------------------')
    menu = input()
    if menu == 'q':
        break
    elif menu == 'a':
        # Print every record.
        for item in item_list:
            print(item)
    elif menu == 'b':
        # Update the quantity (index 2) of an existing item.
        print('물품명과 수량을 입력하세요.(물품명 수량)')
        temp = input().strip().split(' ')
        found = False
        for item in item_list:
            if item[0] == temp[0]:
                item[2] = temp[1]
                found = True
                break
        if not found:
            print('입력하신 물품은 존재하지 않습니다.')
    elif menu == 'c':
        # Register a new item unless the name already exists.
        print('새로운 물품을 등록하세요.(물품명 가격 수량)')
        temp = input().strip().split(' ')
        duplicated = False
        for item in item_list:
            if item[0] == temp[0]:
                print('이미 존재하는 물품입니다')
                duplicated = True
                break
        if not duplicated:
            item_list.append(temp)
    else:
        print('존재하지 않는 메뉴입니다.')
# Persist: rewrite the file from the start with the updated records.
f.seek(0, 0)
f.write('\n'.join('|'.join(item) for item in item_list))
# BUG FIX: without truncate(), a rewrite shorter than the original file
# leaves stale trailing bytes from the previous contents.
f.truncate()
f.close() | [
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
0539f68bfceaf9bc8ba55c42d82bfb718fbf5247 | 8578dca588f39923b6ca3af5419cc58d627cefd8 | /牛客企业真题/网易/网易2019实习生招聘编程题集合/牛牛找工作.py | 744ea44f8e482d9fb7720b8c2636fce0023b61e2 | [] | no_license | huhudaya/leetcode- | abc6eca463fc3ce0776218147c4bbed54e92f11f | cff397cb5202277a1ae85135e91051603debde09 | refs/heads/master | 2021-07-26T01:00:02.690250 | 2020-12-25T14:21:14 | 2020-12-25T14:21:14 | 233,403,333 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,604 | py | '''
为了找到自己满意的工作,牛牛收集了每种工作的难度和报酬。牛牛选工作的标准是在难度不超过自身能力值的情况下,牛牛选择报酬最高的工作。在牛牛选定了自己的工作后,牛牛的小伙伴们来找牛牛帮忙选工作,牛牛依然使用自己的标准来帮助小伙伴们。牛牛的小伙伴太多了,于是他只好把这个任务交给了你。
输入描述:
每个输入包含一个测试用例。
每个测试用例的第一行包含两个正整数,分别表示工作的数量N(N<=100000)和小伙伴的数量M(M<=100000)。
接下来的N行每行包含两个正整数,分别表示该项工作的难度Di(Di<=1000000000)和报酬Pi(Pi<=1000000000)。
接下来的一行包含M个正整数,分别表示M个小伙伴的能力值Ai(Ai<=1000000000)。
保证不存在两项工作的报酬相同。
输出描述:
对于每个小伙伴,在单独的一行输出一个正整数表示他能得到的最高报酬。一个工作可以被多个人选择。
输入例子1:
3 3------>N,M
1 100
10 1000
1000000000 1001
9 10 1000000000
输出例子1:
100
1000
1001
'''
# Java
'''
public class MainCorrect {
public static void main(String[] args) {
//划重点!!!此题坑点:输入中间有空行,所以用BuffferedReader会更麻烦,所以选择用Scanner
Scanner sc = new Scanner(System.in);
int n = sc.nextInt();
int m = sc.nextInt();
//保存所有工作的键值对,即<工作能力,报酬>,而且也保存每个小伙伴的能力值键值对,其报酬为0
Map<Integer, Integer> map = new HashMap<Integer, Integer>();
//保存所有工作的能力值以及要计算的每个小伙伴的能力值
int[] ai = new int[m + n];
for(int i = 0; i < n; i++) {
int di = sc.nextInt();
ai[i] = di;
int pi = sc.nextInt();
map.put(di, pi);
}
//保存要计算的每个小伙伴的能力值
int[] bi = new int[m];
for(int i = 0; i < m; i++) {
ai[i + n] = sc.nextInt();
bi[i] = ai[i + n];
if(!map.containsKey(ai[i + n])) {
map.put(ai[i + n], 0);
}
}
//对能力值进行排序
Arrays.sort(ai);
//保存到目前的能力值为止,所能获得的最大报酬,有种dp的味道
int ma = 0;
for(int i = 0; i < m + n; i++) {
//每次都更新当前能力值所对应的最大报酬,由于ma是保存的<=当前能力值所能获得的最大报酬,所以可行
ma = Math.max(ma, map.get(ai[i])); //因为已经排好序了,相当于和前一个最大值进行比较
map.put(ai[i], ma);
}
//遍历每个小伙伴的能力值,从map中获取到其最大报酬(在上面的for循环中已经更新到了)
for(int i = 0; i < m; i++) {
System.out.println(map.get(bi[i]));
}
}
}
'''
# 超时,两层for循环
'''
public class Main {
//用一个类来记录工作能力和报酬的对应关系,其实可以用map实现的
static class Job implements Comparable<Job>{
int di, pi;
public Job(int di, int pi) {
this.di = di;
this.pi = pi;
}
//按工作能力值进行排序
public int compareTo(Job job) {
return this.di - job.di;
}
}
public static void main(String[] args) throws IOException {
Scanner sc = new Scanner(System.in);
int n = sc.nextInt();
int m = sc.nextInt();
Job[] jobs = new Job[n];
for(int i = 0; i < n; i++) {
int di = sc.nextInt();
int pi = sc.nextInt();
jobs[i] = new Job(di, pi);
}
//对工作能力进行排序
Arrays.sort(jobs);
int[] ai = new int[m];
for(int i = 0; i < m; i++) {
ai[i] = sc.nextInt();
}
//逐一计算每个小伙伴,在其工作能力之内所能获得的最大报酬
for(int i = 0; i < m; i++) {
int j = 0;
int cnt = 0;
while(j < n && jobs[j].di <= ai[i]) {
if(cnt < jobs[j].pi) {
cnt = jobs[j].pi;
}
j++;
}
System.out.println(cnt);
}
}
}
'''
# 二分
'''
解题思路:
自定义一个类Work来描述工作
所有的Work存入works数组中,根据工作的难度对works从小到大排序
定义一个dp数组,dp[i]表示难度小于等于works[i]的最大报酬。
对于输入的能力值,使用二分查找,扫描works数组,找到works数组中小于等于指定能力值,且下标最大的Work。
记该Work的下标为index
dp[index]就是结果
// dp[i]:记录难度小于等于works[i].difficulty的最大报酬
dp[0] = works[0].reward;
for (int i = 1; i < works.length; i++) {
dp[i] = dp[i - 1] > works[i].reward ? dp[i - 1] : works[i].reward;
}
'''
# Java
'''
import java.util.Scanner;
import java.util.Arrays;
import java.util.Comparator;
class Work {
int difficulty;
int reward;
public Work(int difficulty, int reward) {
super();
this.difficulty = difficulty;
this.reward = reward;
}
}
public class Main {
public static void main(String[] args) {
findwork();
}
public static void findwork() {
Scanner in = new Scanner(System.in);
int n = in.nextInt();// 工作数量
int m = in.nextInt();// 人数
Work[] works = new Work[n];// 存储n份工作
int[] dp = new int[n];// dp[n]:难度小于等于works[n].difficulty的工作的最高报酬
// 读入n份工作
for (int i = 0; i < n; i++) {
int difficulty = in.nextInt();
int reward = in.nextInt();
Work work = new Work(difficulty, reward);
works[i] = work;
}
// 根据工作的难度,对n份工作从小到大排序
Arrays.sort(works, new Comparator<Work>() {
@Override
public int compare(Work o1, Work o2) {
return o1.difficulty - o2.difficulty;
}
});
// dp[i]:记录难度小于等于works[i].difficulty的最大报酬
dp[0] = works[0].reward;
for (int i = 1; i < works.length; i++) {
dp[i] = dp[i - 1] > works[i].reward ? dp[i - 1] : works[i].reward;
}
for (int i = 0; i < m; i++) {
int capability = in.nextInt();
// 能力值小于所有的工作的难度
if (capability < works[0].difficulty) {
System.out.println(0);
continue;
}
// 能力值大于等于所有的工作的难度
if (capability >= works[n - 1].difficulty) {
System.out.println(dp[n - 1]);
continue;
}
// 二分查找,找到第一个小于capability的work
int low = 0;
int high = n - 1;
while (low <= high) {
int middle = (low + high) / 2;
// works[middle]是符合能力值,且难度最大的工作
if (works[middle].difficulty <= capability && works[middle + 1].difficulty > capability) {
System.out.println(dp[middle]);
break;
}
// 找到难度等于能力值,且下标最大的工作
if (works[middle].difficulty == capability) {
// 找到最后一个符合capability的工作
int index = middle;
while (index + 1 < n && works[index + 1].difficulty == capability) {
index++;
}
System.out.println(dp[middle]);
break;
} else if (capability > works[middle].difficulty) {
low = middle + 1;
} else if (capability < works[middle].difficulty) {
high = middle - 1;
}
}
}
}
}
'''
# 自己的版本
import sys
# Read N jobs (difficulty, pay) and M friends' abilities; for each friend
# print the best pay among jobs whose difficulty does not exceed the ability.
n, m = [int(x) for x in input().strip().split()]
difficulties = []
best_pay = {}  # renamed from `map`: shadowing the builtin breaks later map() calls
for _ in range(n):
    line = sys.stdin.readline()
    d, p = [int(x) for x in line.strip().split()]
    difficulties.append(d)
    # BUG FIX: keep the highest pay when several jobs share one difficulty;
    # the original `map[d] = p` silently kept whichever job came last.
    best_pay[d] = max(best_pay.get(d, 0), p)
cap = [int(x) for x in input().strip().split()]
# Inject each friend's ability as a zero-pay "job" so the sweep below also
# produces an answer keyed by the ability value itself.
for ability in cap:
    difficulties.append(ability)
    if ability not in best_pay:
        best_pay[ability] = 0
difficulties.sort()
# Sweep in difficulty order, propagating the running maximum pay so that
# best_pay[d] becomes "best pay achievable with ability d".
running_max = best_pay[difficulties[0]]
for d in difficulties:
    running_max = max(running_max, best_pay[d])
    best_pay[d] = running_max
for ability in cap:
    print(best_pay[ability])
import sys
def main():
    """Read the whole problem from stdin and print, for each friend, the best
    pay reachable with their ability (same algorithm as the script above:
    sort all difficulties + abilities, then sweep a running maximum)."""
    lines = sys.stdin.readlines()
    lines = [l.strip().split() for l in lines if l.strip()]
    n, m = int(lines[0][0]), int(lines[0][1])
    res = [0] * (n + m)
    abilities = list(map(int, lines[-1]))
    maps = dict()
    for index, l in enumerate(lines[1:-1]):
        d, s = int(l[0]), int(l[1])
        # NOTE(review): duplicate difficulties overwrite each other here,
        # keeping the last pay rather than the maximum — confirm inputs
        # guarantee distinct difficulties.
        maps[d] = s
        res[index] = d
    # Add each ability as a zero-pay entry so the sweep covers it too.
    for index, ability in enumerate(abilities):
        res[index + n] = ability
        if ability not in maps:
            maps[ability] = 0
    res.sort()
    maxSalary = 0
    # Running maximum over increasing difficulty/ability values.
    for index in range(n + m):
        maxSalary = max(maxSalary, maps[res[index]])
        maps[res[index]] = maxSalary
    for index in range(m):
        print(maps[abilities[index]])
print(maps[abilities[index]])
if __name__ == '__main__':
main()
| [
"457775600@qq.com"
] | 457775600@qq.com |
679c01dbd43a59d383ce9f52f744523310bd916a | 9ac405635f3ac9332e02d0c7803df757417b7fee | /cotizaciones_componentes/migrations/0021_auto_20200215_1600.py | 4b5458c6c75c7a29b644a6d2623a316422029522 | [] | no_license | odecsarrollo/07_intranet_proyectos | 80af5de8da5faeb40807dd7df3a4f55f432ff4c0 | 524aeebb140bda9b1bf7a09b60e54a02f56fec9f | refs/heads/master | 2023-01-08T04:59:57.617626 | 2020-09-25T18:01:09 | 2020-09-25T18:01:09 | 187,250,667 | 0 | 0 | null | 2022-12-30T09:36:37 | 2019-05-17T16:41:35 | JavaScript | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.2.6 on 2020-02-15 21:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cotizaciones_componentes', '0020_auto_20200215_0953'),
]
operations = [
migrations.AlterField(
model_name='itemcotizacioncomponente',
name='descripcion',
field=models.CharField(max_length=300, null=True),
),
]
| [
"fabio.garcia.sanchez@gmail.com"
] | fabio.garcia.sanchez@gmail.com |
45f139875b7ada90c52391a4a0b587f14a01e96d | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4148/codes/1723_2506.py | 4c9fd872f99d5b04e510eba1c69d4e217871649e | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | q = int(input("quant.: "))
pc = int(input("percentual: "))
qvc = int(input("quant. de venda: "))
ano = 0
cap = 12000
while(q>0 and q<cap):
q = q + (q*(pc/100))-qvc
ano = ano +1
if (q<0):
print("EXTINCAO")
if(q>cap):
print("LIMITE")
print(ano)
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
1fd9fbd4ecbd63f40d60da6eeaad2a451a719921 | e2e39726195c7bc075b9bd56e757acd136527d5c | /typings/vtkmodules/vtkCommonExecutionModel/vtkMultiTimeStepAlgorithm.pyi | b8f276a14fcdccf562f6efabb3c16449399f1ca2 | [
"BSD-3-Clause"
] | permissive | gen4438/vtk-python-stubs | a652272183d2d1ee48d4639e86bcffc1ac454af0 | c9abd76362adf387af64ce5ddbd04c5d3bebe9da | refs/heads/main | 2023-04-04T02:13:15.459241 | 2021-04-15T10:47:28 | 2021-04-15T10:53:59 | 358,224,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,031 | pyi | """
This type stub file was generated by pyright.
"""
from .vtkAlgorithm import vtkAlgorithm
class vtkMultiTimeStepAlgorithm(vtkAlgorithm):
"""
vtkMultiTimeStepAlgorithm - Superclass for algorithms that would like
to
make multiple time requests
Superclass: vtkAlgorithm
This class can be inherited by any algorithm that wishes to make
multiple time requests upstream. The child class uses
UPDATE_TIME_STEPS to make the time requests and use set of
time-stamped data objects are stored in time order in a
vtkMultiBlockDataSet object.
"""
def GetNumberOfGenerationsFromBase(self, string):
"""
V.GetNumberOfGenerationsFromBase(string) -> int
C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
override;
Given a the name of a base class of this class type, return the
distance of inheritance between this class type and the named
class (how many generations of inheritance are there between this
class and the named class). If the named class is not in this
class's inheritance tree, return a negative value. Valid
responses will always be nonnegative. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def GetNumberOfGenerationsFromBaseType(self, string):
"""
V.GetNumberOfGenerationsFromBaseType(string) -> int
C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
const char *type)
Given a the name of a base class of this class type, return the
distance of inheritance between this class type and the named
class (how many generations of inheritance are there between this
class and the named class). If the named class is not in this
class's inheritance tree, return a negative value. Valid
responses will always be nonnegative. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def IsA(self, string):
"""
V.IsA(string) -> int
C++: vtkTypeBool IsA(const char *type) override;
Return 1 if this class is the same type of (or a subclass of) the
named class. Returns 0 otherwise. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def IsTypeOf(self, string):
"""
V.IsTypeOf(string) -> int
C++: static vtkTypeBool IsTypeOf(const char *type)
Return 1 if this class type is the same type of (or a subclass
of) the named class. Returns 0 otherwise. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def NewInstance(self):
"""
V.NewInstance() -> vtkMultiTimeStepAlgorithm
C++: vtkMultiTimeStepAlgorithm *NewInstance()
"""
...
def SafeDownCast(self, vtkObjectBase):
"""
V.SafeDownCast(vtkObjectBase) -> vtkMultiTimeStepAlgorithm
C++: static vtkMultiTimeStepAlgorithm *SafeDownCast(
vtkObjectBase *o)
"""
...
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
...
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
...
def __init__(self, *args, **kwargs) -> None:
...
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
...
def __repr__(self, *args, **kwargs):
""" Return repr(self). """
...
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
...
def __str__(self, *args, **kwargs) -> str:
""" Return str(self). """
...
__this__ = ...
__dict__ = ...
__vtkname__ = ...
| [
"g1e2n04@gmail.com"
] | g1e2n04@gmail.com |
762bbef5680c83f8136ec7bbc152abafe40ac2e2 | 59f64b5cf799e31c97b11828dba4787afb8f3f17 | /hail/python/hailtop/aiocloud/aioazure/client/network_client.py | 495771bb1ead6b26ad134d1796f132e647ffc634 | [
"MIT"
] | permissive | hail-is/hail | 2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1 | 07a483ae0f46c66f3ed6fd265b48f48c06298f98 | refs/heads/main | 2023-09-01T15:03:01.450365 | 2023-09-01T02:46:35 | 2023-09-01T02:46:35 | 45,069,467 | 913 | 262 | MIT | 2023-09-14T21:53:32 | 2015-10-27T20:55:42 | Python | UTF-8 | Python | false | false | 1,333 | py | from typing import Optional
import aiohttp
from ..session import AzureSession
from .base_client import AzureBaseClient
class AzureNetworkClient(AzureBaseClient):
    """Client for Microsoft.Network resources scoped to one resource group."""

    def __init__(self, subscription_id, resource_group_name, session: Optional[AzureSession] = None, **kwargs):
        # Default the api-version query parameter without clobbering one the
        # caller already supplied.
        if 'params' not in kwargs:
            kwargs['params'] = {}
        params = kwargs['params']
        if 'api-version' not in params:
            params['api-version'] = '2021-03-01'
        session = session or AzureSession(**kwargs)
        super().__init__(f'https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.Network',
                         session=session)

    async def delete_nic(self, nic_name: str, ignore_not_found: bool = False):
        """Delete a network interface; a 404 is swallowed when *ignore_not_found*."""
        try:
            await self.delete(f'/networkInterfaces/{nic_name}')
        except aiohttp.ClientResponseError as e:
            # BUG FIX: the original executed `pass` and then fell through to an
            # unconditional `raise`, so ignore_not_found never suppressed 404s.
            if ignore_not_found and e.status == 404:
                return
            raise

    async def delete_public_ip(self, public_ip_name: str, ignore_not_found: bool = False):
        """Delete a public IP address; a 404 is swallowed when *ignore_not_found*."""
        try:
            await self.delete(f'/publicIPAddresses/{public_ip_name}')
        except aiohttp.ClientResponseError as e:
            # BUG FIX: same fall-through-to-raise defect as delete_nic.
            if ignore_not_found and e.status == 404:
                return
            raise
| [
"noreply@github.com"
] | hail-is.noreply@github.com |
820951c82f77cecf7f891cf5a2edb0f60f69491b | 3e3bf98840d133e56f0d0eb16ba85678ddd6ca45 | /.history/iss_20200102123332.py | 97d3addabc223e3f815e8f462e6cae4fabebbc2d | [] | no_license | Imraj423/backend-iss-location-assessment | a05d3cc229a5fc4857483ae466348c1f8c23c234 | b0565c089a445ccffcb8d0aab3c0be3bb0c1d5b8 | refs/heads/master | 2020-12-03T17:04:58.512124 | 2020-06-24T16:02:02 | 2020-06-24T16:02:02 | 231,400,854 | 0 | 0 | null | 2020-06-24T16:02:04 | 2020-01-02T14:43:44 | null | UTF-8 | Python | false | false | 672 | py | import requests
import turtle
screen = turtle.Screen()
screen.bgpic("map.gif")
screen.screensize(800, 600)
screen.setup(720, 360)
# World coordinates = (longitude, latitude) so positions can be plotted directly.
screen.setworldcoordinates(-180, -90, 180, 90)
image = "iss.gif"
screen.addshape(image)
raf = turtle.Turtle()
raf.shape(image)
raf.setheading(45)
raf.penup()
# NOTE(review): exitonclick() blocks here at import time, before main() or the
# functions below ever run — confirm this placement is intended.
screen.exitonclick()
def main():
    # Entry-point placeholder: the ISS-tracking logic (location()/astronauts()
    # plus turtle plotting) has not been wired up yet.
    pass
def location():
    """GET the current ISS position from Open Notify and return the Response.

    Raises an HTTP error if the request failed.
    """
    s = requests.get('http://api.open-notify.org/iss-now.json')
    # BUG FIX: the original accessed `s.json` and `s.raise_for_status` as bare
    # attributes without calling them, so parsing/error-checking never ran.
    s.raise_for_status()
    print(s.text)
    return s
def astronauts():
    """GET the current astronauts-in-space list and return the Response."""
    r = requests.get('http://api.open-notify.org/astros.json')
    r.raise_for_status()
    print(r.text)
    # BUG FIX: the original returned the undefined name `s` (NameError).
    return r
if __name__ == "__main__":
main()
| [
"dahqniss@gmail.com"
] | dahqniss@gmail.com |
49ef9ae27e9207fdae7537265f3119a3db58a5c0 | ba744a96d4c8fbcbaa15bcdbc5c3efe3860578b7 | /apps/user_operation/migrations/0002_auto_20190613_1536.py | 64a928bf44052bc84aab7e8ccb0cd5c3952c3bca | [] | no_license | zhangliang852469/Mx_shop_afterend | d84107887936baf122ed489de766f5d22958865b | 9d04de806d6ec87778f2ebe002459ee6a854915e | refs/heads/master | 2022-12-17T21:03:21.315285 | 2019-06-18T09:49:51 | 2019-06-18T09:49:51 | 192,023,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2019-06-13 07:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11, see header): adds the user/goods foreign
    # keys to the user_operation models and enforces one favorite per
    # (user, goods) pair.
    initial = True
    dependencies = [
        ('user_operation', '0001_initial'),
        ('goods', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.AddField(
            model_name='userleavingmessage',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
        migrations.AddField(
            model_name='userfav',
            name='goods',
            field=models.ForeignKey(help_text='商品id', on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品'),
        ),
        migrations.AddField(
            model_name='userfav',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
        migrations.AddField(
            model_name='useraddress',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
        migrations.AlterUniqueTogether(
            name='userfav',
            unique_together=set([('user', 'goods')]),
        ),
    ]
| [
"710567585@qq.com"
] | 710567585@qq.com |
a8045b4b3a67464f1ef11694c333a468a7a44896 | f608dbe94b6e05f63d9bfa030c8ca87725957b93 | /core/src/world/actions/inventory/inventory.py | 4568dbfe32d9d472cb85f44fb7376773d9b41eda | [] | no_license | ProjectMHQ/projectm | 3336c82cbd1e330e065cb178d476c72d552fbfaf | adcb42722354ea4929300e9a4597e734b431c6e5 | refs/heads/master | 2023-04-22T18:41:48.091889 | 2021-01-30T11:28:28 | 2021-01-30T11:28:28 | 216,660,020 | 0 | 0 | null | 2021-05-06T20:33:28 | 2019-10-21T20:32:21 | Python | UTF-8 | Python | false | false | 774 | py | from core.src.world.actions.inventory.inventory_messages import InventoryMessages
from core.src.world.components.inventory import InventoryComponent
from core.src.world.components.position import PositionComponent
from core.src.world.domain.entity import Entity
from core.src.world.utils.entity_utils import load_components, search_entities_in_container_by_keyword
from core.src.world.utils.messaging import emit_sys_msg
messages = InventoryMessages()
async def inventory(entity: Entity):
    """Send the entity a system message listing everything in its inventory."""
    await load_components(entity, PositionComponent, InventoryComponent)
    # Renamed local (was `inventory`): it shadowed this function's own name.
    inventory_component = entity.get_component(InventoryComponent)
    items = await search_entities_in_container_by_keyword(inventory_component, '*')
    await emit_sys_msg(entity, 'inventory', messages.items_to_message(items))
| [
"guido.dassori@gmail.com"
] | guido.dassori@gmail.com |
f4977cc67d8e72649ab03139364065bcecbaaccb | 0ff2c6b1def739e687e7acd809567558bcecd660 | /data_fix/darknet_to_coco.py | 9d159c339a04d09f2adea4326c18abbc528c4ffd | [] | no_license | koalakid1/YOLOPose | 5e7b7cc8df343ad655d070831f0fd6aa1eb45685 | 0da31dfc3bcb216b19746af1e00e3a61b9671517 | refs/heads/master | 2022-04-24T10:04:35.758904 | 2020-04-19T11:45:33 | 2020-04-19T11:45:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,778 | py | import os
import sys
import numpy as np
import pandas as pd
def dark_to_coco(text_dir, output_dir, output_name, img_dir, img_w=416, img_h=416):
    """Convert darknet/YOLO label files into a single COCO-style list file.

    Each ``*.txt`` file under *text_dir* holds rows of
    ``class x_center y_center width height`` with coordinates normalized to
    [0, 1].  For every labelled image, one line is written to
    ``output_dir/output_name``::

        index img_path img_w img_h class xmin ymin xmax ymax [class xmin ...]

    Args:
        text_dir: directory containing the darknet ``.txt`` label files.
        output_dir: directory that receives the converted list file.
        output_name: file name of the converted list file.
        img_dir: directory prefix recorded for each image path.
        img_w, img_h: image pixel size (default 416x416, the value that was
            previously hard-coded).

    Fixes vs. the original: writes to the output file directly instead of
    hijacking (and then closing) ``sys.stdout``; closes every label file;
    clips via ``Series.clip`` instead of pandas chained assignment.
    """
    box_cols = ['class', 'xmin', 'ymin', 'xmax', 'ymax']
    with open(os.path.join(output_dir, output_name), "w") as out:
        for root, dirs, files in os.walk(text_dir):
            idx = 0  # NOTE: reset per walked directory, as in the original
            for f in [f for f in files if os.path.splitext(f)[-1] == ".txt"]:
                # NOTE(review): labels are opened relative to text_dir, so
                # nested sub-directories are not supported — confirm intent.
                with open(os.path.join(text_dir, f), "r") as txt_f:
                    rows = txt_f.readlines()
                data = np.zeros([len(rows), 5])
                for i, row in enumerate(rows):
                    data[i, :] = [float(v) for v in row.split(' ')[:5]]
                df = pd.DataFrame(data, columns=['class', 'xcenter', 'ycenter', 'width', 'height'])
                half_w = df['width'] / 2
                half_h = df['height'] / 2
                # Convert normalized center/size boxes to pixel corners,
                # clamping the top-left corner into the image.
                df['xmin'] = ((df['xcenter'] - half_w) * img_w).clip(lower=0)
                df['ymin'] = ((df['ycenter'] - half_h) * img_h).clip(lower=0)
                df['xmax'] = (df['xcenter'] + half_w) * img_w
                df['ymax'] = (df['ycenter'] + half_h) * img_h
                flat = df.loc[:, box_cols].astype(int).astype(str).values.flatten().tolist()
                line = ' '.join(flat)
                if line:
                    img_name = f.replace(".txt", ".jpg")
                    out.write("%d %s %d %d %s\n" % (idx, os.path.join(img_dir, img_name), img_w, img_h, line))
                idx += 1
| [
"comojin1994@gmail.com"
] | comojin1994@gmail.com |
1e593b825cab0a60fb5bff2f8ead37386a8a901b | 031b7927274f55e60d9ab004ce8ea39f34abbbca | /tensorflow_probability/python/bijectors/generalized_pareto.py | 6f4910589e1871a72124a32ef446fd0d61d187fe | [
"Apache-2.0"
] | permissive | brianwa84/probability | 8d87c96d7b8b1a885a7a7377a13978bd13ffa9c3 | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | refs/heads/master | 2021-06-19T08:58:40.276319 | 2021-05-14T21:43:14 | 2021-05-14T21:44:53 | 146,023,828 | 0 | 0 | Apache-2.0 | 2019-06-06T15:18:43 | 2018-08-24T18:00:25 | Jupyter Notebook | UTF-8 | Python | false | false | 5,582 | py | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The GeneralizedPareto bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.bijectors import chain as chain_bijector
from tensorflow_probability.python.bijectors import shift as shift_bijector
from tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.internal import auto_composite_tensor
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'GeneralizedPareto',
]
@auto_composite_tensor.auto_composite_tensor(
    omit_kwargs=('name',), module_name='tfp.bijectors')
class GeneralizedPareto(bijector_lib.AutoCompositeTensorBijector):
  """Bijector mapping R**n to non-negative reals.

  Forward computation maps R**n to the support of the `GeneralizedPareto`
  distribution with parameters `loc`, `scale`, and `concentration`.

  #### Mathematical Details

  The forward computation from `y` in R**n to `x` constrains `x` as follows:

  `x >= loc` if `concentration >= 0`

  `x >= loc` and `x <= loc + scale / abs(concentration)` if `concentration < 0`

  This bijector is used as the `_experimental_default_event_space_bijector` of
  the `GeneralizedPareto` distribution.
  """

  def __init__(self,
               loc,
               scale,
               concentration,
               validate_args=False,
               name='generalized_pareto'):
    """Instantiates the `GeneralizedPareto` bijector.

    Args:
      loc: Numeric `Tensor`; lower endpoint of the support (`x >= loc`).
      scale: Numeric `Tensor`; with `concentration`, determines the upper
        endpoint `loc + abs(scale / concentration)` when `concentration < 0`.
      concentration: Numeric `Tensor`; its sign selects which constraining
        transform is applied elementwise (see class docstring).
      validate_args: Python `bool`, forwarded to the component bijectors.
      name: Python `str`, name for ops created by this bijector.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      dtype = dtype_util.common_dtype(
          [loc, scale, concentration], dtype_hint=tf.float32)
      # NOTE(review): `dtype` is computed here but not passed to the
      # `convert_nonref_to_tensor` calls below -- confirm this is intended.
      self._loc = tensor_util.convert_nonref_to_tensor(loc)
      self._scale = tensor_util.convert_nonref_to_tensor(scale)
      self._concentration = tensor_util.convert_nonref_to_tensor(concentration)
      # Branch used where concentration >= 0: softplus into the positive
      # reals, then shift by `loc`, so outputs satisfy x >= loc. The
      # negative-concentration branch is built lazily per call (see
      # `_negative_concentration_bijector`) because its upper bound depends
      # on the current parameter values.
      self._non_negative_concentration_bijector = chain_bijector.Chain([
          shift_bijector.Shift(shift=self._loc, validate_args=validate_args),
          softplus_bijector.Softplus(validate_args=validate_args)
      ], validate_args=validate_args)
      super(GeneralizedPareto, self).__init__(
          validate_args=validate_args,
          forward_min_event_ndims=0,
          dtype=dtype,
          parameters=parameters,
          name=name)

  @classmethod
  def _parameter_properties(cls, dtype):
    # NOTE(review): the default constraining bijector maps `concentration`
    # into the strictly positive reals even though this class explicitly
    # handles negative concentrations above -- confirm against upstream.
    return dict(
        loc=parameter_properties.ParameterProperties(),
        scale=parameter_properties.ParameterProperties(
            default_constraining_bijector_fn=(
                lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
        concentration=parameter_properties.ParameterProperties(
            default_constraining_bijector_fn=(
                lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))

  def _is_increasing(self):
    # Both branch transforms (Sigmoid and Shift(Softplus)) are monotonically
    # increasing, so the composite forward map is too.
    return True

  @property
  def loc(self):
    return self._loc

  @property
  def scale(self):
    return self._scale

  @property
  def concentration(self):
    return self._concentration

  def _negative_concentration_bijector(self):
    # Constructed dynamically so that `loc + scale / concentration` is
    # tape-safe.
    loc = tf.convert_to_tensor(self.loc)
    high = loc + tf.math.abs(self.scale / self.concentration)
    return sigmoid_bijector.Sigmoid(
        low=loc, high=high, validate_args=self.validate_args)

  def _forward(self, x):
    # Both branches are evaluated; `tf.where` selects elementwise on the
    # sign of `concentration`.
    return tf.where(self._concentration < 0.,
                    self._negative_concentration_bijector().forward(x),
                    self._non_negative_concentration_bijector.forward(x))

  def _inverse(self, y):
    return tf.where(self._concentration < 0.,
                    self._negative_concentration_bijector().inverse(y),
                    self._non_negative_concentration_bijector.inverse(y))

  def _forward_log_det_jacobian(self, x):
    event_ndims = self.forward_min_event_ndims
    return tf.where(
        self._concentration < 0.,
        self._negative_concentration_bijector().forward_log_det_jacobian(
            x, event_ndims=event_ndims),
        self._non_negative_concentration_bijector.forward_log_det_jacobian(
            x, event_ndims=event_ndims))

  def _inverse_log_det_jacobian(self, y):
    event_ndims = self.inverse_min_event_ndims
    return tf.where(
        self._concentration < 0.,
        self._negative_concentration_bijector().inverse_log_det_jacobian(
            y, event_ndims=event_ndims),
        self._non_negative_concentration_bijector.inverse_log_det_jacobian(
            y, event_ndims=event_ndims))
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
19b6ed2b84c9efa20a3e5b0ba417374644185cee | 8668830f34ce260565217ea3b49e090778780b44 | /sms_gateway/tests/test_task_text_blast_coupon.py | e16bbece6a9871e7570065ff5e98477f81d5e0d4 | [] | no_license | wcirillo/ten | 72baf94da958b2ee6f34940c1fc3116660436762 | a780ccdc3350d4b5c7990c65d1af8d71060c62cc | refs/heads/master | 2016-09-06T13:39:03.966370 | 2015-07-02T12:37:36 | 2015-07-02T12:37:36 | 15,700,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | """
Tests of sms_gateway app tasks.
"""
from django.conf import settings
from coupon.models import Action, Coupon, CouponAction, SubscriberAction
from sms_gateway.tasks import text_blast_coupon
from sms_gateway.tests.sms_gateway_test_case import SMSGatewayTestCase
settings.CELERY_ALWAYS_EAGER = True
class TestTextBlast(SMSGatewayTestCase):
    """ Unit tests for text blasting. """
    # Fixture data provides the coupons (ids 1-5) exercised below.
    fixtures = ['test_advertiser', 'test_coupon', 'test_subscriber', ]

    def setUp(self):
        """
        Tests need eager queue. Tests need access to the request factory.
        """
        super(TestTextBlast, self).setUp()
        # Action id 11 -- asserted below to render as 'Text Blasted'.
        self.action = Action.objects.get(id=11)

    def test_text_blast_coupon(self):
        """
        Asserts that a valid coupon is blasted.
        """
        coupon = Coupon.objects.get(id=1)
        print(coupon)  # debug output left in place
        coupon.sms = coupon.get_default_sms()
        print(coupon.sms)  # debug output left in place
        coupon.save()
        # Precondition: no blast actions recorded yet for this coupon.
        self.assertEquals(CouponAction.objects.filter(
            coupon=coupon,
            action=self.action
        ).count(), 0)
        self.assertEquals(SubscriberAction.objects.filter(
            coupon=coupon,
            action=self.action
        ).count(), 0)
        text_blast_coupon(coupon)
        # Check for subscriber action recorded for this coupon
        self.assertEquals(str(coupon.subscriber_actions.all()[0].action),
            'Text Blasted')
        try:
            coupon_action = CouponAction.objects.get(
                coupon=coupon,
                action=self.action
            )
            self.assertEquals(coupon_action.count, 1)
        except CouponAction.DoesNotExist:
            self.fail('CouponAction was not created.')
        # Try blasting it again. This is not allowed.
        text_blast_coupon(coupon)
        try:
            coupon_action = CouponAction.objects.get(
                coupon=coupon,
                action=self.action
            )
            # Count must still be 1: the duplicate blast was rejected.
            self.assertEquals(coupon_action.count, 1)
        except CouponAction.DoesNotExist:
            self.fail('CouponAction was not created.')
        # Try blasting a different coupon of same business now.
        coupon = Coupon.objects.get(id=5)
        text_blast_coupon(coupon)
        self.assertEquals(CouponAction.objects.filter(
            coupon=coupon,
            action=self.action
        ).count(), 0)

    def test_blast_not_sms(self):
        """
        Assert a coupon that has is_redeemed_by_sms False does not blast.
        """
        coupon = Coupon.objects.get(id=2)
        text_blast_coupon(coupon)
        self.assertEquals(CouponAction.objects.filter(
            coupon=coupon,
            action=self.action
        ).count(), 0)

    def test_blast_not_approved(self):
        """
        Assert a coupon that is not approved does not blast.
        """
        coupon = Coupon.objects.get(id=3)
        text_blast_coupon(coupon)
        self.assertEquals(CouponAction.objects.filter(
            coupon=coupon,
            action=self.action
        ).count(), 0)

    def test_blast_no_zip(self):
        """
        Assert a coupon that has no zip code does not blast.
        """
        coupon = Coupon.objects.get(id=4)
        text_blast_coupon(coupon)
        self.assertEquals(CouponAction.objects.filter(
            coupon=coupon,
            action=self.action
        ).count(), 0)
| [
"williamcirillo@gmail.com"
] | williamcirillo@gmail.com |
dbf5708023c1cd6e9fa27007bec608a1b0a11915 | 3b786d3854e830a4b46ee55851ca186becbfa650 | /SystemTesting/pylib/vmware/nsx/manager/csr/csr_facade.py | c748ebec7776c2f888198da7e0fcf182ec60e611 | [] | no_license | Cloudxtreme/MyProject | d81f8d38684333c22084b88141b712c78b140777 | 5b55817c050b637e2747084290f6206d2e622938 | refs/heads/master | 2021-05-31T10:26:42.951835 | 2015-12-10T09:57:04 | 2015-12-10T09:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | import vmware.common.base_facade as base_facade
import vmware.common.constants as constants
import vmware.common.global_config as global_config
import vmware.nsx.manager.csr.api.csr_api_client as csr_api_client
import vmware.nsx.manager.csr.cli.csr_cli_client as csr_cli_client
import vmware.nsx.manager.csr.csr as csr
pylogger = global_config.pylogger
class CSRFacade(csr.CSR, base_facade.BaseFacade):
    """Facade over the CSR resource, routing CRUDAQ calls to the
    execution-type-specific client implementations (API / CLI)."""
    DEFAULT_EXECUTION_TYPE = constants.ExecutionType.API
    DEFAULT_IMPLEMENTATION_VERSION = "NSX70"

    def __init__(self, parent=None, id_=None):
        """Build the facade and register one client per execution type.

        Each client wraps the parent's client for the matching execution
        type, so calls dispatched through this facade reuse the parent's
        transport.
        """
        super(CSRFacade, self).__init__(parent=parent, id_=id_)
        # Map execution type -> client object, constructed in place.
        self._clients = {
            constants.ExecutionType.API: csr_api_client.CSRAPIClient(
                parent=parent.get_client(constants.ExecutionType.API)),
            constants.ExecutionType.CLI: csr_cli_client.CSRCLIClient(
                parent=parent.get_client(constants.ExecutionType.CLI)),
        }
| [
"bpei@vmware.com"
] | bpei@vmware.com |
d794b2638f627cae92847c422f07455bd2e63473 | 93cc2b7590433228444a56daf9f6e0991728867e | /backend/courses/serializer.py | b9ecbba5790f4e7f95d7d8826b694ffda60a7f9d | [] | no_license | MisterLenivec/rating_app | b50fb5353634914a914ddf36831d0fa086d04530 | 65111424159fd730a89678386d9e422fddcdcde8 | refs/heads/master | 2022-12-07T07:25:23.748013 | 2020-08-30T12:04:06 | 2020-08-30T12:04:06 | 290,445,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from rest_framework.serializers import ModelSerializer
from .models import Courses
class CourseSerializer(ModelSerializer):
    """Serializer for adding courses (exposes id, name, url and rating)."""
    class Meta:
        model = Courses
        fields = ['id', 'name', 'url', 'rating']
| [
"wormsom@gmail.com"
] | wormsom@gmail.com |
98802694fabaaad3e27a3a6069d9a7d0df30f372 | 0c52fefc231db4ace1c483b8a6cfd6f716072c2a | /users/migrations/0003_auto_20200903_1234.py | d2b97f81a0a76faec3e8dfff719d00301bb5ca64 | [] | no_license | BrianC68/fam-recipes | 4161849133fe47bcd589b110e24e3e7e75c80527 | 413a943710ae338c922185aaca0aa46307a3ac18 | refs/heads/master | 2022-12-24T01:50:28.304372 | 2020-10-01T14:17:35 | 2020-10-01T14:17:35 | 295,018,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | # Generated by Django 3.0.8 on 2020-09-03 17:34
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.8. Re-declares the four self-referential
    # family M2M fields on CustomUser, adding blank=True and the
    # "Hold Ctrl + Click ..." help_text to each.

    dependencies = [
        ('users', '0002_auto_20200903_1232'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='child',
            field=models.ManyToManyField(blank=True, help_text='Hold Ctrl + Click to choose multiple children.', related_name='_customuser_child_+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='cousin',
            field=models.ManyToManyField(blank=True, help_text='Hold Ctrl + Click to choose multiple cousins.', related_name='_customuser_cousin_+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='parent',
            field=models.ManyToManyField(blank=True, help_text='Hold Ctrl + Click to choose multiple parents.', related_name='_customuser_parent_+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='sibling',
            field=models.ManyToManyField(blank=True, help_text='Hold Ctrl + Click to choose multiple siblings.', related_name='_customuser_sibling_+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"brianc@wi.rr.com"
] | brianc@wi.rr.com |
e161d289aef29076b02deb3c136d91069320e6ad | c6a0862b687ff93cb593ba5a35008ebc701fdaa4 | /does_number_look_big.py | 6a2b5305ff4313ed2c34272b28a76c594be54af4 | [] | no_license | tytechortz/codewars | 77deb327cd9d9e8602228ccb246c93a5cec82fe7 | 8517c38e174fac0a8a81a0939f51a6d83ca0e355 | refs/heads/master | 2020-06-03T03:49:31.686202 | 2019-08-26T20:39:50 | 2019-08-26T20:39:50 | 191,425,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | def narcissistic( value ):
digits = [int(x) for x in str(value)]
length = len(digits)
if sum(i**length for i in digits) == value:
return True
else:
return False
narcissistic(153)  # smoke call; the return value is discarded
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
67bcf6ef75c8622c725316804886b1e3b0041970 | 3ee2b69a81c9193dd34fdf9c587469adb52c7a6e | /contrib/oscoap-plugtest/plugtest-server | c0e380e0e35530809d366bd407d1d8e0c16e723b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pruckebusch/aiocoap | cd2ef6da4f1925e8a477baa95cfcb5173465ed21 | 021915635d912137a1d05ec37486ed4432e0f52d | refs/heads/master | 2021-01-22T20:18:18.465840 | 2017-12-20T14:45:41 | 2017-12-20T14:45:41 | 85,305,911 | 0 | 0 | null | 2017-03-17T11:46:17 | 2017-03-17T11:46:17 | null | UTF-8 | Python | false | false | 7,454 | #!/usr/bin/env python3
# This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""A server suitable for running the OSCOAP plug test series against it
See https://github.com/EricssonResearch/OSCOAP for the test suite
description."""
import sys
import asyncio
import logging
import argparse
import aiocoap
import aiocoap.oscoap as oscoap
import aiocoap.error as error
from aiocoap.util.cli import AsyncCLIDaemon
import aiocoap.resource as resource
from plugtest_common import *
class PleaseUseOscoap(error.ConstructionRenderableError):
    """4.01 Unauthorized error directing clients to retry with the
    Object-Security option; raised for unprotected requests to
    non-whitelisted resources."""
    code = aiocoap.UNAUTHORIZED
    message = "This is an OSCOAP plugtest, please use option %d"%aiocoap.numbers.optionnumbers.OptionNumber.OBJECT_SECURITY
class HelloResource(resource.Resource):
    """GET-only resource returning "Hello World!"; response options vary by
    query string (apparently mapping to plugtest variants -- see below)."""
    def render_get(self, request):
        # Map query string to a variant: no query -> 1, 'first=1' -> 2,
        # 'second=2' -> 3.
        testno_mode = {('first=1',): 2, ('second=2',): 3}.get(request.opt.uri_query, 1)
        # Variant 3 additionally expects Accept == 0 (text/plain).
        additional_verify("Accept as expected", 0 if testno_mode == 3 else None, request.opt.accept)
        # Variants 2 and 3 carry an ETag; variant 3 also sets Max-Age.
        etag = b"\x2b" if testno_mode in (2, 3) else None
        max_age = 5 if testno_mode == 3 else None
        return aiocoap.Message(content_format=0, payload="Hello World!".encode('ascii'), etag=etag, max_age=max_age)
class CounterResource(resource.Resource):
    """POST/PUT/DELETE resource that verifies the exact option and payload
    bytes the plugtest client is specified to send, then acknowledges."""
    def render_post(self, request):
        additional_verify("Content-Format as expeted", 0, request.opt.content_format)
        # Expected payload: four 0x4a bytes.
        additional_verify("Payload as expected", b"\x4a"*4, request.payload)
        return aiocoap.Message(code=aiocoap.CHANGED, location_path=('counter',), location_query=('first=1', 'second=2'))
    def render_put(self, request):
        additional_verify("Content-Format as expeted", 0, request.opt.content_format)
        additional_verify("If-Match as expected", (b"\x5b\x5b",), request.opt.if_match)
        # Expected payload: four 0x5a bytes.
        additional_verify("Payload as expected", b"\x5a"*4, request.payload)
        return aiocoap.Message(code=aiocoap.CHANGED)
    def render_delete(self, request):
        # No expectations checked; always acknowledges the deletion.
        return aiocoap.Message(code=aiocoap.DELETED)
class SeqnoManager(resource.Resource):
    """Resource for inspecting (GET) and re-seeding (PUT) the server's
    OSCOAP security context sequence numbers."""
    def __init__(self, contexts):
        # Shared {cid: security context} mapping, mutated by render_put.
        self.contexts = contexts
    def render_get(self, request):
        # Assumes exactly one context is configured; the 1-tuple unpacking
        # raises otherwise.
        the_context, = self.contexts.values()
        # this direct access is technically outside the interface for a
        # SecurityContext, but then again, there isn't one yet
        text = """Next sequence number I will use: %d\n""" % the_context.my_sequence_number
        text += """I've seen all sequence numbers up to including %d%s.""" % (
            the_context.other_replay_window.seen[0],
            ", and also %s" % the_context.other_replay_window.seen[1:] if len(the_context.other_replay_window.seen) > 1 else ""
        )
        return aiocoap.Message(payload=text.encode('utf-8'), content_format=0)
    def render_put(self, request):
        # Payload is a decimal test number; a fresh recipient context for
        # that test replaces/extends the shared mapping.
        try:
            number = int(request.payload.decode('utf8'))
        except (ValueError, UnicodeDecodeError):
            raise aiocoap.error.BadRequest("Only numeric values are accepted.")
        new_context = get_security_context(number, 'recipient')
        self.contexts[new_context.cid] = new_context
        return aiocoap.Message(code=aiocoap.CHANGED)
class PlugtestSite(resource.Site):
    """CoAP site for the OSCOAP plugtest: unprotects incoming requests,
    dispatches them to the regular resources, and protects the responses.

    Unprotected requests are only allowed for paths in `whitelist`; all
    others are answered with `PleaseUseOscoap`.
    """
    def __init__(self, *, contexts=()):
        # Fix: the `contexts` parameter is never used in the body; its
        # former mutable default ([]) is replaced by an immutable one.
        super().__init__()
        # by now, the testno here should only be used to initialize the sequence numbers
        regular_context = get_security_context(1, 'recipient')
        self.contexts = {c.cid: c for c in [regular_context, ]}
        self.add_resource(('.well-known', 'core'), resource.WKCResource(self.get_resources_as_linkheader))
        self.add_resource(('change-tid',), HelloResource())
        self.add_resource(('helloworld',), HelloResource())
        self.add_resource(('counter',), CounterResource())
        self.add_resource(('sequence-numbers',), SeqnoManager(self.contexts))
    # Paths that may be accessed without OSCOAP protection.
    whitelist = (
        ('.well-known', 'core'),
        ('sequence-numbers',)
    )
    # Most of this is copied from server-oscoap, and needs yet to move into the library
    async def render(self, request):
        try:
            cid, sid = oscoap.verify_start(request)
        except oscoap.NotAProtectedMessage:
            if request.opt.uri_path in self.whitelist:
                return await super().render(request)
            else:
                raise PleaseUseOscoap()
        # right now we'll rely on the sid to match, especially as it's not sent
        # unconditionally anyway
        try:
            sc = self.contexts[cid]
        except KeyError:
            raise PleaseUseOscoap() # may we disclose the reason?
        try:
            unprotected, seqno = sc.unprotect(request)
        except oscoap.ProtectionInvalid as e:
            print("Unprotect failed (%s)"%(e,))
            # hack explanation: there is no implementation of a "no response"
            # response in aiocoap yet. the string here is not a sentinel but an
            # exploitation of aiocoap not handling the type error of not having
            # a message early enough to send a 5.00 error instead; it just
            # fails to reply at all.
            # this also bypasses .response_callback (which might
            # auto-increment), which is a good thing because retransmissions
            # would send the auto-incrementor off and away
            return "NO RESPONSE"
        print("Unprotected request:", unprotected)
        if unprotected.opt.uri_path == ('change-tid',):
            # it seems this is the easiest way to tamper with the Tid as
            # requested for test 16.
            seqno = seqno + b'?'
        # FIXME the render doesn't provide a way to provide context in the
        # sense of "who is the user"; obviously, the render interface needs
        # rework
        try:
            response = await super().render(unprotected)
        except error.RenderableError as err:
            response = err.to_message()
        except Exception as err:
            # Fix: this handler previously bound the exception as `rr` while
            # logging `err`, which was unbound here and would have raised a
            # NameError whenever this branch was hit.
            response = aiocoap.Message(code=aiocoap.INTERNAL_SERVER_ERROR)
            self.log.error("An exception occurred while rendering a protected resource: %r"%err)
            self.log.exception(err)
        if response.code is None:
            # FIXME: this duplicates the default setting in aiocoap.protocol
            response.code = aiocoap.CONTENT
        print("Unprotected response:", response)
        protected_response, _ = sc.protect(response, seqno)
        # FIXME who should trigger this?
        sc._store()
        return protected_response
class PlugtestServerProgram(AsyncCLIDaemon):
    """Command-line daemon wrapping a CoAP server context around PlugtestSite."""
    async def start(self):
        logging.root.setLevel(logging.WARNING)
        # The parser accepts no options; it exists for --help/usage output.
        p = argparse.ArgumentParser(description="Server for the OSCOAP plug test. Requires a test number to be present.")
        opts = p.parse_args()
        self.context = await aiocoap.Context.create_server_context(PlugtestSite())
        print("Plugtest server ready.")
        sys.stdout.flush() # the unit tests might wait abundantly long for this otherwise
    async def shutdown(self):
        # Tear down the server context created in start().
        await self.context.shutdown()
# Run the daemon event loop when executed as a script.
if __name__ == "__main__":
    PlugtestServerProgram.sync_main()
| [
"chrysn@fsfe.org"
] | chrysn@fsfe.org | |
f456161888d1aada746c90888e1e578f7e93d9b4 | fefb1e9b0b736da4e49d7754f8d1dbaf37f2fa6a | /.history/6_1_20210201203845.py | ed5f98f17b9eaa43f22ca8a496997543f53e6c0b | [] | no_license | wh-debug/python | 5a78a2227874ebc400d075197de0adab9f55d187 | 1467eeda670f170e6e2d7c0a0550f713f1ee9d75 | refs/heads/master | 2023-03-12T22:08:12.608882 | 2021-02-17T09:49:52 | 2021-02-17T09:49:52 | 334,032,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | '''
Author: Daylight
Date: 2021-02-01 20:18:57
LastEditTime: 2021-02-01 20:38:45
LastEditors: Please set LastEditors
Description: Practice for dictionaries in python
FilePath: \python\6_1.py
'''
#todo 字典(简单的字典)
alien_0 = {'color': 'green', 'point': 5} #! 保存两个键值
alien_1 = {'colors': 'red'} #! 最简单的字典
print(alien_0['color']) #? 输出字典的某个键值的方法
print(alien_0['point'])
#todo 假设你射杀了一个外星人,将返回你取得的分数(访问字典中的值)
new_points = alien_0['point']
print(f"You just earned {new_points} points!\n")
'''
添加键值对(往字典中添加一个键值对):例子,假如要显示外星人在屏幕中的位置,而开始一般在屏幕的
左上方,需要显示x,y的坐标
'''
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)
| [
"1813763848@qq.com"
] | 1813763848@qq.com |
ad4694defb2d1595a05f17b2c1b017829fcb623f | 70b339d0b2638a7914d0d56c5edf8a2637c9f4b0 | /Facebook-printMergedTwoBST.py | 88d3a8456edc9ece29bc9e44b1cb5338bfaad7e6 | [] | no_license | pflun/advancedAlgorithms | 9991da7514024e18ba08de8688966b9220e12571 | 5520dbcd26999b98e1229bf03c2f62dd690a2ddc | refs/heads/master | 2023-02-19T12:05:26.902535 | 2023-02-14T06:08:54 | 2023-02-14T06:08:54 | 189,055,701 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,313 | py | # -*- coding: utf-8 -*-
# https://www.geeksforgeeks.org/merge-two-bsts-with-limited-extra-space/
# Given two Binary Search Trees(BST), print the elements of both BSTs in sorted
# form. The expected time complexity is O(m+n) where m is the number of nodes in
# first tree and n is the number of nodes in second tree. Maximum allowed
# auxiliary space is O(height of the first tree + height of the second tree).
#
# Examples:
#
# First BST
# 3
# / \
# 1 5
# Second BST
# 4
# / \
# 2 6
# Output: 1 2 3 4 5 6
#
#
# First BST
# 8
# / \
# 2 10
# /
# 1
# Second BST
# 5
# /
# 3
# /
# 0
# Output: 0 1 2 3 5 8 10
from sortedArrayToBST import Solution
class Solution1(object):
    """Merge the values of two BSTs into one sorted list in O(m+n) time,
    using one explicit stack per tree (O(height) extra space each).

    Assumes node values are never None (they are compared and used as
    sentinels below) and that each node exposes .val/.left/.right.
    """
    def __init__(self, root1, root2):
        self.res = []
        self.stack1 = []
        self.stack2 = []
        # Push the left spine of each tree so the stack tops hold the minima.
        while root1:
            self.stack1.append(root1)
            root1 = root1.left
        while root2:
            self.stack2.append(root2)
            root2 = root2.left

    def merge(self):
        """Return all values from both trees in ascending order.

        Bug fix: the previous version tested the stacks only *after*
        popping values into curr1/curr2, so the values still held in those
        variables when a stack ran dry were silently dropped (merging two
        single-node trees returned []). The None-sentinel structure below
        guarantees every popped value is appended exactly once.
        """
        curr1 = self.next1() if self.stack1 else None
        curr2 = self.next2() if self.stack2 else None
        while curr1 is not None and curr2 is not None:
            if curr1 <= curr2:
                self.res.append(curr1)
                curr1 = self.next1() if self.stack1 else None
            else:
                self.res.append(curr2)
                curr2 = self.next2() if self.stack2 else None
        # Drain whichever side still has values (at most one loop runs).
        while curr1 is not None:
            self.res.append(curr1)
            curr1 = self.next1() if self.stack1 else None
        while curr2 is not None:
            self.res.append(curr2)
            curr2 = self.next2() if self.stack2 else None
        return self.res

    def next1(self):
        """Pop and return the smallest remaining value of tree 1."""
        return self._advance(self.stack1)

    def next2(self):
        """Pop and return the smallest remaining value of tree 2."""
        return self._advance(self.stack2)

    @staticmethod
    def _advance(stack):
        # Standard iterative in-order step: after popping a node, push the
        # left spine of its right subtree so the next minimum is on top.
        curr = stack.pop()
        node = curr.right
        while node:
            stack.append(node)
            node = node.left
        return curr.val
# Manual smoke test: build two balanced BSTs from sorted arrays and merge them.
testBST = Solution()
root1 = testBST.sortedArrayToBST([0, 1, 4, 5, 6, 8])
root2 = testBST.sortedArrayToBST([2, 3, 5, 7, 9, 10])
test = Solution1(root1, root2)
print test.merge() | [
"zgao@gwu.edu"
] | zgao@gwu.edu |
59549cf9b5090c462054501984f18528b5eeecb8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2563/60771/286481.py | 6440860658ef460e394d3ddb3ec560656c52722f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | #08
s = eval(input())
if s == "1000000000000000000":
print("999999999999999999")
exit(0)
ones = ["1","11","111","1111","11111"]
for i in range(2,11):
for item in ones:
if int(s) == int(item,i):
print(i)
exit(0)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
d22116543afbd061fe73aff46c090483128e53a9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq220.py | ec7bc93bbdf4174fbf1914c1bd69aa6b569cdc19 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed benchmark circuit over the given qubits.

    Args:
        n: qubit count; accepted for interface uniformity but unused here.
        input_qubit: sequence of at least four cirq qubits.

    Returns:
        A cirq.Circuit ending in a joint measurement keyed 'result'.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.Z.on(input_qubit[3])) # number=7
    c.append(cirq.Z.on(input_qubit[1])) # number=8
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    # The two consecutive SWAPs on the same pair cancel logically; kept as
    # generated for the benchmark.
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=5
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=6
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a compact '0'/'1' string."""
    pieces = [str(int(bit)) for bit in bits]
    return ''.join(pieces)
if __name__ == '__main__':
    # Four qubits laid out in a single GridQubit column.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Re-target the circuit at Sycamore's sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Persist the histogram, circuit length and circuit text for comparison.
    writefile = open("../data/startCirq220.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
11e929075f1dc33ce20b126bede66b9911fa154b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5686275109552128_1/Python/LooneyLarry/solution.py | 48f0fd31d05cb88218678b13fa6205aae48076c4 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,246 | py | # Google Code Jam 2015 Qualifying B.
# Moving half of stack isn't optimal, right?
# Start: 9, make 4,5, total cost: 6
# make 3,3,3, total cost: 5
# Right, halving isn't optimal.
# Can't improve on 3.
# 1: 1
# 2: 2
# 3: 3
# 4: 3
# 5: 4
# 6: 1 + C(3) = 4
# 7?: 1 + C(4) = 4<= No, we can't split the 3 and the 4 stack simultaneously.
# 7: split*2 eat*3 = 5
# Is moving off piles of 3 optimal? (Not for 4)
# 8: split*2 eat*3 = 5
# 9: split*2 eat*3 = 5
# 10: split*3 eat*3 = 6 (or split + eat*5)
# 11: split*3 eat*3 = 6
# 12: split*3 eat*3 = 6
# 13: split*4 eat*3 = 7
# n: ceil(n/3) - 1 + 3 = ceil(n/3) + 2 Doesn't work for 4.
# I'm thinking 1, 2, and 4 are special cases and everything else
# follows this, but I'm unsure. There's only a 4 minute penalty for
# failed small input attempts.
# But input is multiple plates, not 1. We can split only one at a time.
# 4 4: split*2 eat*2 = 4
# 6 6: split*2 eat*3 = 5
# sum(ceil(nsubi/3)-1) + 3
# Special case if max is 4 and no 3.
# So, break everything into stacks of 3 (or 2 when 4), then eat
# 3 (or 2 if max was 4 and no 3). Can split one plate at a time
# ignoring all others (except if max stack is 4 or less and no 3).
# Store numToEat -- size of largest stack when done splitting.
# Increase to stacksize for stack of 1 2 or 3, increase to 3 for
# stack > 4, increase to 2 for stack of 4. (But only if only one 4.)
# For each plate, count splits required and adjust numToEat to {0..3}.
# Return number of splits plus numToEat.
#
# No, ceil(n/3) + 2 is wrong for 16. That comes to 8, but split*3 to
# make stacks of 4 followed by eat*4 = 7. Because breaking 16 into
# threes is inefficient.
# what about 18? Rule says 8. Can do that with stacks of 3 or 4 or 6.
# Rule is correct (because multiple of 3).
# What about 20? Rule says 9. Stacks of 4 or 5 cost 8.
# So stacks of sqrt(n) is optimal?
# 100? split*9 eat*10 = 19, rule says 36.
# 101? I suppose split*10 eat*10 = 20. Or split*9 eat*11. Or split*8
# eat*12. Or 7,13. Or 11,9. Or 12,8. But not 13,7. Because 14*7 <
# 101. 7,13 works because 8*13 >= 101. (That's (split+1)*eat.)
# sqrt makes sense since we want to minimize split+eat for maximum
# split*eat.
# So the maximum initial stack determines the target stack size, and
# all other stacks need to be split into that size. Does that work?
# If max is 100 so target stack is 10 but 999 other plates start
# at 11, 10 loses. So this is impossible without something like
# dynamic programming.
# Wait, there are only 1000 plates. I could try all stack sizes up to
# 1000 and see which wins. Maybe calculate cost for sqrt(max()) to
# have an upper bound for pruning. Hey, sqrt(max()) is the minimum
# target stack, we don't need to consider anything smaller. So if a
# stack starts at 1000, we try stack sizes 33ish to 1000. There could
# be 999 stacks of 999, so we can't stop earlier. But if the number
# of remaining stacks larger than i is low we could quit? Nevermind,
# this should be fast enough.
#
# So. Start target stacksize at sqrt(max()) - 1. (Nevermind.)
# Iterate plates counting splits needed to reach target. Cost of
# target is splits plus target (eating). Repeat incrementing target
# up to max.
import sys
# How many splits does it take to convert num pancakes into stacks
# not taller than target?
def countSplits(num, target):
    """Return the number of split moves needed to reduce a stack of `num`
    pancakes to stacks of at most `target` pancakes each.

    Each split removes `target` pancakes into a new stack, e.g.
    countSplits(9, 3) == 2 (yielding stacks of 3, 3, 3).
    """
    if num <= 1:
        return 0
    # We do (9,3) by removing 3 twice (making 3 stacks of 3).
    # Explicit floor division: identical to `/` under this file's Python 2,
    # and version-agnostic if the script is ever run under Python 3.
    return (num - 1) // target
def doCase(file):
    """Read one test case from `file` and return its minimum total cost.

    Tries every target stack size 1..1000; the cost of a target is the
    splits needed to cut all plates down to it plus eating it once.
    NOTE(review): re-iterating `plates` each pass relies on Python 2's
    `map` returning a list (this file uses Python 2 print statements);
    under Python 3 the map iterator would be exhausted after one pass.
    """
    file.readline() # Ignore number of plates
    plates = map(int, file.readline().split())
    bestCost = 1000
    for targetStack in range(1, 1001):
        cost = targetStack # cost of eating stack after splitting
        for plate in plates:
            cost += countSplits(plate, targetStack) # cost of splitting
        bestCost = min(bestCost, cost)
        #print "Target {0}, cost {1}".format(targetStack, cost)
    return bestCost
def run():
    """Entry point: read the input file named on the command line and print
    one 'Case #i: answer' line per test case (Python 2 print statement).

    NOTE(review): `file` shadows the Python 2 builtin, and the handle is
    never closed -- acceptable for a contest script.
    """
    file = open(sys.argv[1])
    numCases = int(file.readline())
    for case in range(1, numCases+1):
        answer = doCase(file)
        print 'Case #{0}: {1}'.format(case, answer)
run()
| [
"root@debian"
] | root@debian |
77c43b89e4d98e184496a139bfe0ee501aac3077 | 4da462f01398e57f07532d09becbcb737278be6b | /tape/disasm/disasm.py | f1c16dedb37c92e200cfe9e509160dcdabef7389 | [
"BSD-2-Clause"
] | permissive | meawoppl/purgatory | d740bc30e547b6d8ef1e00353747ffae6701881f | 58abd57f7bf26457798f1d518c321ee52396fb3d | refs/heads/master | 2021-01-13T01:31:20.943177 | 2015-07-26T21:09:42 | 2015-07-26T21:09:42 | 37,833,544 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | from __future__ import print_function
# test1.py
from capstone import Cs, CS_ARCH_X86, CS_MODE_64, CS_MODE_32
# 4-byte x86 sample; decodes to a single lea instruction under 32-bit mode
# (presumably lea eax, [eax + edi + 2] -- confirm with the printed output).
CODE = b"\x8d\x44\x38\x02"
md = Cs(CS_ARCH_X86, CS_MODE_32)
md.detail = True  # required for regs_read/groups introspection below
for i in md.disasm(CODE, 0):
    # print(dir(i))
    print("0x%x:\t%s\t%s" % (i.address, i.mnemonic, i.op_str))
    if len(i.regs_read) > 0:
        # NOTE(review): with print_function imported, the trailing comma
        # makes these statements one-tuples -- it does NOT suppress the
        # newline. Leftover Python-2 print idiom.
        print("\tImplicit registers read: "),
        for r in i.regs_read:
            print("%s " % i.reg_name(r)),
        # NOTE(review): bare `print` is a no-op expression here (it just
        # references the function), not a newline.
        print
    if len(i.groups) > 0:
        print("\tThis instruction belongs to groups:", end="")
        for g in i.groups:
            print("%u" % g)
            # print("%u" % g, end="")
        print()
def dumpASM(flo, mode, maxAddr=1e99):
    """Disassemble x86 machine code bytes and print one line per instruction
    plus its implicitly read/written registers.

    Args:
        flo: the code bytes to disassemble (passed straight to Cs.disasm).
        mode: 32 or 64; any other value raises KeyError on the lookup below.
        maxAddr: stop after the first instruction whose address exceeds this
            (note: that instruction is still printed before the break).
    """
    modeRef = {32: CS_MODE_32, 64: CS_MODE_64}
    md = Cs(CS_ARCH_X86, modeRef[mode])
    md.detail = True  # required for regs_read/regs_write below
    for i in md.disasm(flo, 0):
        # print(dir(i))
        print("0x%x:\t%s\t%s" % (i.address, i.mnemonic, i.op_str))
        # NOTE(review): the header uses end="" but each register name is
        # printed with a default newline, so the output is one register per
        # line rather than a single row -- confirm intended formatting.
        print("\tImplicit registers read: ", end="")
        for r in i.regs_read:
            print("%s " % i.reg_name(r))
        print()
        print("\tImplicit registers written: ", end="")
        for r in i.regs_write:
            print("%s " % i.reg_name(r))
        print()
        if i.address > maxAddr:
            break
| [
"meawoppl@gmail.com"
] | meawoppl@gmail.com |
e36693a654a9877c1004ae2e498dcd3df1c01fe5 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=61/sched.py | 0f9be34a7f8ee395e190f23b91fd7672f0ff05c2 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | -X FMLP -Q 0 -L 4 71 250
-X FMLP -Q 0 -L 4 67 200
-X FMLP -Q 1 -L 3 58 300
-X FMLP -Q 1 -L 3 51 300
-X FMLP -Q 2 -L 2 42 300
-X FMLP -Q 3 -L 2 35 200
34 125
32 400
28 150
15 175
9 125
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
5659fac1f5b514ddd3691759dde6533c4cbf6b48 | 4eaab9327d25f851f9e9b2cf4e9687d5e16833f7 | /problems/add_binary/solution.py | af644f463bbed2e7d591ff215c7088ac01b99e58 | [] | no_license | kadhirash/leetcode | 42e372d5e77d7b3281e287189dcc1cd7ba820bc0 | 72aea7d43471e529ee757ff912b0267ca0ce015d | refs/heads/master | 2023-01-21T19:05:15.123012 | 2020-11-28T13:53:11 | 2020-11-28T13:53:11 | 250,115,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | class Solution:
def addBinary(self, a: str, b: str) -> str:
# 0 + 0 --> 0
# 0 + 1 --> 1
# 1 + 0 --> 0
# 1 + 1 --> 0, carry 1
# Loop through a
# Loop through b
# 4 if statements
x,y = int(a,2), int(b,2) # answer, carry, respectively
while y != 0:
answer = x^y # XOR
carry = (x & y) << 1 # AND + bit-shift left
x , y = answer , carry
return bin(x)[2:] | [
"kadhirash@gmail.com"
] | kadhirash@gmail.com |
f0811e61669d74bf1815b34a8639d8024adcf499 | b7fccda9944b25c5c9b5a91253eac24e1c4c9b23 | /tests/settings.py | 4003a9bf4dae22e89994804f2dcb8b53953c8fb0 | [
"BSD-3-Clause"
] | permissive | ouhouhsami/django-geoads | 96f47fd17496a13d611b6ed2462f32dfa81c4401 | bec8f9ce8b8744775aee26b14a884598a599d9af | refs/heads/master | 2021-01-01T16:51:23.715917 | 2013-11-16T23:13:52 | 2013-11-16T23:13:52 | 4,099,272 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,638 | py | # Django settings for localized_classified_ads project.
import os
import sys
DEBUG = True
TEMPLATE_DEBUG = DEBUG

GEOADS_ASYNC = False

# Outgoing mail is written to files on disk instead of being sent.
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = 'tmp/email-messages/'

SITE_ROOT = os.path.dirname(os.path.realpath(__file__))

ADMINS = ('admin@geoads.com',)

MANAGERS = ADMINS

DJANGO_MODERATION_MODERATORS = (
    'test@example.com',
)

TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
# admin.py files are excluded from coverage: they don't set anything special.
COVERAGE_MODULE_EXCLUDES = ['tests$', 'factories', 'settings$', 'urls$', 'locale$', '__init__', 'django',
                            'migrations', 'admin']
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'coverage_report')

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'geoads_db',
        'USER': 'postgres',
    }
}

GEOCODE = 'nominatim'

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Paris'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'fr-FR'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'abc'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.request',
)

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)

ROOT_URLCONF = 'tests.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.gis',
    'django.contrib.flatpages',
    'django.contrib.sitemaps',
    'django_filters',
    'django_rq',
    'moderation',
    'customads',
    'geoads',
    'geoads.contrib.moderation',
)

# specific test setting for coverage information
#SOUTH_TESTS_MIGRATE = False
#SKIP_SOUTH_TESTS = True

# for testing purposes, profile page = home/search page
ADS_PROFILE_URL = '/'
# for testing purposes, profile signup page = home/search page
ADS_PROFILE_SIGNUP = '/'

# QUEUE
RQ_QUEUES = {
    'default': {
        'HOST': 'localhost',
        'PORT': 6379,
        'DB': 0,
    },
}

if DEBUG:
    # Run queued jobs synchronously under DEBUG so tests don't need a worker.
    # .values() (not the Python 2-only .itervalues()) keeps this working on
    # both Python 2 and Python 3.
    for queueConfig in RQ_QUEUES.values():
        queueConfig['ASYNC'] = False
| [
"samuel.goldszmidt@gmail.com"
] | samuel.goldszmidt@gmail.com |
2b01732f00eccf85447635c842aeea87107b29a2 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/dask-0.8.1-py27_0/lib/python2.7/site-packages/dask/diagnostics/progress.py | 42babc30a3fe0b3d4de1caae9af138a8e30b9c1a | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 4,169 | py | from __future__ import absolute_import, division, print_function
import sys
import threading
import time
from timeit import default_timer
from ..callbacks import Callback
from ..utils import ignoring
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)
elif m:
return '{0:2.0f}min {1:4.1f}s'.format(m, s)
else:
return '{0:4.1f}s'.format(s)
class ProgressBar(Callback):
"""A progress bar for dask.
Parameters
----------
minimum : int, optional
Minimum time threshold in seconds before displaying a progress bar.
Default is 0 (always display)
width : int, optional
Width of the bar
dt : float, optional
Update resolution in seconds, default is 0.1 seconds
Examples
--------
Below we create a progress bar with a minimum threshold of 1 second before
displaying. For cheap computations nothing is shown:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_fast_computation.compute()
But for expensive computations a full progress bar is displayed:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_slow_computation.compute()
[########################################] | 100% Completed | 10.4 s
The duration of the last computation is available as an attribute
>>> pbar = ProgressBar()
>>> with pbar: # doctest: +SKIP
... out = some_computation.compute()
[########################################] | 100% Completed | 10.4 s
>>> pbar.last_duration # doctest: +SKIP
10.4
You can also register a progress bar so that it displays for all
computations:
>>> pbar = ProgressBar() # doctest: +SKIP
>>> pbar.register() # doctest: +SKIP
>>> some_slow_computation.compute() # doctest: +SKIP
[########################################] | 100% Completed | 10.4 s
"""
def __init__(self, minimum=0, width=40, dt=0.1):
self._minimum = minimum
self._width = width
self._dt = dt
self.last_duration = 0
def _start(self, dsk):
self._state = None
self._start_time = default_timer()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.start()
def _pretask(self, key, dsk, state):
self._state = state
sys.stdout.flush()
def _finish(self, dsk, state, errored):
self._running = False
self._timer.join()
elapsed = default_timer() - self._start_time
self.last_duration = elapsed
if elapsed < self._minimum:
return
if not errored:
self._draw_bar(1, elapsed)
else:
self._update_bar(elapsed)
sys.stdout.write('\n')
sys.stdout.flush()
def _timer_func(self):
"""Background thread for updating the progress bar"""
while self._running:
elapsed = default_timer() - self._start_time
if elapsed > self._minimum:
self._update_bar(elapsed)
time.sleep(self._dt)
def _update_bar(self, elapsed):
s = self._state
if not s:
self._draw_bar(0, elapsed)
return
ndone = len(s['finished'])
ntasks = sum(len(s[k]) for k in ['ready', 'waiting', 'running']) + ndone
self._draw_bar(ndone / ntasks if ntasks else 0, elapsed)
def _draw_bar(self, frac, elapsed):
bar = '#' * int(self._width * frac)
percent = int(100 * frac)
elapsed = format_time(elapsed)
msg = '\r[{0:<{1}}] | {2}% Completed | {3}'.format(bar, self._width,
percent, elapsed)
with ignoring(ValueError):
sys.stdout.write(msg)
sys.stdout.flush()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
69f8539cd9c2a5ff7d1f58e302cc31eda63563b1 | 73b8aba05ee1424f38a8598a9f1305185588075f | /0x0B-python-input_output/9-student.py | 47edce80a94d4ef2c2595c7c27eabbf247e1dbce | [] | no_license | nicolasportela/holbertonschool-higher_level_programming | 0d176c0e56f4f703c1e9a98b430fc6120f22f675 | e1537b81f21118456e5cfa0e4ed89520b232adb6 | refs/heads/master | 2023-04-20T21:30:22.693434 | 2021-05-13T01:47:30 | 2021-05-13T01:47:30 | 319,397,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | #!/usr/bin/python3
"""this module writes a class named Student"""
class Student:
    """Represent a student by first name, last name and age."""

    def __init__(self, first_name, last_name, age):
        """Store the three public attributes on the instance."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def to_json(self):
        """Return the instance's attribute dictionary (a live view, not a copy)."""
        return vars(self)
| [
"2103@holbertonschool.com"
] | 2103@holbertonschool.com |
7245421fc62c4bc78c583bb78c878771fd612113 | 2626f6e6803c8c4341d01f57228a0fe117e3680b | /students/alexLaws/lesson06/Calculator/unit-test.py | 4aabc968ccbaa596943a18f7b0d8b90f8d88ec8d | [] | no_license | kmsnyde/SP_Online_Course2_2018 | 9e59362da253cdec558e1c2f39221c174d6216f3 | 7fe8635b47d4792a8575e589797260ad0a2b027e | refs/heads/master | 2020-03-19T17:15:03.945523 | 2018-09-05T22:28:55 | 2018-09-05T22:28:55 | 136,750,231 | 0 | 0 | null | 2018-06-09T19:01:52 | 2018-06-09T19:01:51 | null | UTF-8 | Python | false | false | 2,831 | py | from unittest import TestCase
from unittest.mock import MagicMock
from calculator.adder import Adder
from calculator.subtracter import Subtracter
from calculator.multiplier import Multiplier
from calculator.divider import Divider
from calculator.calculator import Calculator
from calculator.exceptions import InsufficientOperands
class AdderTests(TestCase):
    """Checks Adder.calc against the ``+`` operator over a small signed grid."""
    def test_adding(self):
        adder = Adder()
        for i in range(-10, 10):
            for j in range(-10, 10):
                self.assertEqual(i + j, adder.calc(i, j))
class SubtracterTests(TestCase):
    """Checks Subtracter.calc against the ``-`` operator over a small signed grid."""
    def test_subtracting(self):
        subtracter = Subtracter()
        for i in range(-10, 10):
            for j in range(-10, 10):
                self.assertEqual(i - j, subtracter.calc(i, j))
class MultiplierTests(TestCase):
    """Checks Multiplier.calc against the ``*`` operator over a small signed grid."""
    def test_multiplying(self):
        multiplier = Multiplier()
        for i in range(-10, 10):
            for j in range(-10, 10):
                self.assertEqual(i * j, multiplier.calc(i, j))
class DividerTests(TestCase):
    """Checks Divider.calc against ``/``; ranges start at 1 so zero is never a divisor."""
    def test_dividing(self):
        divider = Divider()
        for i in range(1, 10):
            for j in range(1, 10):
                self.assertEqual(i / j, divider.calc(i, j))
class CalculatorTests(TestCase):
    """Exercises Calculator wiring: operand entry and delegation to each operator."""
    def setUp(self):
        # Real operator instances; individual tests replace .calc with a MagicMock
        # so they only verify the delegation, not the arithmetic.
        self.adder = Adder()
        self.subtracter = Subtracter()
        self.multiplier = Multiplier()
        self.divider = Divider()
        self.calculator = Calculator(self.adder, self.subtracter, self.multiplier, self.divider)
    def test_insufficient_operands(self):
        """add() with only one operand entered must raise InsufficientOperands."""
        self.calculator.enter_number(0)
        with self.assertRaises(InsufficientOperands):
            self.calculator.add()
    def test_adder_call(self):
        """add() forwards the two entered operands to Adder.calc."""
        self.adder.calc = MagicMock(return_value=0)
        self.calculator.enter_number(1)
        self.calculator.enter_number(2)
        self.calculator.add()
        self.adder.calc.assert_called_with(1, 2)
    def test_subtracter_call(self):
        """subtract() forwards the two entered operands to Subtracter.calc."""
        self.subtracter.calc = MagicMock(return_value=0)
        self.calculator.enter_number(1)
        self.calculator.enter_number(2)
        self.calculator.subtract()
        self.subtracter.calc.assert_called_with(1, 2)
    def test_multiplier_call(self):
        """multiply() forwards the two entered operands to Multiplier.calc."""
        self.multiplier.calc = MagicMock(return_value=0)
        self.calculator.enter_number(1)
        self.calculator.enter_number(2)
        self.calculator.multiply()
        self.multiplier.calc.assert_called_with(1, 2)
    def test_divider_call(self):
        """divide() forwards the two entered operands to Divider.calc."""
        self.divider.calc = MagicMock(return_value=0)
        self.calculator.enter_number(1)
        self.calculator.enter_number(2)
        self.calculator.divide()
        self.divider.calc.assert_called_with(1, 2)
| [
"kmsnyder2@verizon.net"
] | kmsnyder2@verizon.net |
e3402ab9f154e153963567c7195d6764d66c8b2b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_6377668744314880_0/Python/hero777/15_3_1s.py | 6a14e7f31677be4fe6912de46c33060887d121b9 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | f = open('a.txt','r')
f0 = open('output.txt','w')
a0 = [int(x) for x in f.readline().split()]
for index in range (0,a0[0]):
x, = [int(x) for x in f.readline().split()]
f0.write('Case #')
f0.write(str(index+1))
f0.write(': ')
f0.write('\n')
x1 = [1]*x
y1 = [1]*x
for index2 in range (0,x):
x1[index2], y1[index2], = [int(q) for q in f.readline().split()]
for index2 in range (0,x):
counter = x-2
for index3 in range (0, x):
counter1 = 0
counter2 = 0
if index3 == index2:
continue
for index4 in range (0, x):
if (index4 == index3 or index4 == index2):
continue
z = long(y1[index3]-y1[index2])*long(x1[index4] - x1[index3]) - long(x1[index3] - x1[index2])*long(y1[index4] - y1[index3])
print(z)
if z < 0:
counter1 = counter1 +1
if z>0:
counter2 = counter2 +1
we = min(counter1,counter2)
counter = min(counter, we)
ans = max(counter,0)
f0.write(str(ans))
f0.write('\n')
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
78136ab0a5146745ae8d5ba53635c748b109beda | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p00002/s705893489.py | 07943c2872474d02f692c0b66ec4f7740cc08380 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | while 1:
try:
a,b = map(int, raw_input().split())
n = a+b
print len(str(n))
except EOFError:
break | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cefd0697efe5cc2ff06a3562f310f1133c9a5da1 | a9e81c87022fdde86d47a4ec1e74791da8aa0e30 | /python-learning/libraries/pyqt5/base/animation/qeasingcurve-demo.py | 13c911e44108f0f823b9d5eab00fd31546f00ef0 | [
"Apache-2.0"
] | permissive | ymli1997/deeplearning-notes | c5c6926431b7efc1c6823d85e3eb470f3c986494 | f2317d80cd998305814f988e5000241797205b63 | refs/heads/master | 2020-07-29T11:15:43.689307 | 2018-05-05T10:58:18 | 2018-05-05T10:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | # -*- coding: utf-8 -*-
'''
属性控件
'''
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
class Form(QMainWindow):
def __init__(self,parent=None):
super().__init__(parent)
centerWidget = QWidget()
layout = QHBoxLayout()
# 添加控件代码
button1 = QPushButton("Animation")
button1.setGeometry(10,10,100,30)
animation = QPropertyAnimation(button1,b"geometry")
animation.setDuration(3000)
animation.setStartValue(QRect(10,10,100,30))
animation.setEndValue(QRect(200,150,100,30))
easingCurve = QEasingCurve(QEasingCurve.InBack)
animation.setEasingCurve(easingCurve)
button2 = QPushButton("Start")
button2.setGeometry(120,10,100,30)
button2.clicked.connect(lambda :animation.start())
layout.addWidget(button1)
layout.addWidget(button2)
centerWidget.setLayout(layout)
self.setCentralWidget(centerWidget)
self.resize(640,480)
self.setWindowTitle("PyQt5-")
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Form()
ex.show()
sys.exit(app.exec_()) | [
"kkoolerter@gmail.com"
] | kkoolerter@gmail.com |
508ba6f49b063a1b8f7951ed21108506687749d2 | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/petstore/python-experimental/petstore_api/model/class_model.pyi | c26397416a52ea3ee2cb8504d252698b9b42857c | [
"Apache-2.0"
] | permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 2,550 | pyi | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
class ClassModel(
schemas.AnyTypeSchema,
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Model for testing model with "_class" property
"""
class MetaOapg:
class properties:
_class = schemas.StrSchema
__annotations__ = {
"_class": _class,
}
additional_properties = schemas.AnyTypeSchema
_class: typing.Union[MetaOapg.properties._class, schemas.Unset]
@typing.overload
def __getitem__(self, name: typing.Literal["_class"]) -> typing.Union[MetaOapg.properties._class, schemas.Unset]: ...
@typing.overload
def __getitem__(self, name: str) -> typing.Union[MetaOapg.additional_properties, schemas.Unset]: ...
def __getitem__(self, name: typing.Union[str, typing.Literal["_class"], ]):
# dict_instance[name] accessor
if not hasattr(self.MetaOapg, 'properties') or name not in self.MetaOapg.properties.__annotations__:
return super().__getitem__(name)
try:
return super().__getitem__(name)
except KeyError:
return schemas.unset
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
_class: typing.Union[MetaOapg.properties._class, str, schemas.Unset] = schemas.unset,
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
) -> 'ClassModel':
return super().__new__(
cls,
*args,
_class=_class,
_configuration=_configuration,
**kwargs,
)
| [
"noreply@github.com"
] | FallenRiteMonk.noreply@github.com |
b3979984927341a8b493352e08d0e64e426032ae | 6ce856e7693c003a85f0e23a239907a87ecb7c89 | /cms/coltrane/models.py | 1b0f837e8209bb9eb3d22d369e6e0b5301f4886f | [] | no_license | wherby/DJango | 4776f0b92b69124be2d213f640fc12e5409f0ee2 | 2545fe7b908e5ef4f6aef2ecca78da77f4d7ccd0 | refs/heads/master | 2018-12-28T13:45:58.851730 | 2014-06-28T18:40:01 | 2014-06-28T18:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | import datetime
from django.db import models
from django.contrib.auth.models import User
from tagging.fields import TagField
from markdown import markdown
class Category(models.Model):
    """Weblog category; ``slug`` is the unique fragment used in the archive URL."""
    title = models.CharField(max_length=250,help_text='Maximum 250 characters.')
    slug = models.SlugField(unique=True,help_text="Suggested value automatically generated from title. Must be unique")
    description = models.TextField()

    class Meta:
        ordering = ['title']
        verbose_name_plural ="Categories"

    def __unicode__(self):
        # Python 2-era string representation (no __str__ defined).
        return self.title

    def get_absolute_url(self):
        """Return the hard-coded URL of this category's archive page."""
        return "/categories/%s/" % self.slug
class Entry(models.Model):
    """A weblog entry whose Markdown ``body``/``excerpt`` are rendered to HTML on save."""

    # Publication workflow states.
    LIVE_STATUS = 1
    DRAFT_STATUS = 2
    HIDDEN_STATUS = 3
    STATUS_CHOICES = (
        (LIVE_STATUS, 'Live'),
        (DRAFT_STATUS, 'Draft'),
        (HIDDEN_STATUS, 'Hidden'),
    )
    title = models.CharField(max_length=250, help_text="Maximum 250 characters.")
    excerpt = models.TextField(blank=True, help_text="A short summary of the entry. Optional.")
    body = models.TextField()
    pub_date = models.DateTimeField(default=datetime.datetime.now)
    # Metadata.
    author = models.ForeignKey(User)
    enable_comments = models.BooleanField(default=True)
    featured = models.BooleanField(default=False)
    slug = models.SlugField(unique_for_date='pub_date',
                            help_text="Suggested value automatically generated from title. Must be unique.")
    status = models.IntegerField(choices=STATUS_CHOICES, default=LIVE_STATUS,
                                 help_text="Only entries with live status will be publicly displayed.")
    # Categorization.
    categories = models.ManyToManyField(Category)
    tags = TagField(help_text="Separate tags with spaces.")
    # Fields to store generated HTML; filled in by save(), never edited directly.
    excerpt_html = models.TextField(editable=False, blank=True)
    body_html = models.TextField(editable=False, blank=True)

    class Meta:
        verbose_name_plural = "Entries"
        ordering = ['-pub_date']

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        """Render Markdown to HTML, then delegate to the default save.

        Accepting ``*args, **kwargs`` (rather than only ``force_insert`` /
        ``force_update``) preserves any extra options callers pass (such as
        ``using``), as the Django documentation recommends for ``save``
        overrides. Existing positional/keyword callers are unaffected.
        """
        self.body_html = markdown(self.body)
        if self.excerpt:
            self.excerpt_html = markdown(self.excerpt)
        super(Entry, self).save(*args, **kwargs)

    def get_absolute_url(self):
        """Return the dated permalink, e.g. ``/weblog/2013/jan/05/my-slug/``."""
        return "/weblog/%s/%s/" % (self.pub_date.strftime("%Y/%b/%d").lower(), self.slug)
| [
"187225577@qq.com"
] | 187225577@qq.com |
1e29a61d5f946cf138297066af08bcfa6884588c | 5e51625b5885f23a10213b373d46900eefd3312c | /torch_sparse/__init__.py | a4b3a38565aca774eb9ed9c0d1e43047d7d096d2 | [
"MIT"
] | permissive | ducptruong/pytorch_sparse | a84c7c8892dfce8a1c7e0048d1a83bea314bd1c1 | 46dac04fd29a9585ed67afe28eaa4c667eabf9e3 | refs/heads/master | 2023-02-19T06:17:50.252560 | 2021-01-19T11:21:52 | 2021-01-19T11:21:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,173 | py | import importlib
import os.path as osp
import torch
__version__ = '0.6.8'

# Load every compiled extension library shipped alongside this package so
# that its native operators are registered under torch.ops.torch_sparse.*.
for library in [
        '_version', '_convert', '_diag', '_spmm', '_spspmm', '_metis', '_rw',
        '_saint', '_sample', '_relabel'
]:
    torch.ops.load_library(importlib.machinery.PathFinder().find_spec(
        library, [osp.dirname(__file__)]).origin)

if torch.cuda.is_available() and torch.version.cuda:  # pragma: no cover
    cuda_version = torch.ops.torch_sparse.cuda_version()

    # cuda_version packs major/minor into decimal digits: -1 means the
    # extension was built without CUDA; e.g. 9020 parses to (9, 2) and
    # 10020 parses to (10, 2).
    if cuda_version == -1:
        major = minor = 0
    elif cuda_version < 10000:
        major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
    else:
        major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
    t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')]

    # Refuse to run when PyTorch and this extension were built against
    # different CUDA major versions.
    if t_major != major:
        raise RuntimeError(
            f'Detected that PyTorch and torch_sparse were compiled with '
            f'different CUDA versions. PyTorch has CUDA version '
            f'{t_major}.{t_minor} and torch_sparse has CUDA version '
            f'{major}.{minor}. Please reinstall the torch_sparse that '
            f'matches your PyTorch install.')
from .storage import SparseStorage # noqa
from .tensor import SparseTensor # noqa
from .transpose import t # noqa
from .narrow import narrow, __narrow_diag__ # noqa
from .select import select # noqa
from .index_select import index_select, index_select_nnz # noqa
from .masked_select import masked_select, masked_select_nnz # noqa
from .permute import permute # noqa
from .diag import remove_diag, set_diag, fill_diag, get_diag # noqa
from .add import add, add_, add_nnz, add_nnz_ # noqa
from .mul import mul, mul_, mul_nnz, mul_nnz_ # noqa
from .reduce import sum, mean, min, max # noqa
from .matmul import matmul # noqa
from .cat import cat # noqa
from .rw import random_walk # noqa
from .metis import partition # noqa
from .bandwidth import reverse_cuthill_mckee # noqa
from .saint import saint_subgraph # noqa
from .padding import padded_index, padded_index_select # noqa
from .sample import sample, sample_adj # noqa
from .convert import to_torch_sparse, from_torch_sparse # noqa
from .convert import to_scipy, from_scipy # noqa
from .coalesce import coalesce # noqa
from .transpose import transpose # noqa
from .eye import eye # noqa
from .spmm import spmm # noqa
from .spspmm import spspmm # noqa
__all__ = [
'SparseStorage',
'SparseTensor',
't',
'narrow',
'__narrow_diag__',
'select',
'index_select',
'index_select_nnz',
'masked_select',
'masked_select_nnz',
'permute',
'remove_diag',
'set_diag',
'fill_diag',
'get_diag',
'add',
'add_',
'add_nnz',
'add_nnz_',
'mul',
'mul_',
'mul_nnz',
'mul_nnz_',
'sum',
'mean',
'min',
'max',
'matmul',
'cat',
'random_walk',
'partition',
'reverse_cuthill_mckee',
'saint_subgraph',
'padded_index',
'padded_index_select',
'to_torch_sparse',
'from_torch_sparse',
'to_scipy',
'from_scipy',
'coalesce',
'transpose',
'eye',
'spmm',
'spspmm',
'__version__',
]
| [
"matthias.fey@tu-dortmund.de"
] | matthias.fey@tu-dortmund.de |
3c6d3a7309e480edacabe02dab04a8023d9c7a66 | 2979f5687b5d34b4885f41062b9b901eee217771 | /meiduo_mall/wc.py | 0dd52ff1ef1cfb39d00f4f75909f501865f38886 | [] | no_license | PierreCastor18/meiduo_mall | d9aa15fa4ec0957f079763a7eb7d2bea5c6aa765 | 8de99e6d232f24cdc8be947ccda8ed536597ec94 | refs/heads/master | 2020-04-17T03:08:30.405499 | 2019-02-25T07:27:11 | 2019-02-25T07:27:11 | 166,168,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # _*_ coding: utf-8 _*_
__author__ = '其实很简单'
__date__ = '19-1-29 下午8:53'
# 类装饰器
class logger(object):
    """Class-based decorator that logs the wrapped callable's name on each call."""

    def __init__(self, func):
        """Remember the callable being decorated."""
        self.func = func

    def __call__(self, *args, **kwargs):
        """Print the log line, then delegate to the wrapped callable."""
        message = '[INFO]: the function {func}() is running...'.format(func=self.func.__name__)
        print(message)
        return self.func(*args, **kwargs)
@logger  # apply the logging decorator
def say(something):
    """Print a greeting; the decorator logs the call first."""
    print('say {}!'.format(something))

say('hello')  # demo: logs the call, then prints "say hello!"
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
8ac7a93c8efec3f919c47f1252986d33592d05af | a8769709aeb7299fa3757f0e7bba5c617eb8cfe3 | /lesson-3/k8s/lib/python2.7/site-packages/kubernetes/client/models/v1_volume_node_affinity.py | 1679a08b6ffd499a1bbd967aeb3fa5ae2910d3e8 | [
"Apache-2.0"
] | permissive | simox-83/workshop-k8s | 2ac5e8b282bb7c3337acc726a7d972717bf649cc | 04cb18e8b5925a3cfd84ca316952a6cb64960b31 | refs/heads/master | 2020-03-31T20:52:21.421995 | 2018-10-11T14:43:08 | 2018-10-11T14:43:08 | 152,558,678 | 0 | 0 | Apache-2.0 | 2018-10-11T08:37:20 | 2018-10-11T08:37:20 | null | UTF-8 | Python | false | false | 3,247 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1VolumeNodeAffinity(object):
    """Swagger-generated Kubernetes API model -- do not edit manually.

    ``swagger_types`` maps each attribute name to its swagger type string;
    ``attribute_map`` maps each attribute name to its JSON key.
    """

    swagger_types = {
        'required': 'V1NodeSelector'
    }

    attribute_map = {
        'required': 'required'
    }

    def __init__(self, required=None):
        """Create the model; ``required`` is only assigned when supplied."""
        self._required = None
        self.discriminator = None
        if required is not None:
            self.required = required

    @property
    def required(self):
        """Gets the required of this V1VolumeNodeAffinity.

        Required specifies hard node constraints that must be met.

        :return: The required of this V1VolumeNodeAffinity.
        :rtype: V1NodeSelector
        """
        return self._required

    @required.setter
    def required(self, required):
        """Sets the required of this V1VolumeNodeAffinity.

        :param required: The required of this V1VolumeNodeAffinity.
        :type: V1NodeSelector
        """
        self._required = required

    def to_dict(self):
        """Return the model's properties as a plain dict, recursively."""
        def convert(value):
            # Nested swagger models expose to_dict; lists and dicts are walked.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: convert(getattr(self, name)) for name in self.swagger_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their types and attribute dicts match."""
        return isinstance(other, V1VolumeNodeAffinity) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
"simone.dandreta@concur.com"
] | simone.dandreta@concur.com |
44924e9a5021295d2b49049fca70b2b8f8d2d91a | a0529a92f73c951bacfe69cc058c53394a9685d4 | /bingads/v12/bulk/entities/ad_extensions/bulk_action_ad_extensions.py | 2f884206ed01e4fa360a57370ec5cf9ec8ceac4a | [
"MIT"
] | permissive | joseftf/BingAds-Python-SDK | 06eda7d23e3141c9fcaee39a3424cf8317d472ed | 205ebf9bdd9701d5d05c5f9ac59702083754f553 | refs/heads/master | 2020-04-29T16:41:06.392989 | 2019-02-17T23:16:17 | 2019-02-17T23:16:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,825 | py | from bingads.v12.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v12.internal.bulk.string_table import _StringTable
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V12
from .common import _BulkAdExtensionBase
from .common import _BulkAdGroupAdExtensionAssociation
from .common import _BulkCampaignAdExtensionAssociation
from .common import _BulkAccountAdExtensionAssociation
from bingads.v12.internal.extensions import *
_ActionAdExtension = type(_CAMPAIGN_OBJECT_FACTORY_V12.create('ActionAdExtension'))
class BulkActionAdExtension(_BulkAdExtensionBase):
""" Represents a action ad extension.
This class exposes the :attr:`action_ad_extension` property that can be read and written
as fields of the Action Ad Extension record in a bulk file.
For more information, see Action Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
    def __init__(self, account_id=None, ad_extension=None):
        """Initialize the bulk entity.

        :param account_id: identifier of the account that owns the extension.
        :param ad_extension: the ActionAdExtension to wrap; passing any other
            (truthy) type raises ValueError.
        """
        if ad_extension and not isinstance(ad_extension, _ActionAdExtension):
            raise ValueError('The type of ad_extension is: {0}, should be: {1}'.format(
                type(ad_extension),
                'ActionAdExtension'
            ))
        super(BulkActionAdExtension, self).__init__(
            account_id=account_id,
            ad_extension=ad_extension
        )
    @property
    def action_ad_extension(self):
        """ The action ad extension wrapped by this bulk entity.

        see Action Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
        """

        return self._ad_extension

    @action_ad_extension.setter
    def action_ad_extension(self, value):
        # Backing attribute; the CSV mappings below read it via this property.
        self._ad_extension = value
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.ActionType,
field_to_csv=lambda c: bulk_str(c.action_ad_extension.ActionType),
csv_to_field=lambda c, v: setattr(c.action_ad_extension, 'ActionType', v)
),
_SimpleBulkMapping(
header=_StringTable.FinalUrl,
field_to_csv=lambda c: field_to_csv_Urls(c.action_ad_extension.FinalUrls),
csv_to_field=lambda c, v: csv_to_field_Urls(c.action_ad_extension.FinalUrls, v)
),
_SimpleBulkMapping(
header=_StringTable.FinalMobileUrl,
field_to_csv=lambda c: field_to_csv_Urls(c.action_ad_extension.FinalMobileUrls),
csv_to_field=lambda c, v: csv_to_field_Urls(c.action_ad_extension.FinalMobileUrls, v)
),
_SimpleBulkMapping(
header=_StringTable.TrackingTemplate,
field_to_csv=lambda c: bulk_optional_str(c.action_ad_extension.TrackingUrlTemplate),
csv_to_field=lambda c, v: setattr(c.action_ad_extension, 'TrackingUrlTemplate', v if v else '')
),
_SimpleBulkMapping(
header=_StringTable.Language,
field_to_csv=lambda c: bulk_optional_str(c.action_ad_extension.Language),
csv_to_field=lambda c, v: setattr(c.action_ad_extension, 'Language', v if v else '')
),
_SimpleBulkMapping(
header=_StringTable.CustomParameter,
field_to_csv=lambda c: field_to_csv_UrlCustomParameters(c.action_ad_extension),
csv_to_field=lambda c, v: csv_to_field_UrlCustomParameters(c.action_ad_extension, v)
)
]
def process_mappings_from_row_values(self, row_values):
self.action_ad_extension = _CAMPAIGN_OBJECT_FACTORY_V12.create('ActionAdExtension')
self.action_ad_extension.Type = 'ActionAdExtension'
super(BulkActionAdExtension, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkActionAdExtension._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.action_ad_extension, 'action_ad_extension')
super(BulkActionAdExtension, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkActionAdExtension._MAPPINGS)
class BulkAccountActionAdExtension(_BulkAccountAdExtensionAssociation):
    """ The account-level association for an action ad extension.

    Readable and writable fields of the Account Action Ad Extension bulk
    record are provided by the association base class.
    For more information, see Account Action Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.

    *See also:*

    * :class:`.BulkServiceManager`
    * :class:`.BulkOperation`
    * :class:`.BulkFileReader`
    * :class:`.BulkFileWriter`
    """
class BulkCampaignActionAdExtension(_BulkCampaignAdExtensionAssociation):
    """ The campaign-level association for an action ad extension.

    Readable and writable fields of the Campaign Action Ad Extension bulk
    record are provided by the association base class.
    For more information, see Campaign Action Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.

    *See also:*

    * :class:`.BulkServiceManager`
    * :class:`.BulkOperation`
    * :class:`.BulkFileReader`
    * :class:`.BulkFileWriter`
    """
class BulkAdGroupActionAdExtension(_BulkAdGroupAdExtensionAssociation):
    """ The ad-group-level association for an action ad extension.

    Readable and writable fields of the Ad Group Action Ad Extension bulk
    record are provided by the association base class.
    For more information, see Ad Group Action Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.

    *See also:*

    * :class:`.BulkServiceManager`
    * :class:`.BulkOperation`
    * :class:`.BulkFileReader`
    * :class:`.BulkFileWriter`
    """
| [
"qitia@microsoft.com"
] | qitia@microsoft.com |
33be986bba891a0a1751bb25973dd5564f906a7a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2850/60691/295113.py | 2dc388fb247c3a9fc4edd3c03a2977e028026556 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | def countone(l):
count = 0
for i in range(len(l)):
if l[i] == 1:
count += 1
return count
def convert(l, start, k):
    """Flip the k entries of l starting at index start (in place), then
    return how many entries of the whole list equal 1."""
    for pos in range(start, start + k):
        l[pos] = 1 if l[pos] == 0 else 0
    return sum(1 for bit in l if bit == 1)
def reverse(s, k):
    """For every length-k window of the bit string s, flip that window and
    count the ones in the result; return the largest such count.

    Raises ValueError (max of empty sequence) when k exceeds len(s)."""
    counts = []
    for start in range(len(s) - k + 1):
        bits = [int(ch) for ch in s]
        for pos in range(start, start + k):
            bits[pos] = 1 if bits[pos] == 0 else 0
        counts.append(sum(1 for b in bits if b == 1))
    return max(counts)
def countzero(l):
    """Count the elements of l that equal the string '1'.

    NOTE(review): despite its name this counts '1' entries, not zeros;
    behavior is kept as-is — confirm the intent against the caller."""
    return sum(1 for item in l if item == '1')
# Read a count (unused beyond consuming the line) and a space-separated bit
# list; print the list length when it holds no '1', otherwise print the best
# ones-count reachable by flipping a single window of any size 0..len-1.
n = int(input())
tokens = input().split(' ')
bits = list(tokens)
if countzero(bits) == 0:
    print(len(bits))
else:
    joined = ''.join(bits)
    print(max(reverse(joined, k) for k in range(len(bits))))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
6938bb062f1e075b98f9af371123bfd4137ec234 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_06_01_preview/aio/_configuration.py | da6ab06618bea61e3ca4a9404926ddd3eacad877 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 3,336 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class KeyVaultManagementClientConfiguration(Configuration):
    """Configuration for KeyVaultManagementClient.

    Every parameter used to create this instance is stored as an instance
    attribute.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        # Both arguments are mandatory; fail fast with explicit messages.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(KeyVaultManagementClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2021-06-01-preview"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-keyvault/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # A caller-supplied policy (passed through kwargs) always wins;
        # otherwise the azure-core default for that slot is constructed.
        supplied = kwargs.get
        self.user_agent_policy = supplied('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = supplied('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = supplied('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = supplied('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = supplied('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = supplied('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = supplied('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = supplied('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = supplied('authentication_policy')
        if self.credential and not self.authentication_policy:
            # Default to bearer-token authentication over the configured scopes.
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.