blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5cc35b3bea6679945d06dbe918ac491d9c50c9aa | e38c0acac7033db5ff4fcb8843bf48fe77379392 | /desktop/libs/liboozie/setup.py | 78b885c5c1a7cf7d571b097327a11205f0257086 | [
"Apache-2.0"
] | permissive | dulems/hue | 05cdced778afa1b979b2f98bc28226c3c9b2ed0f | 3931cfd5c1dba02165cccf732cc4e6cc326a6a70 | refs/heads/master | 2021-01-04T14:10:32.477934 | 2015-07-27T03:23:44 | 2015-07-27T03:23:44 | 37,165,512 | 0 | 0 | null | 2015-06-15T23:30:55 | 2015-06-10T00:06:52 | Python | UTF-8 | Python | false | false | 1,262 | py | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# setuptools metadata for the liboozie library, built from the src/ layout
# and registered with Hue so its configuration is discovered.
setup(
  name = "liboozie",
  version = "3.8.1",
  url = 'http://github.com/cloudera/hue',
  description = "Oozie Libraries",
  packages = find_packages('src'),
  package_dir = {'': 'src' },
  install_requires = ['setuptools', 'desktop'],
  # Even libraries need to be registered as desktop_apps,
  # if they have configuration, like this one.
  entry_points = { 'desktop.sdk.lib': 'liboozie=liboozie' },
)
| [
"romain@cloudera.com"
] | romain@cloudera.com |
f4c9a708bac850f83274e6b93266d0c857a07a67 | 1fe56144905244643dbbab69819720bc16031657 | /.history/books/admin_20210422174025.py | 974b41eccc123608d1d211f8a7c120fc32c54b59 | [] | no_license | RaghdaMadiane/django | 2052fcdd532f9678fefb034bd60e44f466bd9759 | 6ca3f87f0b72880f071d90968f0a63ea5badcca8 | refs/heads/master | 2023-04-15T17:28:25.939823 | 2021-04-24T22:33:21 | 2021-04-24T22:33:21 | 361,279,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | from django.contrib import admin
from .models import Book ,Category ,Tag,Isbn
from .forms import BookForm
from django import forms
from django.core.exceptions import ValidationError
class BookForm(forms.ModelForm):
    """Admin form for Book enforcing simple length constraints.

    NOTE(review): this class shadows the BookForm imported from .forms at
    the top of the module -- confirm the local definition is the one intended.
    """
    class Meta:
        model = Book
        fields='__all__'
    def clean_title(self):
        """Require the title length to be within [10, 20] characters."""
        title=self.cleaned_data.get("title")
        titleLength=len(title)
        if titleLength<10:
            raise ValidationError("title should be more than 10 chars!")
        if titleLength>20:
            raise ValidationError("title should be less than 20 chars!")
        return title
    def clean_category(self):
        """Require the category value to be at least 2 characters long.

        NOTE(review): BookAdmin filters on "categories" (plural); confirm a
        form field named "category" exists -- otherwise cleaned_data.get()
        returns None and len() would raise TypeError.
        """
        category=self.cleaned_data.get("category")
        catLength=len(category)
        if catLength<2:
            raise ValidationError("category name length should be more than 2 chars!")
        return category
class BookAdmin(admin.ModelAdmin):
    """Admin options for Book: custom validation form + category filter."""
    form=BookForm
    list_filter=("categories",)
class BookInLine(admin.StackedInline):
    """Inline editor for Books on a related object's admin page."""
    model=Book
    # At most 3 inline book forms, with 1 empty extra form shown by default.
    max_num =3
    extra = 1
class TagAdmin(admin.ModelAdmin):
    """Admin options for Tag: edit its Books inline."""
    inlines=[BookInLine]
# Register the models with the default admin site; Category and Isbn use
# the stock ModelAdmin.
admin.site.register(Book,BookAdmin)
admin.site.register(Category)
admin.site.register(Isbn)
admin.site.register(Tag,TagAdmin)
| [
"raghdamadiane@gmail.com"
] | raghdamadiane@gmail.com |
f62b38f7fe3ecd7a5044a7c77e8f64f638e862eb | 34e0865fb4915390e77336e81b2c87ec2bf52df6 | /tweet/admin.py | 18778cdb4a53c4545a3921863969b68e8c08ff99 | [] | no_license | HiPiH/local | 3702be6b140fe879188e9623ede27adfc1ce8765 | 6c3bd2c0818c780977c2081ab72906f0166625dd | refs/heads/master | 2021-01-25T04:50:29.096944 | 2011-12-24T08:21:39 | 2011-12-24T08:21:39 | 3,026,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | # -*- coding: utf-8 -*-
__author__ = 'Aleksey.Novgorodov'
from django.contrib.admin import site,ModelAdmin
from models import TweetWords,TweetLang
# Expose the tweet models in the Django admin with the default options.
site.register(TweetLang)
site.register(TweetWords)
"admin@nvk.su"
] | admin@nvk.su |
572ac80b5a8fb2a0418cfa931e3dc7d560f7d0d6 | 127b460b1d540e6f8c3aa90dfc04e8abf84a97ff | /parler/tests/testapp/models.py | 0aa2c9f279bbb4b37ce1077dcc646bfacb091b25 | [
"Apache-2.0"
] | permissive | philippeowagner/django-parler | c10634a863088708644cbe4592bac635553e44a0 | db0ff40760759f15620954994a2e2f8584733de4 | refs/heads/master | 2021-01-09T05:31:38.302665 | 2013-09-25T13:22:07 | 2013-09-25T13:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | from django.db import models
from parler.fields import TranslatedField
from parler.models import TranslatableModel, TranslatedFields, TranslatedFieldsModel
class ManualModel(TranslatableModel):
    """Translatable model whose translations table is declared manually below."""
    shared = models.CharField(max_length=200, default='')
class ManualModelTranslations(TranslatedFieldsModel):
    """Hand-written translations table for ManualModel."""
    # Link back to the shared (non-translated) instance.
    master = models.ForeignKey(ManualModel, related_name='translations')
    tr_title = models.CharField(max_length=200)
class SimpleModel(TranslatableModel):
    """Translatable model using the TranslatedFields shortcut."""
    shared = models.CharField(max_length=200, default='')
    translations = TranslatedFields(
        tr_title = models.CharField(max_length=200)
    )
    def __unicode__(self):
        # Python 2 style string representation: the translated title.
        return self.tr_title
class AnyLanguageModel(TranslatableModel):
    """Translatable model whose title falls back to any available language."""
    shared = models.CharField(max_length=200, default='')
    # any_language=True: return the title from some other language when the
    # current language has no translation.
    tr_title = TranslatedField(any_language=True)
    translations = TranslatedFields(
        tr_title = models.CharField(max_length=200)
    )
    def __unicode__(self):
        return self.tr_title
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
06c37b6866986287f6f5442dcec9d3ecc5a2d997 | 988c3065d6c475c0a90765b2245de54624325e22 | /venv/bin/pylint | 298cec919a098ab672eeb5f932e9ea3b6880dc57 | [
"MIT"
] | permissive | brayomumo/Instagram | c7e3ec061e4503320f3e1ed78096d032044f2435 | cbe932eac29afda480a0cbdea71a9f1eda01845c | refs/heads/master | 2021-09-09T13:20:07.682824 | 2019-09-02T09:06:47 | 2019-09-02T09:06:47 | 205,555,185 | 0 | 0 | null | 2021-09-08T01:15:00 | 2019-08-31T14:31:57 | Python | UTF-8 | Python | false | false | 263 | #!/home/smoucha/Desktop/projects/instagram/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes ("-script.py", ".exe") from argv[0]
    # so pylint reports a clean program name, then run it.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pylint())
| [
"brayomumo5@gmail.com"
] | brayomumo5@gmail.com | |
b938e93bf47ff375e76e067089ecc3cb9b52a355 | 44702094bd2f512cc672ade27e39ca29755f89ac | /projects/chatbot/simple-bot.py | 6b33b0a67a0e789eb8e6fdca5a01e34e990489fd | [
"Python-2.0",
"MIT"
] | permissive | AIMLCamp/python-mini-projects | 20a1c15faa645aa4bd9a24efb3cee3da555201f5 | aaa166bb190d06b2264afc548a29c998a6664a3e | refs/heads/master | 2023-09-02T04:52:00.311157 | 2021-11-22T02:19:12 | 2021-11-22T02:19:12 | 407,899,754 | 0 | 0 | MIT | 2021-11-22T02:19:12 | 2021-09-18T15:38:25 | null | UTF-8 | Python | false | false | 268 | py | import asyncio
from wechaty import Wechaty, Message
async def on_message(msg: Message):
    """Reply 'dong' whenever a message with the exact text 'ding' arrives."""
    if msg.text() == 'ding':
        await msg.say('dong')
async def main():
    """Create the bot, attach the message handler and run until stopped."""
    bot = Wechaty()
    bot.on('message', on_message)
    await bot.start()
asyncio.run(main())
| [
"1435130236@qq.com"
] | 1435130236@qq.com |
7b1a9539da50bb87e753173c18e83da533c2e5d9 | 46c76c7ca1d9d030606f2e3e95a2a9e6bbad2789 | /args2.py | b2b54870182508e085c37f588246b43d14263d83 | [] | no_license | KayMutale/pythoncourse | be9ff713cffc73c1b9b3c1dd2bdd6d293637ce1e | 985a747ff17133aa533b7a049f83b37fc0fed80e | refs/heads/master | 2023-04-13T07:58:00.993724 | 2021-04-16T14:19:41 | 2021-04-16T14:19:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py |
#!/usr/bin/env python3
import sys
if len(sys.argv) < 2:
arg = input("enter stop|start|restart: ")
else:
arg = sys.argv[1]
out = {
"start" : "starting",
"stop" : "stopping",
"restart" : "stopping\nstarting"
}
print out.get(arg,"usage: stops|start|restart")
| [
"mark@ledge.co.za"
] | mark@ledge.co.za |
dc182a07b673bb672258ea59918ef4ebc350823c | 2a5c0c49319989a24f9c9f18530f109bc48a8df1 | /CursesEnded/SecondYear/PythonAnaconda(BigData)/exam/pycharm/1.py | 34bc5d217826a141d75c77fb83c323b4654f861a | [] | no_license | Kamil-IT/Studies | 0ada6dd92d7ecdbd0a3164c7c80080dd715ce8fc | d70343b2b7818ce303d816443bb15d21e03b42e0 | refs/heads/master | 2022-12-22T01:55:35.047783 | 2022-10-20T18:55:29 | 2022-10-20T18:55:29 | 217,039,987 | 0 | 1 | null | 2022-12-10T06:03:55 | 2019-10-23T11:24:33 | Jupyter Notebook | UTF-8 | Python | false | false | 3,418 | py | from matplotlib import pyplot as plt
from numpy import random
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
def generate_data(quantity, cech_quantity, class_quantity):
    """Generate `quantity` random samples split evenly across the classes.

    Each sample is a numpy vector of `cech_quantity` features drawn from
    [0, 1) and shifted by 0.5 per class index so classes are separable.
    Returns the pair (samples, labels) as parallel lists.
    """
    samples = []
    labels = []
    per_class = quantity // class_quantity
    for label in range(class_quantity):
        for _ in range(per_class):
            samples.append(random.rand(cech_quantity) + label - 0.5 * label)
            labels.append(label)
    return samples, labels
def generate_data_to_plot_data(generate_data, generated_class):
    """Group the samples by class label.

    Returns a list of buckets, one per distinct class, where bucket ``c``
    holds every sample whose label is ``c`` (in original order).
    """
    buckets = [[] for _ in set(generated_class)]
    for sample, label in zip(generate_data, generated_class):
        buckets[label].append(sample)
    return buckets
def plot_generated_data(generate_data, generated_class):
    """Scatter-plot the samples, one color per class (2D or 3D).

    Chooses a 2D or 3D axes based on the feature count of the first sample.
    NOTE(review): only supports up to 6 classes (length of the color list);
    the 3D branch uses plt.show(block=False) while 2D blocks -- confirm
    that asymmetry is intended.
    """
    colors = ['r+', 'b+', 'm+', 'y+', 'c+', 'g+']
    unique_class = set(generated_class)
    # Keep exactly one marker style per distinct class.
    colors = [colors[i] for i in range(len(unique_class))]
    data_to_plot = generate_data_to_plot_data(generate_data, generated_class)
    if len(generate_data[0]) == 2:
        fig = plt.figure()
        fig.add_subplot(111)
        def plot_figure(data, color, label):
            plt.plot([i[0] for i in data], [i[1] for i in data], color, label=label)
        for i in range(len(colors)):
            plot_figure(data_to_plot[i], colors[i], f'class {i}')
        plt.legend()
        plt.show()
    else:
        fig = plt.figure()
        fig.add_subplot(111, projection='3d')
        def plot_figure(data, color, label):
            plt.plot([i[0] for i in data], [i[1] for i in data], [i[2] for i in data], color, label=label)
        for i in range(len(colors)):
            plot_figure(data_to_plot[i], colors[i], f'class {i}')
        plt.legend()
        plt.show(block=False)
def tp(prediction, classes):
    """Count true positives: predicted 1 while the true class is 1."""
    count = 0
    for idx, guess in enumerate(prediction):
        if guess == 1 and classes[idx] == 1:
            count += 1
    return count
def fp(prediction, classes):
    """Count false positives: predicted 1 while the true class is 0."""
    count = 0
    for idx, guess in enumerate(prediction):
        if guess == 1 and classes[idx] == 0:
            count += 1
    return count
def fn(prediction, classes):
    """Count false negatives: predicted 0 while the true class is 1."""
    count = 0
    for idx, guess in enumerate(prediction):
        if guess == 0 and classes[idx] == 1:
            count += 1
    return count
def tn(prediction, classes):
    """Count true negatives: predicted 0 while the true class is 0."""
    count = 0
    for idx, guess in enumerate(prediction):
        if guess == 0 and classes[idx] == 0:
            count += 1
    return count
# Train an SVC on synthetic 2-class data and report evaluation metrics.
data, classes = generate_data(200, 2, 2)
train_data, test_data, train_class, test_class = train_test_split(data, classes, test_size=0.3)
plot_generated_data(train_data, train_class)
plot_generated_data(test_data, test_class)
clf = SVC()
clf.fit(train_data, train_class)
# Prediction
predicted = clf.predict(test_data)
tn_val = tn(predicted, test_class)
tp_val = tp(predicted, test_class)
fp_val = fp(predicted, test_class)
fn_val = fn(predicted, test_class)
# Bug fix: accuracy is (TP + TN) / total. The original computed
# (tp_val / tn_val) / total, which is not a valid metric and divided
# by zero whenever TN == 0.
dokladnosc = (tp_val + tn_val) / (tp_val + tn_val + fp_val + fn_val)
precyzja = tp_val / (tp_val + fp_val)
specyficznosc = tn_val / (tn_val + fp_val)
print("Bazowałem na klasyfikatorze SVC")
print("Dokładność " + str(dokladnosc))
print("precyzja " + str(precyzja))
print("specyficznosc " + str(specyficznosc))
# Bar chart of the three metrics.
fig = plt.figure()
fig.add_subplot(111)
plt.bar(['Dokładność', 'precyzja', 'specyficznosc'], [dokladnosc, precyzja, specyficznosc])
plt.show() | [
"kkwolny@vp.pl"
] | kkwolny@vp.pl |
c3bb33f234a16ed5e919d2aafe6a6b045faef2da | 9080e6e53da365b0d811099e2e992041cf4b5b47 | /0x05-python-exceptions/100-safe_print_integer_err.py | b2fcf082bb0a97016d8da2c794ed362c5a96455f | [] | no_license | benjamesian/holbertonschool-higher_level_programming | 213ad8c39d1fc2ee81843124a46914be166445d3 | 99f00414833757e3b156c148927a858ce38baa0e | refs/heads/master | 2020-07-23T00:52:46.858544 | 2020-02-11T22:03:24 | 2020-02-11T22:03:24 | 207,389,880 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | #!/usr/bin/python3
import sys
def safe_print_integer_err(value):
    """Print *value* as a base-10 integer.

    On success return True; otherwise write the exception to stderr in the
    form "Exception: <msg>" and return False.
    """
    try:
        print("{:d}".format(value))
    except Exception as err:
        sys.stderr.write("Exception: {}\n".format(err))
        return False
    return True
| [
"808@holbertonschool.com"
] | 808@holbertonschool.com |
f224136fc4dfb21113fa0f2acbc8d724fe512e68 | d61d0498f1dde41ec07878f1ef5da039c8351cff | /examples/DeepQNetwork/DQN.py | 6000f95fa4f600fcf01c625f56f8f83659f700ec | [
"Apache-2.0"
] | permissive | Johnson-yue/tensorpack | ca9a4e0de9d7292f696d634122d65eaa6f34d26d | a0601fb76df87f567e456ba97f3e51aa85ec50e0 | refs/heads/master | 2021-05-07T03:42:56.643626 | 2017-11-14T15:14:47 | 2017-11-14T15:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,906 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DQN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os
import argparse
import cv2
import tensorflow as tf
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from DQNModel import Model as DQNModel
from common import Evaluator, eval_model_multithread, play_n_episodes
from atari_wrapper import FrameStack, MapState, FireResetEnv
from expreplay import ExpReplay
from atari import AtariPlayer
# Training hyper-parameters for DQN on Atari.
BATCH_SIZE = 64
IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
ACTION_REPEAT = 4 # aka FRAME_SKIP
UPDATE_FREQ = 4
GAMMA = 0.99
MEMORY_SIZE = 1e6
# will consume at least 1e6 * 84 * 84 bytes == 6.6G memory.
INIT_MEMORY_SIZE = MEMORY_SIZE // 20
STEPS_PER_EPOCH = 10000 // UPDATE_FREQ * 10 # each epoch is 100k played frames
EVAL_EPISODE = 50
# Filled in from command-line arguments in __main__ below.
NUM_ACTIONS = None
ROM_FILE = None
METHOD = None
def get_player(viz=False, train=False):
    """Build the wrapped Atari environment.

    :param viz: visualization option forwarded to AtariPlayer.
    :param train: when True, a lost life counts as end-of-episode and frame
        stacking is skipped (the replay buffer supplies history instead).
    """
    env = AtariPlayer(ROM_FILE, frame_skip=ACTION_REPEAT, viz=viz,
                      live_lost_as_eoe=train, max_num_frames=30000)
    env = FireResetEnv(env)
    # Downscale raw frames to the network input resolution.
    env = MapState(env, lambda im: cv2.resize(im, IMAGE_SIZE))
    if not train:
        # in training, history is taken care of in expreplay buffer
        env = FrameStack(env, FRAME_HISTORY)
    return env
class Model(DQNModel):
    """DQN network; METHOD selects plain DQN, Double DQN or Dueling DQN."""
    def __init__(self):
        super(Model, self).__init__(IMAGE_SIZE, FRAME_HISTORY, METHOD, NUM_ACTIONS, GAMMA)

    def _get_DQN_prediction(self, image):
        """ image: [0,255]"""
        # Normalize pixel values to [0, 1].
        image = image / 255.0
        with argscope(Conv2D, nl=PReLU.symbolic_function, use_bias=True), \
                argscope(LeakyReLU, alpha=0.01):
            l = (LinearWrap(image)
                 # Nature architecture
                 .Conv2D('conv0', out_channel=32, kernel_shape=8, stride=4)
                 .Conv2D('conv1', out_channel=64, kernel_shape=4, stride=2)
                 .Conv2D('conv2', out_channel=64, kernel_shape=3)

                 # architecture used for the figure in the README, slower but takes fewer iterations to converge
                 # .Conv2D('conv0', out_channel=32, kernel_shape=5)
                 # .MaxPooling('pool0', 2)
                 # .Conv2D('conv1', out_channel=32, kernel_shape=5)
                 # .MaxPooling('pool1', 2)
                 # .Conv2D('conv2', out_channel=64, kernel_shape=4)
                 # .MaxPooling('pool2', 2)
                 # .Conv2D('conv3', out_channel=64, kernel_shape=3)

                 .FullyConnected('fc0', 512, nl=LeakyReLU)())
        if self.method != 'Dueling':
            Q = FullyConnected('fct', l, self.num_actions, nl=tf.identity)
        else:
            # Dueling DQN
            # Q(s,a) = V(s) + A(s,a) - mean_a A(s,a)
            V = FullyConnected('fctV', l, 1, nl=tf.identity)
            As = FullyConnected('fctA', l, self.num_actions, nl=tf.identity)
            Q = tf.add(As, V - tf.reduce_mean(As, 1, keep_dims=True))
        return tf.identity(Q, name='Qvalue')
def get_config():
    """Assemble the TrainConfig: replay buffer, model, callbacks, schedule."""
    expreplay = ExpReplay(
        predictor_io_names=(['state'], ['Qvalue']),
        player=get_player(train=True),
        state_shape=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
        memory_size=MEMORY_SIZE,
        init_memory_size=INIT_MEMORY_SIZE,
        init_exploration=1.0,
        update_frequency=UPDATE_FREQ,
        history_len=FRAME_HISTORY
    )

    return TrainConfig(
        data=QueueInput(expreplay),
        model=Model(),
        callbacks=[
            ModelSaver(),
            PeriodicTrigger(
                RunOp(DQNModel.update_target_param, verbose=True),
                every_k_steps=10000 // UPDATE_FREQ),    # update target network every 10k steps
            expreplay,
            # Decay the learning rate at epoch 60 and 100.
            ScheduledHyperParamSetter('learning_rate',
                                      [(60, 4e-4), (100, 2e-4)]),
            ScheduledHyperParamSetter(
                ObjAttrParam(expreplay, 'exploration'),
                [(0, 1), (10, 0.1), (320, 0.01)],   # 1->0.1 in the first million steps
                interp='linear'),
            # Periodically evaluate the greedy policy over full episodes.
            PeriodicTrigger(Evaluator(
                EVAL_EPISODE, ['state'], ['Qvalue'], get_player),
                every_k_epochs=10),
            HumanHyperParamSetter('learning_rate'),
        ],
        steps_per_epoch=STEPS_PER_EPOCH,
        max_epoch=1000,
    )
if __name__ == '__main__':
    # CLI: choose the ROM, the algorithm variant, and whether to
    # train from scratch, evaluate, or play with a saved model.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--task', help='task to perform',
                        choices=['play', 'eval', 'train'], default='train')
    parser.add_argument('--rom', help='atari rom', required=True)
    parser.add_argument('--algo', help='algorithm',
                        choices=['DQN', 'Double', 'Dueling'], default='Double')
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    ROM_FILE = args.rom
    METHOD = args.algo

    # set num_actions
    NUM_ACTIONS = AtariPlayer(ROM_FILE).action_space.n
    logger.info("ROM: {}, Num Actions: {}".format(ROM_FILE, NUM_ACTIONS))

    if args.task != 'train':
        # play/eval require a trained checkpoint passed via --load.
        assert args.load is not None
        pred = OfflinePredictor(PredictConfig(
            model=Model(),
            session_init=get_model_loader(args.load),
            input_names=['state'],
            output_names=['Qvalue']))
        if args.task == 'play':
            play_n_episodes(get_player(viz=0.01), pred, 100)
        elif args.task == 'eval':
            eval_model_multithread(pred, EVAL_EPISODE, get_player)
    else:
        # Name the log dir after the ROM (without extension).
        logger.set_logger_dir(
            os.path.join('train_log', 'DQN-{}'.format(
                os.path.basename(ROM_FILE).split('.')[0])))
        config = get_config()
        if args.load:
            config.session_init = get_model_loader(args.load)
        launch_train_with_config(config, SimpleTrainer())
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
ff374d319bbe1f8b52441278e5fbb540de3bd79d | 3e799066e12dbcbbce073ce9d74fc3ae43683686 | /pytablereader/__version__.py | e01e0f951c4a397fce0e5ae2b326b104aa026250 | [
"MIT"
] | permissive | nneophyt/pytablereader | 07f51f47552256211abb7c58badaa1e4c6ec3e28 | b158cf6749ce95a4602c68b1268c4e5465464760 | refs/heads/master | 2020-09-09T01:16:34.405526 | 2019-05-11T12:34:37 | 2019-05-11T12:34:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | # encoding: utf-8
from datetime import datetime
# Package metadata exposed as module-level dunder attributes.
__author__ = "Tsuyoshi Hombashi"
# The copyright year range tracks the current year at import time.
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.26.1"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| [
"tsuyoshi.hombashi@gmail.com"
] | tsuyoshi.hombashi@gmail.com |
f050dd43c5dae9b97542bc5a13d49bf1739a90f4 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /03_Linear_Algebra_for_Machine_Learning/07/04_orthogonal_matrix.py | 7ddc8461a7f485fbbf2d7feca3783fe63b74ce9a | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # orthogonal matrix
from numpy import array
from numpy.linalg import inv
# define orthogonal matrix
Q = array([
[1, 0],
[0, -1]])
print(Q)
# inverse equivalence
V = inv(Q)
print(Q.T)
print(V)
# identity equivalence
I = Q.dot(Q.T)
print(I)
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
897e63f4f998a043ef122953c3868e09cc9ac7ca | ecd25c36474ecf404a32f2f0096b5a6898e4c396 | /python_stack/django/django_fullstack/login_registration/login_registration_app/migrations/0002_user_confirm.py | 1e0c5732eeff98c6bf46805332c3db2d7d436d26 | [] | no_license | matthew-le/Coding_Dojo_Bootcamp | cd7b4aa8e231db372da05a0a5444114b07fbfabf | 6d433d5305d2d8f4ea485206895d8f84bedeb59d | refs/heads/main | 2023-06-13T23:05:23.827556 | 2021-07-23T23:56:35 | 2021-07-23T23:56:35 | 388,947,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Generated by Django 2.2 on 2021-07-13 06:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the 'confirm' column (password confirmation) to the user model."""
    dependencies = [
        ('login_registration_app', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='confirm',
            # NOTE(review): null='TRUE' and preserve_default='TRUE' pass the
            # *string* 'TRUE', not the boolean True. A non-empty string is
            # truthy so this behaves like True, but it is likely unintended.
            field=models.CharField(max_length=255, null='TRUE'),
            preserve_default='TRUE',
        ),
    ]
| [
"you@example.com"
] | you@example.com |
a3c9f655f733b4654a8313f92f1838bd5fc36704 | 72e11a80587342b3f278d4df18406cd4ce7531e8 | /msrestazure/tools.py | 3f9a3209e80bcfab44f133f86b647e30171e3669 | [] | no_license | EnjoyLifeFund/Debian_py36_packages | 740666f290cef73a4f634558ccf3fd4926addeda | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | refs/heads/master | 2021-08-24T02:17:24.349195 | 2017-12-06T06:18:35 | 2017-12-06T06:18:35 | 113,167,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,653 | py | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import json
import re
import logging
import time
import uuid
# Module-level logger used for registration progress messages.
_LOGGER = logging.getLogger(__name__)
# Matches an ARM resource id up to the first-level resource; anything after
# the root resource name (nested children) lands in the 'children' group.
_ARMID_RE = re.compile(
    '/subscriptions/(?P<subscription>[^/]*)(/resource[gG]roups/(?P<resource_group>[^/]*))?'
    '/providers/(?P<namespace>[^/]*)/(?P<type>[^/]*)/(?P<name>[^/]*)(?P<children>.*)')
# Matches one nested child segment, optionally prefixed by its own provider.
_CHILDREN_RE = re.compile('(/providers/(?P<child_namespace>[^/]*))?/'
                          '(?P<child_type>[^/]*)/(?P<child_name>[^/]*)')
def register_rp_hook(r, *args, **kwargs):
    """This is a requests hook to register RP automatically.

    On a 409 caused by an unregistered resource provider, registers the
    provider synchronously and replays the original request once.

    See requests documentation for details of the signature of this function.
    http://docs.python-requests.org/en/master/user/advanced/#event-hooks
    """
    if r.status_code == 409 and 'msrest' in kwargs:
        rp_name = _check_rp_not_registered_err(r)
        if rp_name:
            session = kwargs['msrest']['session']
            url_prefix = _extract_subscription_url(r.request.url)
            if not _register_rp(session, url_prefix, rp_name):
                # Registration failed: give up and let the 409 propagate.
                return
            req = r.request
            # Change the 'x-ms-client-request-id' otherwise the Azure endpoint
            # just returns the same 409 payload without looking at the actual query
            if 'x-ms-client-request-id' in req.headers:
                req.headers['x-ms-client-request-id'] = str(uuid.uuid1())
            return session.send(req)
def _check_rp_not_registered_err(response):
    """Return the resource-provider name quoted in a
    MissingSubscriptionRegistration error payload, or None when the
    response is anything else (including malformed JSON)."""
    try:
        payload = json.loads(response.content.decode())
        error = payload['error']
        if error['code'] == 'MissingSubscriptionRegistration':
            # The RP name is the last single-quoted token in the message.
            return re.match(r".*'(.*)'", error['message']).group(1)
    except Exception:  # pylint: disable=broad-except
        # Best effort: any parsing failure simply means "not this error".
        pass
    return None
def _extract_subscription_url(url):
    """Return the URL prefix up to and including '/subscriptions/<id>/'.

    Example: for
    https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/...
    the part ending at the trailing slash after the subscription id.

    :raises ValueError: if no subscription segment is present.
    """
    prefix = re.match(r".*/subscriptions/[a-f0-9-]+/", url, re.IGNORECASE)
    if prefix is None:
        raise ValueError("Unable to extract subscription ID from URL")
    return prefix.group(0)
def _register_rp(session, url_prefix, rp_name):
    """Synchronously register the resource provider named in *rp_name*.

    Posts the registration request, then polls every 10 seconds until the
    provider reports 'Registered'. Returns False if the initial POST fails.

    NOTE(review): the polling loop has no timeout -- it spins until the
    provider registers or the GET raises.
    """
    post_url = "{}providers/{}/register?api-version=2016-02-01".format(url_prefix, rp_name)
    get_url = "{}providers/{}?api-version=2016-02-01".format(url_prefix, rp_name)
    _LOGGER.warning("Resource provider '%s' used by this operation is not "
                    "registered. We are registering for you.", rp_name)
    post_response = session.post(post_url)
    if post_response.status_code != 200:
        _LOGGER.warning("Registration failed. Please register manually.")
        return False
    while True:
        time.sleep(10)
        rp_info = session.get(get_url).json()
        if rp_info['registrationState'] == 'Registered':
            _LOGGER.warning("Registration succeeded.")
            return True
def parse_resource_id(rid):
    """Parses a resource_id into its various parts.

    Returns a dictionary with a single key-value pair, 'name': rid, if invalid resource id.

    :param rid: The resource id being parsed
    :type rid: str
    :returns: A dictionary with the following key/value pairs (if found):

        - subscription:            Subscription id
        - resource_group:          Name of resource group
        - namespace:               Namespace for the resource provider (i.e. Microsoft.Compute)
        - type:                    Type of the root resource (i.e. virtualMachines)
        - name:                    Name of the root resource
        - child_namespace_{level}: Namespace for the child resource of that level
        - child_type_{level}:      Type of the child resource of that level
        - child_name_{level}:      Name of the child resource of that level
        - resource_parent:         Computed parent in the following pattern: providers/{namespace}\
/{parent}/{type}/{name}
        - resource_namespace:      Same as namespace. Note that this may be different than the \
target resource's namespace.
        - resource_type:           Type of the target resource (not the parent)
        - resource_name:           Name of the target resource (not the parent)

    :rtype: dict
    """
    if not rid:
        return {}
    match = _ARMID_RE.match(rid)
    if match:
        result = match.groupdict()
        children = _CHILDREN_RE.finditer(result["children"])
        count = None
        # Number the nested children starting at 1: child_type_1, child_name_1, ...
        for count, child in enumerate(children):
            result.update({
                key + '_%d' % (count + 1): group for key, group in child.groupdict().items()})
        # count stays None when there were no children at all.
        result["last_child_num"] = count + 1 if isinstance(count, int) else None
        result = _populate_alternate_kwargs(result)
    else:
        # Not a valid ARM id: return it under 'name' only.
        result = dict(name=rid)
    # Drop keys whose captured value is None (optional groups that missed).
    return {key: value for key, value in result.items() if value is not None}
def _populate_alternate_kwargs(kwargs):
    """ Translates the parsed arguments into a format used by generic ARM commands
    such as the resource and lock commands.
    """
    # The "resource_*" keys describe the deepest (target) resource: the last
    # child when children exist, otherwise the root resource itself.
    resource_namespace = kwargs['namespace']
    resource_type = kwargs.get('child_type_{}'.format(kwargs['last_child_num'])) or kwargs['type']
    resource_name = kwargs.get('child_name_{}'.format(kwargs['last_child_num'])) or kwargs['name']
    # Mutates kwargs in place, adding resource_parent / child_parent_{n}.
    _get_parents_from_parts(kwargs)
    kwargs['resource_namespace'] = resource_namespace
    kwargs['resource_type'] = resource_type
    kwargs['resource_name'] = resource_name
    return kwargs
def _get_parents_from_parts(kwargs):
    """ Get the parents given all the children parameters.

    Adds 'child_parent_{n}' (the path prefix owning child n) for each child
    level and 'resource_parent' (the prefix owning the target resource) to
    *kwargs*, mutating it in place.
    """
    parent_builder = []
    if kwargs['last_child_num'] is not None:
        # Root resource is the first parent segment.
        parent_builder.append('{type}/{name}/'.format(**kwargs))
        for index in range(1, kwargs['last_child_num']):
            child_namespace = kwargs.get('child_namespace_{}'.format(index))
            if child_namespace is not None:
                parent_builder.append('providers/{}/'.format(child_namespace))
            # Parent of child `index` is everything accumulated so far.
            kwargs['child_parent_{}'.format(index)] = ''.join(parent_builder)
            parent_builder.append(
                '{{child_type_{0}}}/{{child_name_{0}}}/'
                .format(index).format(**kwargs))
        child_namespace = kwargs.get('child_namespace_{}'.format(kwargs['last_child_num']))
        if child_namespace is not None:
            parent_builder.append('providers/{}/'.format(child_namespace))
        kwargs['child_parent_{}'.format(kwargs['last_child_num'])] = ''.join(parent_builder)
    kwargs['resource_parent'] = ''.join(parent_builder)
    return kwargs
def resource_id(**kwargs):
    """Create a valid resource id string from the given parts.

    The id is assembled left to right and stops at the first missing
    required component, returning whatever has been built so far.

    :param dict kwargs: The keyword arguments that will make up the id.

        The method accepts the following keyword arguments:
            - subscription (required): Subscription id
            - resource_group:          Name of resource group
            - namespace:               Namespace for the resource provider (i.e. Microsoft.Compute)
            - type:                    Type of the resource (i.e. virtualMachines)
            - name:                    Name of the resource (or parent if child_name is also \
specified)
            - child_namespace_{level}: Namespace for the child resource of that level (optional)
            - child_type_{level}:      Type of the child resource of that level
            - child_name_{level}:      Name of the child resource of that level

    :returns: A resource id built from the given arguments.
    :rtype: str
    """
    # Treat explicit None values the same as absent arguments.
    kwargs = {key: val for key, val in kwargs.items() if val is not None}
    parts = ['/subscriptions/{subscription}'.format(**kwargs)]
    if 'resource_group' in kwargs:
        parts.append('resourceGroups/{}'.format(kwargs['resource_group']))
    if 'namespace' in kwargs:
        parts.append('providers/{}'.format(kwargs['namespace']))
        if 'type' in kwargs and 'name' in kwargs:
            parts.append('{}/{}'.format(kwargs['type'], kwargs['name']))
            level = 1
            while True:
                # A child may carry its own provider namespace.
                child_ns = kwargs.get('child_namespace_{}'.format(level))
                if child_ns is not None:
                    parts.append('providers/{}'.format(child_ns))
                child_type = kwargs.get('child_type_{}'.format(level))
                child_name = kwargs.get('child_name_{}'.format(level))
                if child_type is None or child_name is None:
                    # First incomplete child level ends the id.
                    break
                parts.append('{}/{}'.format(child_type, child_name))
                level += 1
    return '/'.join(parts)
def is_valid_resource_id(rid, exception_type=None):
    """Validates the given resource id.

    A resource id is valid when rebuilding it from its parsed parts yields
    the same string (case-insensitively).

    :param rid: The resource id being validated.
    :type rid: str
    :param exception_type: Raises this Exception if invalid.
    :type exception_type: :class:`Exception`
    :returns: A boolean describing whether the id is valid.
    :rtype: bool
    """
    is_valid = False
    try:
        # NOTE(review): `rid and ...` short-circuits, so an empty/None rid is
        # returned as-is (falsy, but not strictly False) when no
        # exception_type is given.
        is_valid = rid and resource_id(**parse_resource_id(rid)).lower() == rid.lower()
    except KeyError:
        pass
    if not is_valid and exception_type:
        raise exception_type()
    return is_valid
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
8166e63dfbe2a305ad01e95cf19ee7a4183d8ec8 | cdd150baa9a03daead7112f5eaab8f49f1e362c4 | /hyperas_skipthoughts.py | 808dea3a4621c911df93976d6e9f67306573f041 | [] | no_license | shayezkarim/personality_detection | 3e7bc26c2e3030514f75fd2eb8925f5b094eece9 | 4e6fb55fc1f20587c7bad8018c36057d971cc5c9 | refs/heads/master | 2021-06-02T21:37:44.299032 | 2016-04-04T11:50:03 | 2016-04-04T11:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | __author__ = 'Dimitris'
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from pprint import pprint
def keras_model():
    # Hyperas objective: build/train a 3-layer dense net on skip-thought
    # vectors and return the training loss for hyperopt to minimize.
    # The {{choice(...)}} / {{uniform(...)}} markers are hyperas template
    # placeholders substituted from the function source text -- do not edit
    # them into plain Python.
    from keras.models import Sequential
    from keras.layers.core import Dense
    from keras.regularizers import l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from aiding_funcs.label_handling import MaxMin, MaxMinFit
    import pickle
    print('loading test.p')
    test = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/test.p", "rb" ) )
    print('loading train.p')
    train = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    # Train on all folds but the last; the last fold is held out.
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    # Scale labels to the training min/max range.
    mins, maxs = MaxMin(train_data['labels'])
    T_l = MaxMinFit(train_data['labels'], mins, maxs)
    t_l = MaxMinFit(validation_data['labels'], mins, maxs)
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice([ 'adadelta','sgd','rmsprop', 'adagrad', 'adadelta', 'adam'])}}
    out_dim = 5
    activity_l2_0 = {{uniform(0, 1)}}
    activity_l2_1 = {{uniform(0, 1)}}
    activity_l2_2 = {{uniform(0, 1)}}
    l2_0 = {{uniform(0, 1)}}
    l2_1 = {{uniform(0, 1)}}
    l2_2 = {{uniform(0, 1)}}
    model = Sequential()
    model.add(Dense(Dense_size, activation='sigmoid',W_regularizer=l2(l2_0),activity_regularizer=activity_l2(activity_l2_0),input_dim = train_data['skipthoughts'].shape[-1] ))
    model.add(Dense(Dense_size2, activation='sigmoid',W_regularizer=l2(l2_1),activity_regularizer=activity_l2(activity_l2_1)))
    model.add(Dense(out_dim, activation='linear',W_regularizer=l2(l2_2),activity_regularizer=activity_l2(activity_l2_2)))
    model.compile(loss='rmse', optimizer=opt)
    #model.fit(train_data['skipthoughts'], train_data['labels'], nb_epoch=500, show_accuracy=False, verbose=2)
    #score = model.evaluate( train_data['skipthoughts'], train_data['labels'])
    # NOTE(review): the returned loss is measured on the *training* data
    # (T_l), not the held-out validation fold -- confirm this is intended.
    model.fit(train_data['skipthoughts'], T_l, nb_epoch=500, show_accuracy=False, verbose=2)
    score = model.evaluate( train_data['skipthoughts'], T_l)
    print("score : " +str(score))
    return {'loss': score, 'status': STATUS_OK}
if __name__ == '__main__':
# Run a TPE hyperparameter search over keras_model's {{...}} template and
# print the best parameter assignment found.
best_run = optim.minimize(keras_model, algo=tpe.suggest, max_evals=2000, trials=Trials())
pprint(best_run)
# The string literal below records the best run from a previous search
# (index-encoded choices with their decoded values written alongside).
'''
{'Dense_size': 3, 200
'Dense_size2': 5, 300
'activity_l2_0': 0.05188918775936191,
'activity_l2_1': 0.45047635433513034,
'activity_l2_2': 0.0005117368813977515,
'l2_0': 0.8718331552337388,
'l2_1': 0.5807575417209597,
'l2_2': 0.48965647861094225,
'opt': 5} 'adam'
'''
"dvpappas89@gmail.com"
] | dvpappas89@gmail.com |
343766aa0e9faba33b9e0181f38968f53cf96a5e | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenPublicLifeLabelDeleteModel.py | b8a8e95bad811ee04d68d3c7a776e80cfeb2813d | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 899 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenPublicLifeLabelDeleteModel(object):
    """Request model for the life-label delete API: carries one field,
    the id of the label to delete."""

    def __init__(self):
        # Backing storage for the ``label_id`` property.
        self._label_id = None

    @property
    def label_id(self):
        return self._label_id

    @label_id.setter
    def label_id(self, value):
        self._label_id = value

    def to_alipay_dict(self):
        """Serialize this model to a plain dict in the wire format."""
        params = {}
        value = self.label_id
        if value:
            # Nested models serialize themselves; plain values pass through.
            params['label_id'] = (value.to_alipay_dict()
                                  if hasattr(value, 'to_alipay_dict')
                                  else value)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire-format dict; empty/None input -> None."""
        if not d:
            return None
        model = AlipayOpenPublicLifeLabelDeleteModel()
        if 'label_id' in d:
            model.label_id = d['label_id']
        return model
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
48a93f055fff5c07977f8c95c1b0ff1694abb04e | d6018cbcbf01b72bb420309ddf0fa38c85b01de6 | /fulledits_ratio2_combine.py | 5ec6929e38ca73d46be7511f7a27894d360ce2f6 | [] | no_license | maxwshen/lib-analysis | daf57019ea6727719212b4fbe5741fe4d52adb5a | 6234a17883268b31ac71dabd509bc70183c4f99b | refs/heads/master | 2022-03-14T00:04:02.154164 | 2019-11-21T19:32:13 | 2019-11-21T19:32:13 | 175,507,042 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess, pickle
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util
import pandas as pd
# Default params
# Input/output locations derived from the project config; output directory
# is named after this script file.
inp_dir = _config.OUT_PLACE + 'fulledits_ratio2/'
NAME = util.get_fn(__file__)
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
import _data
# Nucleotide alphabet and its index mapping (A=0, C=1, G=2, T=3).
nts = list('ACGT')
nt_to_idx = {nts[s]: s for s in range(len(nts))}
# Experiment design table pairing treatment and control conditions.
treat_control_df = pd.read_csv(_config.DATA_DIR + 'treatment_control_design.csv', index_col = 0)
##
# Main
##
@util.time_dec
def main():
    """Concatenate per-condition ratio CSVs from fulledits_ratio2/ into two
    combined tables: one for the bootstrap files, one for all other ratios.
    """
    print(NAME)
    import glob

    # Pass 1: every '*bootstrap*' csv -> one combined bootstrap table,
    # tagged with the condition parsed from the file name.
    combined = pd.DataFrame()
    bootstrap_fns = glob.glob(inp_dir + '*bootstrap*')
    timer = util.Timer(total = len(bootstrap_fns))
    for path in bootstrap_fns:
        condition = path.split('/')[-1].replace('_bootstrap.csv', '')
        frame = pd.read_csv(path, index_col = 0)
        frame['Condition'] = condition
        combined = combined.append(frame, ignore_index = True)
        timer.update()
    combined.to_csv(out_dir + '_combined_gmean_bootstrap.csv')

    # Pass 2: the non-bootstrap csvs -> all ratios, each file's rows
    # weighted 1/n so every condition contributes equally to a regression.
    combined = pd.DataFrame()
    plain_fns = [fn for fn in os.listdir(inp_dir) if 'bootstrap' not in fn]
    timer = util.Timer(total = len(plain_fns))
    for fname in plain_fns:
        frame = pd.read_csv(inp_dir + fname)
        condition = fname.replace('.csv', '')
        frame['Condition'] = condition
        row_count = len(frame)
        frame['Regression weight'] = 1 / row_count
        combined = combined.append(frame, ignore_index = True)
        timer.update()
    combined.to_csv(out_dir + '_all_ratios.csv')
    return
return
if __name__ == '__main__':
# Script entry point: build the two combined CSV outputs.
main()
"maxwshen@gmail.com"
] | maxwshen@gmail.com |
231ba5d2ac5db3833dad59496e76c2080e56b2c5 | 2967f6fe104b2ed9cd3f02b855b36a4dced9edc8 | /src/zope/server/tests/test_dualmodechannel.py | f51e7de9f0a694588b8c7dc4368b154edaddd1f4 | [
"ZPL-2.1"
] | permissive | cjwatson/zope.server | 935ff4ab8e5d65decc1c5d95b23305d57684166f | 9c40c8a1ae57d28f1e0fa21e740826befefc30d5 | refs/heads/master | 2023-05-04T15:33:24.972288 | 2019-07-11T12:03:41 | 2019-07-11T12:03:41 | 260,460,082 | 0 | 0 | NOASSERTION | 2020-05-01T13:00:40 | 2020-05-01T13:00:39 | null | UTF-8 | Python | false | false | 3,432 | py | # -*- coding: utf-8 -*-
"""
Tests for dualmodechannel.py.
"""
import unittest
from zope.server.dualmodechannel import DualModeChannel
class TestDualModeChannel(unittest.TestCase):
def test_handle_write_non_async(self):
channel = DualModeChannel(None, None)
channel.set_sync()
# Does nothing, no side effects
channel.handle_write()
def test_handle_read_non_async(self):
channel = DualModeChannel(None, None)
channel.set_sync()
# Does nothing, no side effects
channel.handle_read()
def test_handle_read_will_close(self):
channel = DualModeChannel(None, None)
channel.close_when_done()
# Does nothing, no side effects
channel.handle_read()
def test_handle_write_flush_error(self):
import socket
class C(DualModeChannel):
error_called = False
def __init__(self):
DualModeChannel.__init__(self, None, None)
def _flush_some(self):
raise socket.error()
def handle_error(self):
self.error_called = True
channel = C()
channel.outbuf.append(b'data')
channel.handle_write()
self.assertTrue(channel.error_called)
def test_handle_read_recv_error(self):
import socket
class C(DualModeChannel):
error_called = False
def __init__(self):
DualModeChannel.__init__(self, None, None)
def recv(self, _count):
raise socket.error()
def handle_error(self):
self.error_called = True
channel = C()
channel.handle_read()
self.assertTrue(channel.error_called)
def test_write_flushes(self):
class C(DualModeChannel):
flush_called = False
def _flush_some(self):
self.flush_called = True
return False
class A(object):
send_bytes = 1
outbuf_overflow = 100
channel = C(None, None, A())
channel.write(b'some bytes')
self.assertTrue(channel.flush_called)
def test_channels_accept_iterables(self):
# Channels accept iterables (they special-case strings).
from zope.server.tests.test_serverbase import FakeSocket
socket = FakeSocket()
channel = DualModeChannel(socket, ('localhost', 42))
written = channel.write(b"First")
self.assertEqual(5, written)
channel.flush()
self.assertEqual(socket.data.decode('ascii'),
'First')
written = channel.write([b"\n", b"Second", b"\n", b"Third"])
self.assertEqual(13, written)
channel.flush()
self.assertEqual(socket.data.decode('ascii'),
"First\n"
"Second\n"
"Third")
def count():
yield b'\n1\n2\n3\n'
yield b'I love to count. Ha ha ha.'
written = channel.write(count())
self.assertEqual(written, 33)
channel.flush()
self.assertEqual(socket.data.decode('ascii'),
"First\n"
"Second\n"
"Third\n"
"1\n"
"2\n"
"3\n"
"I love to count. Ha ha ha.")
| [
"jamadden@gmail.com"
] | jamadden@gmail.com |
b7ebcf3e574b150fdfafd4aa4c3e105ec366084d | d80f3cbc26695590895b46c3049e40437a289f70 | /dataset_evaluate/mxnet_evaluate_lib/lib/dataset/pycocotools/coco.py | 07123656c856e89177a4204d1dc3765e271d165c | [] | no_license | unsky/unsky_tools | 704d308e56c7bdde68999e0f64636122418b6a13 | 413bd0f45c9e03dc6c72db7b4bf5b2b540fc45ad | refs/heads/master | 2021-01-01T16:22:07.102959 | 2017-09-05T02:37:29 | 2017-09-05T02:37:29 | 97,814,180 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,008 | py | __author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import datetime
import time
#import matplotlib.pyplot as plt
#from matplotlib.collections import PatchCollection
#from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
# NOTE(review): pycocotools COCO API helper class (Python 2 syntax:
# print statements, dict.keys() slicing). Leading indentation appears
# stripped in this copy of the file; the line order is unchanged.
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
# Builds the reverse-lookup tables: ann id -> ann, image id -> [anns],
# category id -> [image ids], image id -> image, category id -> category.
print 'creating index...'
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
# this can be changed by defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
# Filters are applied in sequence: image ids, then categories, then
# area range (strict bounds), then the crowd flag.
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
# Intersect across categories: an image must contain ALL given cats.
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
# NOTE(review): relies on plt / Polygon / PatchCollection, whose
# matplotlib imports are commented out at module top in this copy —
# confirm they are provided elsewhere before calling this method.
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
tic = time.time()
anns = json.load(open(resFile))
assert type(anns) == list, 'results in not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
# Result type is inferred from the first record: caption, bbox, or
# segmentation; ids are (re)assigned sequentially starting at 1.
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
# Sequential fetch; files already present on disk are skipped.
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
# NOTE(review): the bare expression below has no effect — looks
# like leftover debugging.
R['counts'][pos]
M[n] = val
n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
# Run boundaries are where consecutive pixels differ.
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
@staticmethod
def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=np.bool)
for s in S:
N = len(s)
# Polygon vertices are interleaved [x0, y0, x1, y1, ...]; clip to bounds.
rr, cc = polygon(np.array(s[1:N:2]).clip(max=h-1), \
np.array(s[0:N:2]).clip(max=w-1)) # (y, x)
M[rr, cc] = 1
return M
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = mask.frPyObjects(segm, h, w)
rle = mask.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = mask.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = mask.decode(rle)
return m
| [
"2081264@qq.com"
] | 2081264@qq.com |
7182a4e9f4a9770d1dc95f0bb348efded753d61c | 00f3f33b977e87d23c2158b906402ccb3cc4c42e | /venv/lightomatic-env/bin/pyreverse | 9a144a67653b5fed84c2f5f16d8a8f21450d76f3 | [] | no_license | cosmicRover/lightoMatic | 9591499ca82e5fa5afad4d008307c6187e671aca | 9d5b1b4a61c735fd2331e23e834a8f05b961c97f | refs/heads/master | 2020-05-31T19:27:08.456380 | 2019-06-07T03:26:21 | 2019-06-07T03:26:21 | 190,455,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | #!/home/joy/pythondevelopments/lightoMatic/venv/lightomatic-env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ('-script.py'/'-script.pyw'/'.exe')
    # from argv[0] so the tool reports a clean program name, then hand
    # control to pylint's pyreverse entry point.
    program_name = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = program_name
    sys.exit(run_pyreverse())
| [
"jpaul3250@gmail.com"
] | jpaul3250@gmail.com | |
5698e5a876e761689f9c520384b1f7e6870ac36f | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/weapon/shared_pistol_blaster_dl44.py | a31cf79b61d98013892eb7aa11c560b2892efa92 | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 455 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the draft-schematic intangible for the DL-44 blaster pistol.

    ``kernel`` is accepted for template-API compatibility but not used here.
    """
    schematic = Intangible()
    schematic.template = "object/draft_schematic/weapon/shared_pistol_blaster_dl44.iff"
    schematic.attribute_template_id = -1
    schematic.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return schematic
"rwl3564@rit.edu"
] | rwl3564@rit.edu |
a7287d052f82a938605f40dd7adeb10780d563db | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_tries.py | af8c17ef049137b1628b04c5efd273459fedff18 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py |
from xai.brain.wordbase.verbs._try import _TRY
# class header
class _TRIES(_TRY):
    """Word entry for "tries", the inflected form of the base verb "try"."""

    def __init__(self):
        _TRY.__init__(self)
        self.name = "TRIES"
        self.specie = 'verbs'
        self.basic = "try"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
cc05aa4ae344528373cbfecd81a1db58068e7630 | 54d17336ca03801bd9c9ef37be8642b332ab71c4 | /osm/SO/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/__init__.py | b7759435f71e74e0e62c6d2f40532748e11cad31 | [] | no_license | dennis-me/Pishahang | 2428379c4f7d3ee85df4b85727ce92e8fe69957a | cdd0abe80a76d533d08a51c7970d8ded06624b7d | refs/heads/master | 2020-09-07T12:35:54.734782 | 2020-01-24T20:11:33 | 2020-01-24T20:11:33 | 220,782,212 | 2 | 0 | null | 2019-11-10T11:46:44 | 2019-11-10T11:46:43 | null | UTF-8 | Python | false | false | 51 | py | from .rwmonparam import MonitoringParameterTasklet
| [
"github@OrangeOnBlack.de"
] | github@OrangeOnBlack.de |
ec55cd3ac6ec5285cd52aceae1ef4ae1e62ffc2d | c4a119311ac01bbe7d5ab81b1d3d663ad0900ab6 | /python-build/python-libs/xmpppy/setup.py | 1e145617a61190a81d79af3c4ed04773925b3d8d | [
"Apache-2.0",
"GPL-3.0-only"
] | permissive | kuri65536/python-for-android | 1d8d99e81e64bc87805c2c58ee0dcf43d413e72e | 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891 | refs/heads/master | 2021-06-02T01:17:29.685199 | 2018-05-05T00:12:13 | 2018-05-05T01:36:22 | 32,235,625 | 280 | 122 | Apache-2.0 | 2020-05-15T06:47:36 | 2015-03-14T22:44:36 | Python | UTF-8 | Python | false | false | 1,478 | py | #!/usr/bin/python
# -*- coding: koi8-r -*-
from distutils.core import setup,sys
from setuptools import setup
import os
# Very old distutils releases do not understand the 'classifiers' and
# 'download_url' metadata keywords, so null them out there.
if sys.version < '2.2.3':
    from distutils.dist import DistributionMetadata
    DistributionMetadata.classifiers = None
    DistributionMetadata.download_url = None

# Set proper release version in source code also!!!
setup(
    name='xmpppy',
    version='0.5.0rc1',
    author='Alexey Nezhdanov',
    author_email='snakeru@users.sourceforge.net',
    url='http://xmpppy.sourceforge.net/',
    description='XMPP-IM-compliant library for jabber instant messenging.',
    long_description="""This library provides functionality for writing xmpp-compliant
clients, servers and/or components/transports.
It was initially designed as a \"rework\" of the jabberpy library but
has become a separate product.
Unlike jabberpy it is distributed under the terms of GPL.""",
    download_url='http://sourceforge.net/project/showfiles.php?group_id=97081&package_id=103821',
    packages=['xmpp'],
    license="GPL",
    platforms="All",
    keywords=['jabber', 'xmpp'],
    classifiers=[
        'Topic :: Communications :: Chat',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Natural Language :: English',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
    ],
)
| [
"manuel@aircable.net"
] | manuel@aircable.net |
f2a5dbbfad5429b613f7fbd0482e5f5d3441bdb3 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/aliyun/aliyun-openapi-python-sdk/aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/CreateLoadBalancerUDPListenerRequest.py | 0374135d7ac8fdca7269fcdf3feba84679d20533 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 3,984 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
# Request object for the SLB 'CreateLoadBalancerUDPListener' API
# (product 'Slb', version '2014-05-15'). Each get_*/set_* pair below is
# generated accessor boilerplate that proxies one query parameter of the
# same name. Indentation appears stripped in this copy of the file.
class CreateLoadBalancerUDPListenerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Slb', '2014-05-15', 'CreateLoadBalancerUDPListener')
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_LoadBalancerId(self):
return self.get_query_params().get('LoadBalancerId')
def set_LoadBalancerId(self,LoadBalancerId):
self.add_query_param('LoadBalancerId',LoadBalancerId)
def get_ListenerPort(self):
return self.get_query_params().get('ListenerPort')
def set_ListenerPort(self,ListenerPort):
self.add_query_param('ListenerPort',ListenerPort)
def get_BackendServerPort(self):
return self.get_query_params().get('BackendServerPort')
def set_BackendServerPort(self,BackendServerPort):
self.add_query_param('BackendServerPort',BackendServerPort)
def get_Bandwidth(self):
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self,Bandwidth):
self.add_query_param('Bandwidth',Bandwidth)
def get_Scheduler(self):
return self.get_query_params().get('Scheduler')
def set_Scheduler(self,Scheduler):
self.add_query_param('Scheduler',Scheduler)
def get_PersistenceTimeout(self):
return self.get_query_params().get('PersistenceTimeout')
def set_PersistenceTimeout(self,PersistenceTimeout):
self.add_query_param('PersistenceTimeout',PersistenceTimeout)
def get_HealthyThreshold(self):
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self,HealthyThreshold):
self.add_query_param('HealthyThreshold',HealthyThreshold)
def get_UnhealthyThreshold(self):
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self,UnhealthyThreshold):
self.add_query_param('UnhealthyThreshold',UnhealthyThreshold)
def get_HealthCheckConnectTimeout(self):
return self.get_query_params().get('HealthCheckConnectTimeout')
def set_HealthCheckConnectTimeout(self,HealthCheckConnectTimeout):
self.add_query_param('HealthCheckConnectTimeout',HealthCheckConnectTimeout)
def get_HealthCheckConnectPort(self):
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self,HealthCheckConnectPort):
self.add_query_param('HealthCheckConnectPort',HealthCheckConnectPort)
# NOTE(review): the lower-case leading letter below presumably matches
# the wire parameter name 'healthCheckInterval' — verify against the API.
def get_healthCheckInterval(self):
return self.get_query_params().get('healthCheckInterval')
def set_healthCheckInterval(self,healthCheckInterval):
self.add_query_param('healthCheckInterval',healthCheckInterval)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
"rares.begu@gmail.com"
] | rares.begu@gmail.com |
08874ff08accc44a3c0a0e0a92d886914cee7c0c | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/appplatform/azure-mgmt-appplatform/generated_samples/api_portals_validate_domain.py | f8f3c175988334e6b10e93453d559364436bf476 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,688 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.appplatform import AppPlatformManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-appplatform
# USAGE
python api_portals_validate_domain.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Run the ApiPortals_ValidateDomain sample.

    Validates the custom domain ``mydomain.io`` against the default API
    portal of an Azure Spring Apps service instance.  DefaultAzureCredential
    reads AZURE_CLIENT_ID / AZURE_TENANT_ID / AZURE_CLIENT_SECRET from the
    environment; the subscription id below is a placeholder.
    """
    client = AppPlatformManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )

    response = client.api_portals.validate_domain(
        resource_group_name="myResourceGroup",
        service_name="myservice",
        api_portal_name="default",
        validate_payload={"name": "mydomain.io"},
    )
    print(response)


# x-ms-original-file: specification/appplatform/resource-manager/Microsoft.AppPlatform/stable/2022-12-01/examples/ApiPortals_ValidateDomain.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
e3fade9d6427a0fd859bce2452a69b4b456812e4 | 93ab050518092de3a433b03744d09b0b49b541a6 | /iniciante/Mundo 03/Exercícios Corrigidos/Exercício 086.py | 025478d7b517f7bbf55da3cbbcb5478b7fce08ae | [
"MIT"
] | permissive | ggsant/pyladies | 1e5df8772fe772f8f7d0d254070383b9b9f09ec6 | 37e11e0c9dc2fa2263ed5b42df5a395169408766 | refs/heads/master | 2023-01-02T11:49:44.836957 | 2020-11-01T18:36:43 | 2020-11-01T18:36:43 | 306,947,105 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | """
"""
EXERCISE 086: Matrices in Python (originally "EXERCÍCIO 086: Matriz em Python")

Build a 3x3 matrix filled with values read from the keyboard, then print
it back with aligned formatting:

    0 [_][_][_]
    1 [_][_][_]
    2 [_][_][_]
       0  1  2
"""
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]

# Fill the matrix cell by cell from user input (prompts kept in Portuguese,
# as in the original course material).
for l in range(0, 3):
    for c in range(0, 3):
        matriz[l][c] = int(input(f'Digite um valor para [{l}, {c}]: '))

print('-=' * 30)

# Print each row, centering every value in a 5-character cell.
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
    print()
| [
"61892998+ggsant@users.noreply.github.com"
] | 61892998+ggsant@users.noreply.github.com |
5d5bc7602ec344e62021ba7cb378c8157ab4bec7 | b8ef3a93cb9fa4d60a785e7f7b5adafc907e6f65 | /ruman/cron/manipulate_model/calculate.py | a15427d01093edd20c307abcaa9e2c424d4f7b34 | [] | no_license | lvleilei/screen | 46289b8a80850ac4db8a08b58eb4fc3fd65a0e01 | 13d4f9ac91a5e610f68525ff2a2306372810b681 | refs/heads/master | 2020-03-19T13:05:55.207809 | 2018-06-08T03:37:22 | 2018-06-08T03:37:22 | 136,560,545 | 0 | 0 | null | 2018-06-08T03:17:20 | 2018-06-08T03:17:20 | null | UTF-8 | Python | false | false | 20,090 | py | #-*-coding: utf-8-*-
import sys
reload(sys)
sys.path.append("../../")
import pandas as pd
import math
import numpy as np
from config import *
from time_utils import *
from sql_utils import *
from elasticsearch import Elasticsearch
#from createframe import es_search
#from create import table1,table2
def what_quarter(theday):
    """Map an ISO date string to three quarter-start dates.

    For the calendar quarter containing *theday* ('YYYY-MM-DD'), returns a
    tuple of 'YYYY-MM-DD' strings::

        (start of the quarter two quarters earlier,
         start of the quarter three quarters earlier,
         start of the current quarter)

    E.g. ``'2018-02-15' -> ('2017-07-01', '2017-04-01', '2018-01-01')``.
    """
    year = int(str(theday).split('-')[0])
    month = int(str(theday).split('-')[1])
    if month in (1, 2, 3):
        return '%d-07-01' % (year - 1), '%d-04-01' % (year - 1), '%d-01-01' % (year)
    elif month in (4, 5, 6):
        return '%d-10-01' % (year - 1), '%d-07-01' % (year - 1), '%d-04-01' % (year)
    elif month in (7, 8, 9):
        return '%d-01-01' % (year), '%d-10-01' % (year - 1), '%d-07-01' % (year)
    else:
        return '%d-04-01' % (year), '%d-01-01' % (year), '%d-10-01' % (year)
class one2six_frame:
    """Build windowed indicator frames over a historical date range.

    Loads ``dataframe/<tablename>.json`` -- a frame indexed by trade date
    with one column per stock code -- and aggregates it over the dates in
    *datelist*, using *datelistlong* (two extra years of history) as the
    lookback calendar for the 1/5/20/60/125/250-day windows.
    """

    def __init__(self, tablename, datelist, datelistlong):
        self.tablename = tablename
        readframe = pd.read_json('dataframe/' + tablename + '.json')
        # JSON round-tripping scrambles the column order, so re-sort columns.
        readframe = readframe.sort_index(axis=1)
        self.data_frame = readframe
        # Convert 'YYYY-MM-DD' strings into pandas Timestamps.
        self.datelist = [pd.Timestamp(int(date.split('-')[0]), int(date.split('-')[1]), int(date.split('-')[2])) for date in datelist]
        self.datelistlong = [pd.Timestamp(int(date.split('-')[0]), int(date.split('-')[1]), int(date.split('-')[2])) for date in datelistlong]

    def other_towhatday(self, num):
        # For indicators other than returns/turnover: rolling sum (event
        # frequency) over the last *num* trade days ending at each date.
        framewhatday = pd.DataFrame(columns=self.data_frame.columns)
        if num == 1:
            # Single-day window: just select the requested dates.
            return self.data_frame.loc[self.datelist]
        else:
            for datenum in range(len(self.datelist)):
                sumlist = []
                for count in range(num):
                    if self.datelistlong[self.datelistlong.index(self.datelist[datenum]) - count] in self.data_frame.index:
                        sumlist.append(self.data_frame.loc[self.datelistlong[self.datelistlong.index(self.datelist[datenum]) - count]])
                    else:
                        break
                # Convert to a plain list: the JSON-loaded frame has integer
                # stock-code columns, and assigning a Series would mis-align them.
                framewhatday.loc[self.datelist[datenum]] = list(sum(sumlist))
            return framewhatday

    def market_towhatday(self, num):
        # Log returns (price table) or turnover growth rate over *num* days.
        framewhatday = pd.DataFrame(columns=self.data_frame.columns)
        for datenum in range(len(self.datelist)):
            if self.datelistlong[self.datelistlong.index(self.datelist[datenum]) - num] not in self.data_frame.index:
                framewhatday.loc[self.datelist[datenum]] = None
            else:
                a = self.data_frame.loc[self.datelist[datenum]]
                b = self.data_frame.loc[self.datelistlong[self.datelistlong.index(self.datelist[datenum]) - num]]
                if self.tablename == MARKET_PRICE_FU:
                    # Price table: log return log(a) - log(b).
                    framewhatday.loc[self.datelist[datenum]] = list(pd.Series([math.log(i) for i in a]) - pd.Series([math.log(i) for i in b]))
                else:
                    # Turnover table: growth rate a / b - 1.
                    framewhatday.loc[self.datelist[datenum]] = list(a / b - 1)
        return framewhatday

    def simu(self):
        # Private-fund counts need no window split: report the increase
        # versus the previous quarter (about 60 trade days back).
        framewhatday = pd.DataFrame(columns=self.data_frame.columns)
        for datenum in range(len(self.datelist)):
            if self.datelistlong[self.datelistlong.index(self.datelist[datenum]) - 60] not in self.data_frame.index:
                framewhatday.loc[self.datelist[datenum]] = 0
            else:
                framewhatday.loc[self.datelist[datenum]] = list(self.data_frame.loc[self.datelist[datenum]] - self.data_frame.loc[self.datelistlong[self.datelistlong.index(self.datelist[datenum]) - 60]])
        return framewhatday

    def jiejin_quarter(self):
        # Combine the share-unlock ("jiejin") series with quarterly
        # fundamentals, using what_quarter() to pick the reporting quarters.
        framewhatday1 = pd.DataFrame(columns=self.data_frame.columns)
        framewhatday2 = pd.DataFrame(columns=self.data_frame.columns)
        framewhatday3 = pd.DataFrame(columns=self.data_frame.columns)
        netprofit = pd.read_json('dataframe/' + NETPROFIT_NETPROFIT + '.json')
        netprofit = netprofit.sort_index(axis=1)
        holder_top10pct = pd.read_json('dataframe/' + ES_HOLDERS_PCT_HOLDER_TOP10PCT + '.json')
        holder_top10pct = holder_top10pct.sort_index(axis=1)
        holder_pctbyinst = pd.read_json('dataframe/' + ES_HOLDERS_PCT_HOLDER_PCTBYINST + '.json')
        holder_pctbyinst = holder_pctbyinst.sort_index(axis=1)
        for date in self.datelist:
            quarterday = what_quarter(str(date).split()[0])
            # Net-profit growth between the two most recent reported quarters.
            if quarterday[1] in netprofit.index:
                framewhatday1.loc[date] = (netprofit.loc[quarterday[0]] / netprofit.loc[quarterday[1]] - 1)
            else:
                framewhatday1.loc[date] = None
            # Top-10 holder percentage for the current quarter (scaled to [0, 1]).
            if quarterday[2] in holder_top10pct.index:
                framewhatday2.loc[date] = holder_top10pct.loc[quarterday[2]] / 100
            else:
                framewhatday2.loc[date] = 0
            # Institutional holder percentage for the current quarter.
            if quarterday[2] in holder_pctbyinst.index:
                framewhatday3.loc[date] = holder_pctbyinst.loc[quarterday[2]] / 100
            else:
                framewhatday3.loc[date] = 0
        # Null net profit is treated as zero growth.
        framewhatday1 = framewhatday1.fillna(0)
        return {JIEJIN_DATE: self.data_frame, NETPROFIT_NETPROFIT: framewhatday1, ES_HOLDERS_PCT_HOLDER_TOP10PCT: framewhatday2, ES_HOLDERS_PCT_HOLDER_PCTBYINST: framewhatday3}
class one2six_frame_theday:
    """Single-day variant of :class:`one2six_frame`.

    All windows end on *theday*; they are computed from the 251-row slice of
    history ending on that date (250 lookback days plus the day itself).
    """

    def __init__(self, tablename, theday):
        self.tablename = tablename
        readframe = pd.read_json('dataframe/' + tablename + '.json')
        readframe = readframe.sort_index(axis=1)
        indexlist = []
        self.theday = theday
        for index in readframe.index:
            indexlist.append(str(index).split()[0])
        try:
            # Select the 251 trade days up to and including *theday*.
            self.data_frame = readframe.loc[readframe.index[indexlist.index(theday) - 250:indexlist.index(theday) + 1]]
        except:
            # NOTE(review): bare except re-raised as IndexError -- this also
            # masks the ValueError raised when *theday* is absent; consider
            # catching ValueError explicitly.
            raise IndexError

    def other_towhatday(self, num):
        # Rolling sum (event frequency) over the last *num* trade days.
        framewhatday = pd.DataFrame(columns=self.data_frame.columns)
        if num == 1:
            # NOTE(review): returns the whole 251-day slice rather than only
            # *theday*'s row -- confirm downstream callers expect this.
            return self.data_frame
        else:
            datenum = 250
            sumlist = []
            for count in range(num):
                sumlist.append(self.data_frame.loc[self.data_frame.index[datenum - count]])
            # list() keeps the integer stock-code columns aligned on assignment.
            framewhatday.loc[self.data_frame.index[datenum]] = list(sum(sumlist))
            return framewhatday

    def market_towhatday(self, num):
        # Log return (price table) or turnover growth over *num* days,
        # computed for the final day of the slice only.
        framewhatday = pd.DataFrame(columns=self.data_frame.columns)
        datenum = 250
        a = self.data_frame.loc[self.data_frame.index[datenum]]
        b = self.data_frame.loc[self.data_frame.index[datenum - num]]
        if self.tablename == MARKET_PRICE_FU:
            # Price table: log return log(a) - log(b).
            framewhatday.loc[self.data_frame.index[datenum]] = list(pd.Series([math.log(i) for i in a]) - pd.Series([math.log(i) for i in b]))
        else:
            # Turnover table: growth rate a / b - 1.
            l = list(a / b - 1)
            framewhatday.loc[self.data_frame.index[datenum]] = l
        return framewhatday

    def simu(self):
        # Increase versus the previous quarter (~60 trade days back).
        framewhatday = pd.DataFrame(columns=self.data_frame.columns)
        datenum = 250
        framewhatday.loc[self.data_frame.index[datenum]] = list(self.data_frame.loc[self.data_frame.index[datenum]] - self.data_frame.loc[self.data_frame.index[datenum - 60]])
        return framewhatday

    def jiejin_quarter(self):
        # Combine the share-unlock series with quarterly fundamentals for
        # the single day *theday*.
        framewhatday1 = pd.DataFrame(columns=self.data_frame.columns)
        framewhatday2 = pd.DataFrame(columns=self.data_frame.columns)
        framewhatday3 = pd.DataFrame(columns=self.data_frame.columns)
        netprofit = pd.read_json('dataframe/' + NETPROFIT_NETPROFIT + '.json')
        netprofit = netprofit.sort_index(axis=1)
        holder_top10pct = pd.read_json('dataframe/' + ES_HOLDERS_PCT_HOLDER_TOP10PCT + '.json')
        holder_top10pct = holder_top10pct.sort_index(axis=1)
        holder_pctbyinst = pd.read_json('dataframe/' + ES_HOLDERS_PCT_HOLDER_PCTBYINST + '.json')
        holder_pctbyinst = holder_pctbyinst.sort_index(axis=1)
        quarterday = what_quarter(self.theday)
        framewhatday1.loc[self.theday] = list((netprofit.loc[quarterday[0]] / netprofit.loc[quarterday[1]] - 1))
        framewhatday2.loc[self.theday] = holder_top10pct.loc[quarterday[2]] / 100
        framewhatday3.loc[self.theday] = holder_pctbyinst.loc[quarterday[2]] / 100
        # Null net profit is treated as zero growth.
        framewhatday1 = framewhatday1.fillna(0)
        return {JIEJIN_DATE: self.data_frame[self.data_frame.index == self.theday], NETPROFIT_NETPROFIT: framewhatday1, ES_HOLDERS_PCT_HOLDER_TOP10PCT: framewhatday2, ES_HOLDERS_PCT_HOLDER_PCTBYINST: framewhatday3}
def get_all(reason,year1,month1,day1,year2,month2,day2):
conn = default_db()
cur = conn.cursor()
readframe = pd.read_json('dataframe/%s.json' % (MARKET_PRICE_FU)) #对于需要使用的json进行读取,需更改
codelist = readframe.columns
codelists = []
for code in codelist:
codelists.append('%06d' % code)
codelists.sort()
while 1:
try:
datelist = get_tradelist(year1,month1,day1,year2,month2,day2) #获得交易时间列表
datelistlong = get_tradelist(year1 - 2,month1,day1,year2,month2,day2)
break
except:
pass
datelist_frame = datelist*len(codelists) #生成股票数量的时间序列
codelists_frame = []
for code in codelists:
codelists_frame += [code]*len(datelist) #每个股票对应时间序列的个数
df = pd.DataFrame()
df['date'] = datelist_frame
df['code'] = codelists_frame
if reason == 1:
tablelist = [i[1] for i in table1]
print 'Ready to create DataFrame...'
for tablename in tablelist: #对于31个数据框循环计算
if tablename == tablelist[0]: #对于不同的表采用不同的算法
calculate = one2six_frame(tablename,datelist,datelistlong)
for n in [1,5,20,60,125,250]:
print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.market_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True) #将每一列取出后重排,作为新数据框的一列
elif tablename == tablelist[1]:
print 'Creating DataFrame ' + tablename + ' ...'
calculate = one2six_frame(tablename,datelist,datelistlong)
frame = calculate.simu()
df[tablename] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True)
else:
calculate = one2six_frame(tablename,datelist,datelistlong)
for n in [1,5,20,60,125,250]:
print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.other_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True)
return df
elif reason == 2:
tablelist = [i[1] for i in table2]
print 'Ready to create DataFrame...'
for tablename in tablelist: #对于31个数据框循环计算
if tablename == tablelist[0] or tablename == tablelist[1]: #对于不同的表采用不同的算法
calculate = one2six_frame(tablename,datelist,datelistlong)
for n in [1,5,20,60,125,250]:
print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.market_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True) #将每一列取出后重排,作为新数据框的一列
elif tablename == tablelist[2]:
calculate = one2six_frame(tablename,datelist,datelistlong)
frame = calculate.jiejin_quarter()
for i in frame.keys():
print 'Creating DataFrame ' + i + ' ...'
df[i] = pd.concat([frame[i][code] for code in frame[i].columns]).reset_index(drop=True)
else:
calculate = one2six_frame(tablename,datelist,datelistlong)
for n in [1,5,20,60,125,250]:
print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.other_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True)
return df
#print df
#df.to_csv('test3.csv',encoding='utf_8_sig')
print 'Finish creating DataFrame!'
def get_all_theday(reason,theday):
conn = default_db()
cur = conn.cursor()
readframe = pd.read_json('dataframe/%s.json' % (MARKET_PRICE_FU)) #对于需要使用的json进行读取,需更改
codelist = readframe.columns
codelists = []
for code in codelist:
codelists.append('%06d' % code)
codelists.sort()
datelist = [theday]
datelist_frame = datelist*len(codelists) #生成股票数量的时间序列
codelists_frame = []
for code in codelists:
codelists_frame += [code]*len(datelist) #每个股票对应时间序列的个数
df = pd.DataFrame()
df['date'] = datelist_frame
df['code'] = codelists_frame
if reason == 1:
tablelist = [i[1] for i in table1]
print 'Ready to create DataFrame...'
for tablename in tablelist: #对于31个数据框循环计算
if tablename == tablelist[0]: #对于不同的表采用不同的算法
calculate = one2six_frame_theday(tablename,theday)
for n in [1,5,20,60,125,250]:
#print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.market_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True) #将每一列取出后重排,作为新数据框的一列
elif tablename == tablelist[1]:
#print 'Creating DataFrame ' + tablename + ' ...'
calculate = one2six_frame_theday(tablename,theday)
frame = calculate.simu()
df[tablename] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True)
else:
calculate = one2six_frame_theday(tablename,theday)
for n in [1,5,20,60,125,250]:
#print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.other_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True)
#df.to_csv('day1.csv',encoding='utf_8_sig')
print 'Finish creating DataFrame!'
#print df
return df
elif reason == 2:
tablelist = [i[1] for i in table2]
print 'Ready to create DataFrame...'
for tablename in tablelist: #对于31个数据框循环计算
if tablename == tablelist[0] or tablename == tablelist[1]: #对于不同的表采用不同的算法
calculate = one2six_frame_theday(tablename,theday)
for n in [1,5,20,60,125,250]:
#print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.market_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True) #将每一列取出后重排,作为新数据框的一列
elif tablename == tablelist[2]:
#print 'Creating DataFrame ' + tablename + ' ...'
calculate = one2six_frame_theday(tablename,theday)
frame = calculate.jiejin_quarter()
for i in frame.keys():
#print 'Creating DataFrame ' + i + ' ...'
df[i] = pd.concat([frame[i][code] for code in frame[i].columns]).reset_index(drop=True)
else:
calculate = one2six_frame_theday(tablename,theday)
for n in [1,5,20,60,125,250]:
#print 'Creating DataFrame ' + tablename + str(n) + 'day' + ' ...'
frame = calculate.other_towhatday(n)
df[tablename + str(n) + 'day'] = pd.concat([frame[code] for code in frame.columns]).reset_index(drop=True)
#df.to_csv('day2.csv',encoding='utf_8_sig')
print 'Finish creating DataFrame!'
#print df
return df
'''
def get_all_theday_pro(reason,theday):
trade_before = ts2datetimestr(datetimestr2ts(theday) - 2592000).split('-') #获取前30天日期
trade_after = ts2datetimestr(datetimestr2ts(theday) + 2592000).split('-') #获取后30天日期
trade_list = get_tradelist(int(trade_before[0]),int(trade_before[1]),int(trade_before[2]),int(trade_after[0]),int(trade_after[1]),int(trade_after[2])) #获取可能包含当天的交易日列表
if theday in trade_list:
df = get_all_theday(reason,theday)
#df.to_csv('/home/lfz/python/yaoyan/modelcode/gettoday1.csv',encoding='utf_8_sig')
return df
else:
print '貌似你输入的日期并不是交易日'
'''
if __name__ == "__main__":
    # get_all('table2', 2014, 5, 1, 2014, 5, 31)
    get_all_theday(1, '2016-01-04')

# Developer notes (translated from the original Chinese trailer):
# - the overall summary table was exported successfully
# - the daily summary table was exported successfully
# - two open questions were resolved: null net profit is set to 0, and
#   null turnover is handled the same way as returns
"1257819385@qq.com"
] | 1257819385@qq.com |
6f75e2758605c523dbba4a1707b229552ae59f5c | 877866345067cc6e356bcaaaa29a27b335cc4095 | /bulmaio_jinja2/sidebar/page/models.py | ea57b04810ae78a2ca1400d9086f386d6aa56ba2 | [
"MIT"
] | permissive | pauleveritt/bulmaio_jinja2 | 28a6e3da3dd577075cd9e658a6e7d7eace765fd8 | 97e09e539469337e05aa6c7a268264f2ca523da6 | refs/heads/master | 2020-03-23T16:51:11.894880 | 2018-10-04T19:10:11 | 2018-10-04T19:10:11 | 141,830,196 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | from typing import List, Optional
from bulmaio_jinja2.author.models import Author
from bulmaio_jinja2.base_model import CustomBaseModel
class SidebarPublished(CustomBaseModel):
    # Publication metadata shown in the sidebar.
    # NOTE(review): fields annotated `str` default to None -- pydantic-style
    # models usually want Optional[str] here; confirm against CustomBaseModel.
    published_date: str = None
    published_time: str = None
    author: Optional[Author] = None


class SidebarPrevNextItem(CustomBaseModel):
    # One link of the prev/next navigation pair.
    href: str
    title: str


class SidebarPrevNext(CustomBaseModel):
    # Optional links to the previous and next pages.
    prev: SidebarPrevNextItem = None
    next: SidebarPrevNextItem = None


class SidebarReference(CustomBaseModel):
    # A single labelled reference link.
    label: str
    href: str


class SidebarReferenceGroup(CustomBaseModel):
    # References of one kind (reftype) grouped together.
    reftype: str
    entries: List[SidebarReference]


class SidebarReferences(CustomBaseModel):
    # All reference groups for a page; defaults to no groups.
    entries: List[SidebarReferenceGroup] = []


class PageSidebar(CustomBaseModel):
    # Aggregate sidebar model for one page.
    published: SidebarPublished = None
    prev_next: SidebarPrevNext
    references: SidebarReferences
| [
"pauleveritt@me.com"
] | pauleveritt@me.com |
469fe4540152fbd03607bf2825896ac72329ff43 | 5c1746c4ae9f5eb4c94c9b3a70a4d3feb966ceda | /pcapkit/vendor/reg/__init__.py | 31108b29a8037d07f9367c776cb95fa3d91292ad | [
"BSD-3-Clause"
] | permissive | JarryShaw/PyPCAPKit | 8b53c76cf54f2ef1a9e4d0a7aeb3d52605dc1d5a | a6fe49ec58f09e105bec5a00fb66d9b3f22730d9 | refs/heads/main | 2023-08-29T12:49:58.611378 | 2023-08-28T14:05:43 | 2023-08-28T14:05:43 | 109,791,841 | 204 | 29 | BSD-3-Clause | 2023-09-11T17:09:06 | 2017-11-07T05:41:56 | Python | UTF-8 | Python | false | false | 1,411 | py | # -*- coding: utf-8 -*-
# pylint: disable=unused-import
"""Protocol Type Registry Vendor Crawlers
============================================
.. module:: pcapkit.vendor.reg
This module contains all vendor crawlers of protocol type registry
implementations. Available enumerations include:
.. list-table::
* - :class:`LINKTYPE <pcapkit.vendor.reg.linktype.LinkType>`
- Link-Layer Header Type Values [*]_
* - :class:`ETHERTYPE <pcapkit.vendor.reg.ethertype.EtherType>`
- Ethertype IEEE 802 Numbers [*]_
* - :class:`TRANSTYPE <pcapkit.vendor.reg.transtype.TransType>`
- Transport Layer Protocol Numbers [*]_
* - :class:`APPTYPE <pcapkit.vendor.reg.apptype.AppType>`
- Application Layer Protocol Numbers (Service Name and Transport Protocol Port Number Registry) [*]_
.. [*] http://www.tcpdump.org/linktypes.html
.. [*] https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml#ieee-802-numbers-1
.. [*] https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml#protocol-numbers-1
.. [*] https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?
"""
from pcapkit.vendor.reg.apptype import AppType
from pcapkit.vendor.reg.ethertype import EtherType
from pcapkit.vendor.reg.linktype import LinkType
from pcapkit.vendor.reg.transtype import TransType
__all__ = ['EtherType', 'LinkType', 'TransType', 'AppType']
| [
"jarryshaw@icloud.com"
] | jarryshaw@icloud.com |
95dc71f5773650d76824c1b85d404dc7f364db8c | ff00d19fbb2510b9ea127105896cfd13fc7be7bf | /Add_doctor/views.py | 603ad6caa951f3be94d0b7bd99d1e3d09ab91308 | [] | no_license | joypaulgmail/E_Health | ba43200ef6be39050fd934a83076b8acf702fe83 | b51569b9ce97fe775926a246ae4675ce561f72cf | refs/heads/master | 2023-03-01T23:04:02.510395 | 2021-02-06T07:33:43 | 2021-02-06T07:33:43 | 336,480,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from django.shortcuts import render
def add_doctor(request):
    """Render the 'add doctor' form page."""
    return render(request, 'ADDDOCTOR/add_doctor.html')
| [
"joypaul650@gmail.com"
] | joypaul650@gmail.com |
d5b1438af7bbd961f911a25b4daf23155058dadb | 6296a2a73121271ae01a644b4bcd82c1aaff1899 | /worksheets/helpers/ex03.py | bfa7d8bbf3974b724ce44779140499d6eab43b15 | [
"MIT"
] | permissive | widdowquinn/Teaching-EMBL-Plant-Path-Genomics | 570de0234a9bf7a2dfc45d834cb775b3c837b314 | 5cb03893ab145ee51891ccddcef9ebffe3f9bb1e | refs/heads/master | 2021-01-18T23:27:08.243564 | 2017-11-16T10:02:18 | 2017-11-16T10:02:18 | 21,601,799 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | # ex03.py
#
# Functions and data useful in worksheet 3 of the Plant and Pathogen
# Bioinformatics course at EMBL
import pylab
def p_correct_given_pos(sens, fpr, b):
    """Return the Bayesian P(prediction correct | prediction positive).

    Computed as ``sens * b / (sens * b + fpr * (1 - b))``.

    Args:
        sens: classifier sensitivity (true positive rate), in [0, 1].
        fpr: false positive rate, in [0, 1].
        b: base rate of positive examples in the input set.
    """
    assert 0 <= sens <= 1, "Sensitivity must be in range [0,1]"
    assert 0 <= fpr <= 1, "FPR must be in range [0,1]"
    return sens * b / (sens * b + fpr * (1 - b))
def plot_prob_effector(sens, fpr, xmax=1, baserate=0.1):
    """Plot P(effector | positive test) against the effector base rate.

    Draws the curve of P(eff|pos) over base rates in [0, 1] for the given
    sensitivity *sens* and false positive rate *fpr*.  *xmax* bounds the
    x axis and *baserate* marks one point with an annotation arrow (or a
    plain text label when the point lies beyond *xmax*).
    """
    assert 0.1 <= xmax <= 1, "Max x axis value must be in range [0,1]"
    assert 0.01 <= baserate <= 1, "Baserate annotation must be in range [0,1]"
    baserates = pylab.arange(0, 1.05, xmax * 0.005)
    probs = [p_correct_given_pos(sens, fpr, b) for b in baserates]
    pylab.plot(baserates, probs, 'r')
    pylab.title("P(eff|pos) vs baserate; sens: %.2f, fpr: %.2f" % (sens, fpr))
    pylab.ylabel("P(effector|positive)")
    pylab.xlabel("effector baserate")
    pylab.xlim(0, xmax)
    pylab.ylim(0, 1)
    # Annotate the requested base rate with its P(pos|+ve) value.
    xpos, ypos = (baserate, p_correct_given_pos(sens, fpr, baserate))
    if baserate < xmax:
        # Nudge the annotation text so it stays inside the axes.
        if xpos > 0.7 * xmax:
            xtextpos = 0.05 * xmax
        else:
            xtextpos = xpos + (xmax - xpos) / 5.
        if ypos > 0.5:
            ytextpos = ypos - 0.05
        else:
            ytextpos = ypos + 0.05
        pylab.annotate('baserate: %.2f, P(pos|+ve): %.3f' % (xpos, ypos),
                       xy=(xpos, ypos),
                       xytext=(xtextpos, ytextpos),
                       arrowprops=dict(facecolor='black', shrink=0.05))
    else:
        pylab.text(0.05 * xmax, 0.95, 'baserate: %.2f, P(pos|+ve): %.3f' %
                   (xpos, ypos))
"leighton.pritchard@hutton.ac.uk"
] | leighton.pritchard@hutton.ac.uk |
649893fe6b57e5ff6de93d3ae907446cf0b4c8ad | aa6c1bd093eddea65fb2f4ccc2a47020bb512a47 | /swimprotocol/address.py | e50be239ae4e81eab4e496065ac81a6f89ce33bf | [
"MIT"
] | permissive | chlin501/swim-protocol | 162606070388432ae616689d0dcd0e20f796f854 | 6f2cd3d4d4d35b5ea2a0060d225c6c469d7642ae | refs/heads/main | 2023-04-21T23:59:08.124099 | 2021-05-15T20:44:31 | 2021-05-15T20:44:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py |
from __future__ import annotations
from dataclasses import dataclass
from typing import Final, Optional
__all__ = ['Address', 'AddressParser']
@dataclass(frozen=True, order=True)
class Address:
    """Manages an address for socket connections.

    Instances are immutable, hashable, and totally ordered by
    ``(host, port)``.

    Args:
        host: The address hostname string.
        port: The address port number.
    """

    host: str
    port: int

    @classmethod
    def get(cls, addr: tuple[str, int]) -> Address:
        """Return an :class:`Address` from a ``(host, port)`` tuple.

        Args:
            addr: The address tuple from :mod:`socket` functions.
        """
        return cls(addr[0], addr[1])

    def __str__(self) -> str:
        # Renders as 'host:port'.
        return ':'.join((self.host, str(self.port)))
class AddressParser:
    """Manages the defaults to use when parsing an address string.

    Args:
        address_type: Override the :class:`Address` implementation.
        default_host: The default hostname, if missing from the address
            string (e.g. ``:1234``).
        default_port: The default port number, if missing from the address
            string (e.g. ``example.tld``).
    """

    def __init__(self, address_type: type[Address] = Address, *,
                 default_host: Optional[str] = None,
                 default_port: Optional[int] = None) -> None:
        super().__init__()
        self.address_type: Final = address_type
        self.default_host: Final = default_host
        self.default_port: Final = default_port

    def parse(self, address: str) -> Address:
        """Parse *address* ('host', ':port', or 'host:port') into an
        :class:`Address`, filling in the configured defaults.

        Raises:
            ValueError: the missing component has no configured default.
        """
        host, sep, port = address.rpartition(':')
        if sep != ':':
            # No colon at all: rpartition puts the whole string into *port*
            # and leaves *host* empty, so the hostname is *address* itself.
            # (Bug fix: previously returned address_type(host, default_port)
            # with host == '', yielding an empty hostname.)
            default_port = self.default_port
            if default_port is not None:
                return self.address_type(address, default_port)
        else:
            default_host = self.default_host
            if host:
                return self.address_type(host, int(port))
            elif default_host is not None:
                return self.address_type(default_host, int(port))
        raise ValueError(address)
| [
"ian@icgood.net"
] | ian@icgood.net |
724835a373b84b4476b57505c51a85b02c24ce3f | 89812f6ab80008222bcf93a9b2ca614a60291738 | /river/metrics/cluster/sd_validation.py | 93c41b6f2291198d07c65091eed03baab201b2d7 | [
"BSD-3-Clause"
] | permissive | Pandinosaurus/river | 47135f5b7e612f83d96f4a50f9d746dec834b16d | 09a24d35c1f548239c54c1244973241bfe5c4edc | refs/heads/master | 2023-08-27T21:08:12.553115 | 2021-11-09T22:10:17 | 2021-11-09T22:10:17 | 409,610,355 | 0 | 0 | BSD-3-Clause | 2021-11-10T04:13:30 | 2021-09-23T13:47:27 | Python | UTF-8 | Python | false | false | 4,665 | py | import math
from river import stats, utils
from . import base
class SD(base.InternalMetric):
    """The SD validity index (SD).

    The SD validity index [^1] is composed of two terms:

    * Scat(NC), the scattering within clusters,
    * Dis(NC), the dispersion between clusters.

    Like DB and SB, SD measures compactness with the variance of clustered
    objects and separation with the distance between cluster centers, but
    uses them in a different way.  The smaller the value of SD, the better.

    The original formula also involves the ratio between the maximum and the
    actual number of clusters; because this metric is updated incrementally,
    that ratio defaults to 1 here.

    Examples
    --------

    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics

    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]

    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.SD()

    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)

    >>> metric
    SD: 2.339016

    References
    ----------
    [^1]: Halkidi, M., Vazirgiannis, M., & Batistakis, Y. (2000). Quality
          Scheme Assessment in the Clustering Process. Principles Of Data
          Mining And Knowledge Discovery, 265-276.
          DOI: 10.1007/3-540-45372-5_26
    """

    def __init__(self):
        super().__init__()
        self._center_all_points = {}
        # Per-feature running variance over all points seen.
        self._overall_variance = {}
        # Per-cluster dict of per-feature running variances.
        self._cluster_variance = {}
        self._centers = {}
        self._initialized = False

    @staticmethod
    def _calculate_dispersion_nc(centers):
        # Dis(NC): (max pairwise center distance / min pairwise center
        # distance) * sum of inverse pairwise center distances.
        min_distance_clusters = math.inf
        max_distance_clusters = -math.inf
        sum_inverse_distances = 0
        n_clusters = len(centers)
        for i in range(n_clusters):
            for j in range(i + 1, n_clusters):
                distance_ij = math.sqrt(
                    utils.math.minkowski_distance(centers[i], centers[j], 2)
                )
                if distance_ij > max_distance_clusters:
                    max_distance_clusters = distance_ij
                if distance_ij < min_distance_clusters:
                    min_distance_clusters = distance_ij
                sum_inverse_distances += 1 / distance_ij
        try:
            return (
                max_distance_clusters / min_distance_clusters
            ) * sum_inverse_distances
        except ZeroDivisionError:
            # Two coincident centers make the minimum distance zero.
            return math.inf

    @staticmethod
    def _norm(x):
        # Euclidean norm of a feature->value mapping (distance to origin).
        origin = {i: 0 for i in x}
        return math.sqrt(utils.math.minkowski_distance(x, origin, 2))

    def update(self, x, y_pred, centers, sample_weight=1.0):
        if not self._initialized:
            self._overall_variance = {i: stats.Var() for i in x}
            self._initialized = True

        if y_pred not in self._cluster_variance:
            self._cluster_variance[y_pred] = {i: stats.Var() for i in x}

        for i in x:
            self._cluster_variance[y_pred][i].update(x[i], w=sample_weight)
            self._overall_variance[i].update(x[i], w=sample_weight)

        self._centers = centers

        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        # Undo an update by applying a negative weight.
        for i in x:
            self._overall_variance[i].update(x[i], w=-sample_weight)
            self._cluster_variance[y_pred][i].update(x[i], w=-sample_weight)

        self._centers = centers

        return self

    def get(self):
        dispersion_nc = self._calculate_dispersion_nc(self._centers)

        overall_variance = {
            i: self._overall_variance[i].get() for i in self._overall_variance
        }

        cluster_variance = {}
        for i in self._cluster_variance:
            cluster_variance[i] = {
                j: self._cluster_variance[i][j].get() for j in self._cluster_variance[i]
            }

        # Scat(NC): sum over clusters of ||var(cluster)|| / ||var(all)||.
        scat_nc = 0
        for i in cluster_variance:
            scat_nc += self._norm(cluster_variance[i]) / self._norm(overall_variance)

        try:
            return scat_nc + dispersion_nc
        except ZeroDivisionError:
            return math.inf

    @property
    def bigger_is_better(self):
        # SD is a cost-style index: lower values indicate better clustering.
        return False
| [
"noreply@github.com"
] | Pandinosaurus.noreply@github.com |
2b9506dfc10e5e9c3b64a86ebcfe9e8106bd68fc | ba54b70f93fe7f9d114623d76b1ad3f88309d66f | /uimg/migrations/0001_initial.py | 70d0c465b8a1a916e9e82b8faabf34ef9bfaf92e | [] | no_license | loobinsk/newprj | 9769b2f26092ce7dd8612fce37adebb307b01b8b | c6aa6a46973fb46375f4b05a86fe76207a8ae16d | refs/heads/master | 2023-05-07T00:28:44.242163 | 2021-05-25T08:22:05 | 2021-05-25T08:22:05 | 370,617,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import uimg.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.ImageField(upload_to=uimg.models.get_user_image_path, verbose_name=b'\xd0\x98\xd0\xb7\xd0\xbe\xd0\xb1\xd1\x80\xd0\xb0\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('date', models.DateTimeField(default=datetime.datetime(2015, 6, 11, 15, 25, 9, 540983), verbose_name=b'\xd0\x94\xd0\xb0\xd1\x82\xd0\xb0')),
('desc', models.TextField(default=b'', max_length=250, null=True, verbose_name=b'\xd0\x9e\xd0\xbf\xd0\xb8\xd1\x81\xd0\xb0\xd0\xbd\xd0\xb8\xd0\xb5', blank=True)),
],
options={
'verbose_name': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435',
'verbose_name_plural': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f',
},
),
]
| [
"root@bazavashdom.ru"
] | root@bazavashdom.ru |
06d353baa11398faddd39afac93ccc41d6c7e529 | 941cb76fde4fed6a85d804421f9deee5934a6684 | /yolanda/services/urls.py | cd88bb79eddbeb67be16a8f841f4e184318be6b8 | [] | no_license | ingenieroariel/yolanda | 0e27346afc96374e8c8f29af13b0e7218b2670f6 | b8038f04d32847ed74bdc44e9ff4f694d7bb0637 | refs/heads/master | 2021-01-13T01:59:22.243342 | 2013-12-19T12:00:10 | 2013-12-19T12:00:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django.conf.urls.defaults import patterns, url
from yolanda.services.views import DigitalGlobeProxy
urlpatterns = patterns("yolanda.services.views",
url(r"^dg/?", DigitalGlobeProxy.as_view(), name="dg_service"),
)
| [
"garnertb@gmail.com"
] | garnertb@gmail.com |
64559c6f29bd350bb9da38fc03ab2979fef8ba4c | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/eqptcapacity/mcastentryhist1qtr.py | fcabdb6dadac00eabd3652b3a046c41cb11a9f9e | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 11,028 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class McastEntryHist1qtr(Mo):
"""
A class that represents historical statistics for Multicast entry in a 1 quarter sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.McastEntryHist1qtr", "Multicast entry")
counter = CounterMeta("normalized", CounterCategory.GAUGE, "percentage", "Multicast entries usage")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "normalizedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "normalizedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "normalizedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "normalizedSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "normalizedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "normalizedTr"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityMcastEntryHist1qtr"
meta.rnFormat = "HDeqptcapacityMcastEntry1qtr-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Multicast entry stats in 1 quarter"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.eqptcapacity.McastEntryHist")
meta.rnPrefixes = [
('HDeqptcapacityMcastEntry1qtr-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 6370, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "normalizedAvg", "normalizedAvg", 9061, PropCategory.IMPLICIT_AVG)
prop.label = "Multicast entries usage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedAvg", prop)
prop = PropMeta("str", "normalizedMax", "normalizedMax", 9060, PropCategory.IMPLICIT_MAX)
prop.label = "Multicast entries usage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedMax", prop)
prop = PropMeta("str", "normalizedMin", "normalizedMin", 9059, PropCategory.IMPLICIT_MIN)
prop.label = "Multicast entries usage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedMin", prop)
prop = PropMeta("str", "normalizedSpct", "normalizedSpct", 9062, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Multicast entries usage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedSpct", prop)
prop = PropMeta("str", "normalizedThr", "normalizedThr", 9063, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Multicast entries usage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("normalizedThr", prop)
prop = PropMeta("str", "normalizedTr", "normalizedTr", 9064, PropCategory.IMPLICIT_TREND)
prop.label = "Multicast entries usage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
85bf3fd33da87dfd622556ff0779eb6f9f315ff1 | 66b1748a1238eda820345f914f60da434c668cf0 | /CodeUp/CodeUp1064.py | 77ad0d37f5e24b106e755038bf661c7f1848e046 | [] | no_license | kwangminini/Algorhitm | 5d3140021584239e30468d3dcb353b119b935e76 | 4d9a3b9284c90d141c1a73e14329152455373c53 | refs/heads/master | 2023-09-03T07:33:51.228150 | 2023-08-28T13:39:52 | 2023-08-28T13:39:52 | 225,879,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | a,b,c=input().split()
a=int(a)
b=int(b)
c=int(c)
print (((a if b<c else (a if a<c else c) )if a<b else (b if b<c else c))) | [
"rhkdals7362@gmail.com"
] | rhkdals7362@gmail.com |
0fdb42f90603cc164bd7435a2bc8f96429a8aa96 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/odtjoh001/question3.py | db78f68e48dd183436aca1d79122ba904d28af82 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | """Program to check if sudoku grid is valid
John Odetokun
14 May 2014"""
#create 2d-array
grid = []
list = []
for i in range (9):
list.append(input())
for c in range(9):
for i in range (9):
inpt = list[c]
gridline = []
for j in range(9):
gridline.append(inpt[j])
grid.append(gridline)
n = 0
#horizontal and vertical checks
for a in range(9):
for w in range(8):
value = grid[a][w]
value2 = grid[w][a]
for z in range(w+1, 9):
if value == grid[a][z] or value2 == grid[z][a]:
n+=1
if n!= 0:
print("Sudoku grid is not valid")
else:
#check 3 by 3 grids within grid
for j in range(3,10,3):
for k in range(3,10,3):
arr = []
for x in range(j-3,j):
for y in range(k-3,k):
arr.append(grid[x][y])
for r in range (9):
val = arr[r]
for t in range(r+1,8):
if val == arr[t]:
n+=1
if n == 0:
print("Sudoku grid is valid")
else:
print("Sudoku grid is not valid")
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
b07984eef9b46c502f3ffefbdc0893d6f0773d9c | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/rubicund.py | b2a8d52712df9b1f42c6576c35321bc7fa16e6e1 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 63 | py | ii = [('CarlTFR.py', 2), ('WestJIT2.py', 1), ('AinsWRR.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
523339f3f723af86067fda3b7161b1ad59725180 | 2a6412a9359a1df5f8f12e319e73b9e4e46fd64c | /code/PythonINIAD/IPv4Converter.py | 1f3f0dd3fefe4e3730ff7d3b00238580eff77027 | [] | no_license | danganhvu1998/myINIAD | 504d1147a02f12e593f30e369daf82f85aa01bfd | 01547673dd3065efb6c7cc8db77ec93a5a4f5d98 | refs/heads/master | 2022-03-17T12:58:34.647229 | 2022-02-08T06:34:19 | 2022-02-08T06:34:19 | 143,675,719 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | import re
def biToDe(biStr):
ans=0
for bit in biStr:
ans=ans*2+int(bit)
return ans
def deToBi(deStr):
ans = ""
currValue = 128
deInt = int(deStr)
for i in range(0,8):
if(deInt>=currValue):
ans+="1"
deInt-=currValue
else:
ans+="0"
currValue /= 2
return ans
def biAddressToDeAddess(biAddress):
ans = ""
biAddressParts = re.findall("[0-9]+", biAddress)
for biAddressPart in biAddressParts:
ans+=str(biToDe(biAddressPart))+"."
ans = ans[0:-1]
return ans
def deAddressToBiAddess(biAddress):
ans = ""
deAddressParts = re.findall("[0-9]+", biAddress)
for deAddressPart in deAddressParts:
ans+=str(deToBi(deAddressPart))+"."
ans = ans[0:-1]
return ans
def announce(biAddress, text):
print("*********")
print(text, biAddress)
print(text, biAddressToDeAddess(biAddress))
print("*********")
print()
def networkAddress(biAddress, networkPart):
currBit = 0
ans = ""
for bit in biAddress:
if(bit!="."):
currBit+=1
if(currBit>networkPart):
ans+="0"
else:
ans+=bit
else:
ans+=bit
announce(ans, "Network Address")
def broadcastAddress(biAddress, networkPart):
currBit = 0
ans = ""
for bit in biAddress:
if(bit!="."):
currBit+=1
if(currBit>networkPart):
ans+="1"
else:
ans+=bit
else:
ans+=bit
announce(ans, "Broadcast Address")
def subnetMaskAddress(biAddress, networkPart):
currBit = 0
ans = ""
for bit in biAddress:
if(bit!="."):
currBit+=1
if(currBit<=networkPart):
ans+="1"
else:
ans+="0"
else:
ans+=bit
announce(ans, "Subnet mask Address")
def __main__():
IPv4 = input("Input IPv4 Address (In any format is okay):");
#IPv4 = "128.226.170.3"
networkPart = -1;
#Calculate Network Part
if("/" in IPv4):
ipAddress = re.findall("(.*)/", IPv4)[0]
networkPart = int(re.findall("/(.*)", IPv4)[0])
else:
ipAddress = IPv4
#Convert Ip Address to both Bi and De
if(len(ipAddress)>32):
ipAddressBi = ipAddress
else:
ipAddressBi = deAddressToBiAddess(ipAddress)
announce(ipAddressBi, "IPv4 Address")
if(networkPart>=0):
networkAddress(ipAddressBi, networkPart)
broadcastAddress(ipAddressBi, networkPart)
subnetMaskAddress(ipAddressBi, networkPart)
__main__() | [
"danganhvu1998@gmail.com"
] | danganhvu1998@gmail.com |
722a7efc2f0ee638c965bda8fc21e4e61f6e9a20 | bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062 | /deploy/python/mot_jde_infer.py | 793d5271bf0a30c8c496efd0e3a12d6679260513 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleDetection | e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961 | bd83b98342b0a6bc8d8dcd5936233aeda1e32167 | refs/heads/release/2.6 | 2023-08-31T07:04:15.357051 | 2023-08-18T02:24:45 | 2023-08-18T02:24:45 | 217,475,193 | 12,523 | 3,096 | Apache-2.0 | 2023-09-10T10:05:56 | 2019-10-25T07:21:14 | Python | UTF-8 | Python | false | false | 14,853 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import yaml
import cv2
import numpy as np
from collections import defaultdict
import paddle
from benchmark_utils import PaddleInferBenchmark
from preprocess import decode_image
from utils import argsparser, Timer, get_current_memory_mb
from infer import Detector, get_test_images, print_arguments, bench_log, PredictConfig
# add python path
import sys
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)
from pptracking.python.mot import JDETracker
from pptracking.python.mot.utils import MOTTimer, write_mot_results
from pptracking.python.mot.visualize import plot_tracking_dict
# Global dictionary
MOT_JDE_SUPPORT_MODELS = {
'JDE',
'FairMOT',
}
class JDE_Detector(Detector):
"""
Args:
model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml
device (str): Choose the device you want to run, it can be: CPU/GPU/XPU/NPU, default is CPU
run_mode (str): mode of running(paddle/trt_fp32/trt_fp16)
batch_size (int): size of pre batch in inference
trt_min_shape (int): min shape for dynamic shape in trt
trt_max_shape (int): max shape for dynamic shape in trt
trt_opt_shape (int): opt shape for dynamic shape in trt
trt_calib_mode (bool): If the model is produced by TRT offline quantitative
calibration, trt_calib_mode need to set True
cpu_threads (int): cpu threads
enable_mkldnn (bool): whether to open MKLDNN
output_dir (string): The path of output, default as 'output'
threshold (float): Score threshold of the detected bbox, default as 0.5
save_images (bool): Whether to save visualization image results, default as False
save_mot_txts (bool): Whether to save tracking results (txt), default as False
"""
def __init__(
self,
model_dir,
tracker_config=None,
device='CPU',
run_mode='paddle',
batch_size=1,
trt_min_shape=1,
trt_max_shape=1088,
trt_opt_shape=608,
trt_calib_mode=False,
cpu_threads=1,
enable_mkldnn=False,
output_dir='output',
threshold=0.5,
save_images=False,
save_mot_txts=False, ):
super(JDE_Detector, self).__init__(
model_dir=model_dir,
device=device,
run_mode=run_mode,
batch_size=batch_size,
trt_min_shape=trt_min_shape,
trt_max_shape=trt_max_shape,
trt_opt_shape=trt_opt_shape,
trt_calib_mode=trt_calib_mode,
cpu_threads=cpu_threads,
enable_mkldnn=enable_mkldnn,
output_dir=output_dir,
threshold=threshold, )
self.save_images = save_images
self.save_mot_txts = save_mot_txts
assert batch_size == 1, "MOT model only supports batch_size=1."
self.det_times = Timer(with_tracker=True)
self.num_classes = len(self.pred_config.labels)
# tracker config
assert self.pred_config.tracker, "The exported JDE Detector model should have tracker."
cfg = self.pred_config.tracker
min_box_area = cfg.get('min_box_area', 0.0)
vertical_ratio = cfg.get('vertical_ratio', 0.0)
conf_thres = cfg.get('conf_thres', 0.0)
tracked_thresh = cfg.get('tracked_thresh', 0.7)
metric_type = cfg.get('metric_type', 'euclidean')
self.tracker = JDETracker(
num_classes=self.num_classes,
min_box_area=min_box_area,
vertical_ratio=vertical_ratio,
conf_thres=conf_thres,
tracked_thresh=tracked_thresh,
metric_type=metric_type)
def postprocess(self, inputs, result):
# postprocess output of predictor
np_boxes = result['pred_dets']
if np_boxes.shape[0] <= 0:
print('[WARNNING] No object detected.')
result = {'pred_dets': np.zeros([0, 6]), 'pred_embs': None}
result = {k: v for k, v in result.items() if v is not None}
return result
def tracking(self, det_results):
pred_dets = det_results['pred_dets'] # cls_id, score, x0, y0, x1, y1
pred_embs = det_results['pred_embs']
online_targets_dict = self.tracker.update(pred_dets, pred_embs)
online_tlwhs = defaultdict(list)
online_scores = defaultdict(list)
online_ids = defaultdict(list)
for cls_id in range(self.num_classes):
online_targets = online_targets_dict[cls_id]
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
tscore = t.score
if tlwh[2] * tlwh[3] <= self.tracker.min_box_area: continue
if self.tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
3] > self.tracker.vertical_ratio:
continue
online_tlwhs[cls_id].append(tlwh)
online_ids[cls_id].append(tid)
online_scores[cls_id].append(tscore)
return online_tlwhs, online_scores, online_ids
def predict(self, repeats=1):
'''
Args:
repeats (int): repeats number for prediction
Returns:
result (dict): include 'pred_dets': np.ndarray: shape:[N,6], N: number of box,
matix element:[class, score, x_min, y_min, x_max, y_max]
FairMOT(JDE)'s result include 'pred_embs': np.ndarray:
shape: [N, 128]
'''
# model prediction
np_pred_dets, np_pred_embs = None, None
for i in range(repeats):
self.predictor.run()
output_names = self.predictor.get_output_names()
boxes_tensor = self.predictor.get_output_handle(output_names[0])
np_pred_dets = boxes_tensor.copy_to_cpu()
embs_tensor = self.predictor.get_output_handle(output_names[1])
np_pred_embs = embs_tensor.copy_to_cpu()
result = dict(pred_dets=np_pred_dets, pred_embs=np_pred_embs)
return result
def predict_image(self,
image_list,
run_benchmark=False,
repeats=1,
visual=True,
seq_name=None):
mot_results = []
num_classes = self.num_classes
image_list.sort()
ids2names = self.pred_config.labels
data_type = 'mcmot' if num_classes > 1 else 'mot'
for frame_id, img_file in enumerate(image_list):
batch_image_list = [img_file] # bs=1 in MOT model
if run_benchmark:
# preprocess
inputs = self.preprocess(batch_image_list) # warmup
self.det_times.preprocess_time_s.start()
inputs = self.preprocess(batch_image_list)
self.det_times.preprocess_time_s.end()
# model prediction
result_warmup = self.predict(repeats=repeats) # warmup
self.det_times.inference_time_s.start()
result = self.predict(repeats=repeats)
self.det_times.inference_time_s.end(repeats=repeats)
# postprocess
result_warmup = self.postprocess(inputs, result) # warmup
self.det_times.postprocess_time_s.start()
det_result = self.postprocess(inputs, result)
self.det_times.postprocess_time_s.end()
# tracking
result_warmup = self.tracking(det_result)
self.det_times.tracking_time_s.start()
online_tlwhs, online_scores, online_ids = self.tracking(
det_result)
self.det_times.tracking_time_s.end()
self.det_times.img_num += 1
cm, gm, gu = get_current_memory_mb()
self.cpu_mem += cm
self.gpu_mem += gm
self.gpu_util += gu
else:
self.det_times.preprocess_time_s.start()
inputs = self.preprocess(batch_image_list)
self.det_times.preprocess_time_s.end()
self.det_times.inference_time_s.start()
result = self.predict()
self.det_times.inference_time_s.end()
self.det_times.postprocess_time_s.start()
det_result = self.postprocess(inputs, result)
self.det_times.postprocess_time_s.end()
# tracking process
self.det_times.tracking_time_s.start()
online_tlwhs, online_scores, online_ids = self.tracking(
det_result)
self.det_times.tracking_time_s.end()
self.det_times.img_num += 1
if visual:
if len(image_list) > 1 and frame_id % 10 == 0:
print('Tracking frame {}'.format(frame_id))
frame, _ = decode_image(img_file, {})
im = plot_tracking_dict(
frame,
num_classes,
online_tlwhs,
online_ids,
online_scores,
frame_id=frame_id,
ids2names=ids2names)
if seq_name is None:
seq_name = image_list[0].split('/')[-2]
save_dir = os.path.join(self.output_dir, seq_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cv2.imwrite(
os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), im)
mot_results.append([online_tlwhs, online_scores, online_ids])
return mot_results
def predict_video(self, video_file, camera_id):
video_out_name = 'mot_output.mp4'
if camera_id != -1:
capture = cv2.VideoCapture(camera_id)
else:
capture = cv2.VideoCapture(video_file)
video_out_name = os.path.split(video_file)[-1]
# Get Video info : resolution, fps, frame count
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(capture.get(cv2.CAP_PROP_FPS))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("fps: %d, frame_count: %d" % (fps, frame_count))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
out_path = os.path.join(self.output_dir, video_out_name)
video_format = 'mp4v'
fourcc = cv2.VideoWriter_fourcc(*video_format)
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
frame_id = 1
timer = MOTTimer()
results = defaultdict(list) # support single class and multi classes
num_classes = self.num_classes
data_type = 'mcmot' if num_classes > 1 else 'mot'
ids2names = self.pred_config.labels
while (1):
ret, frame = capture.read()
if not ret:
break
if frame_id % 10 == 0:
print('Tracking frame: %d' % (frame_id))
frame_id += 1
timer.tic()
seq_name = video_out_name.split('.')[0]
mot_results = self.predict_image(
[frame[:, :, ::-1]], visual=False, seq_name=seq_name)
timer.toc()
online_tlwhs, online_scores, online_ids = mot_results[0]
for cls_id in range(num_classes):
results[cls_id].append(
(frame_id + 1, online_tlwhs[cls_id], online_scores[cls_id],
online_ids[cls_id]))
fps = 1. / timer.duration
im = plot_tracking_dict(
frame,
num_classes,
online_tlwhs,
online_ids,
online_scores,
frame_id=frame_id,
fps=fps,
ids2names=ids2names)
writer.write(im)
if camera_id != -1:
cv2.imshow('Mask Detection', im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if self.save_mot_txts:
result_filename = os.path.join(
self.output_dir, video_out_name.split('.')[-2] + '.txt')
write_mot_results(result_filename, results, data_type, num_classes)
writer.release()
def main():
detector = JDE_Detector(
FLAGS.model_dir,
tracker_config=None,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
batch_size=1,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
output_dir=FLAGS.output_dir,
threshold=FLAGS.threshold,
save_images=FLAGS.save_images,
save_mot_txts=FLAGS.save_mot_txts)
# predict from video file or camera video stream
if FLAGS.video_file is not None or FLAGS.camera_id != -1:
detector.predict_video(FLAGS.video_file, FLAGS.camera_id)
else:
# predict from image
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)
if not FLAGS.run_benchmark:
detector.det_times.info(average=True)
else:
mode = FLAGS.run_mode
model_dir = FLAGS.model_dir
model_info = {
'model_name': model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
bench_log(detector, img_list, model_info, name='MOT')
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
FLAGS = parser.parse_args()
print_arguments(FLAGS)
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU', 'NPU'
], "device should be CPU, GPU, NPU or XPU"
main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
aaeda1c90c18d3d74453e7657b0af315e5024ae3 | f71f44d5ddc17e3c30e2bfd7988e5111a55a8b9a | /diplom/source/src/lib/interpolation/__init__.py | 375be80432c4755fbe11bc0dee04cceaae888a25 | [] | no_license | Yashchuk/diplom | 5ed1998d4b3d1fe568599973ec134f7ca13e8417 | 4029ed91ce93a41af44f03bcce365fdaecb64a37 | refs/heads/master | 2021-01-15T17:02:03.723007 | 2014-01-21T13:42:48 | 2014-01-21T13:42:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,813 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# file interpolation/__init__.py
#
#############################################################################
# Copyright (c) 2013 by Panagiotis Mavrogiorgos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name(s) of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AS IS AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#############################################################################
#
# @license: http://opensource.org/licenses/BSD-3-Clause
# @authors: see AUTHORS.txt
""" A package containing Interpolation related classes. """
# Package imports
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# Version
__major__ = 0 # for major interface/format changes
__minor__ = 1 # for minor interface/format changes
__release__ = 0 # for tweaks, bug-fixes, or development
# package information
__package_name__ = "interpolation"
# semantic version string assembled from the three components above
__version__ = "%d.%d.%d" % (__major__, __minor__, __release__)
__license__ = "BSD"
# first sentence of the module docstring doubles as the short description
__description__ = __doc__.split(".")[0]
__url__ = "http://github.com/pmav99/%s" % __package_name__
__download_url__ = "http://github.com/pmav99/%s/downloads" % __package_name__
__author__ = "Panagiotis Mavrogiorgos"
# NOTE(review): address appears intentionally obfuscated ("<provider> <user>")
__author_email__ = "gmail pmav99"
# Package imports
from .linear import LinearInterpolation
from .bilinear import BilinearInterpolation
__all__ = ["LinearInterpolation", "BilinearInterpolation"]
| [
"andrew.freelance@i.ua"
] | andrew.freelance@i.ua |
896eb2ea8561b2a2c07d720ca69366a09fe1d5ac | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03418/s781538433.py | 5d531f3c4ae6cffe8a4ee7db60e768bb5e17a08c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from fractions import gcd
def NK():
return map(int,input().split())
def main():
n,k = NK()
ans = 0
for i in range(1,n+1):
ans += (n//i)*max((i-k),0) + max(n%i-max((k-1),0),0)
print(ans)
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9fc75b632c2f6956fff01f5c2be76f1e9601666d | 1e8c805e96bc854b5acf4282c47c16ce6e1980e2 | /examples/Laplace_equation_1D.py | 173ac8080a56f7f3cc23dd09d86dfdedf85d3146 | [] | no_license | LaplaceKorea/DWave-Quantum-Annealing | d0a3058ee6b4e1e5163be28fa5dfb77e0f85c51f | 16f934e995b72aaf618480aeaf3f09dd07c2ff47 | refs/heads/master | 2023-02-22T08:49:28.588459 | 2021-01-22T02:58:38 | 2021-01-22T02:58:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,251 | py | """Solve 1D Laplace's equation"""
# Import packages
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from neal import SimulatedAnnealingSampler
from dwave.system import EmbeddingComposite, DWaveSampler
from dwaveutils import bl_lstsq
# Define function
def get_laplace_1D(N, num_bits,
                   fixed_point=0, exact_x=True, random_seed=None):
    """Get information about 1D Laplace's equation.

    Parameters
    ----------
    N : int
        Size of the N x N finite-difference Laplacian matrix `A`.
    num_bits : int
        Number of bits (including the sign bit) used to discretize each
        entry of the solution vector `x`.
    fixed_point : int, optional
        Exponent of the leading (sign) bit of the fixed-point encoding.
    exact_x : bool, optional
        If True, draw a random bit string `q` so that `x` is exactly
        representable with the chosen bit values; otherwise draw a real
        valued `x` that generally is not exactly representable.
    random_seed : int or None, optional
        Seed for the random generator (None -> nondeterministic).

    Returns
    -------
    dict
        Keys 'A', 'x', 'b', 'A_discrete', 'bit_value'.
    """
    # number of predictor and number of response
    num_predictor_discrete = num_bits * N
    num_response = N
    # matrix `A`: tridiagonal (1, -2, 1) second-difference operator
    A = (np.eye(num_response, k=-1)
         - 2 * np.eye(num_response, k=0)
         + np.eye(num_response, k=1))
    # set the bit value to discrete the actual value as a fixed point
    bit_value = bl_lstsq.get_bit_value(num_bits, fixed_point=fixed_point)
    # discretized version of matrix `A`
    A_discrete = bl_lstsq.discretize_matrix(A, bit_value)
    # default_rng(None) is equivalent to default_rng(), so no branching needed
    rng = np.random.default_rng(random_seed)
    if exact_x:
        # binary vector `q`
        q = rng.choice([0, 1], size=num_predictor_discrete)
        # vector `x`
        # BUG FIX: was a bare `q2x(...)`, which raises NameError -- this
        # module only imports `bl_lstsq` (cf. bl_lstsq.q2x used elsewhere).
        x = bl_lstsq.q2x(q, bit_value)
    else:
        # vector `x`
        x = (rng.choice([-1, 1], size=num_response)
             * (2 ** fixed_point) * rng.random(num_response))
    # calculate vector `b`
    b = A @ x
    output = {
        'A': A,
        'x': x,
        'b': b,
        'A_discrete': A_discrete,
        'bit_value': bit_value
    }
    return output
# Setting variables
# size of symmetric matrix `A`
N = 3
# number of bits (include sign bit)
num_bits = 4
# n-vector bit value is defined by
# [-2**(fixed_point), 2**(fixed_point-1), ..., 2**(fixed_point-n)]
fixed_point = 0
# whether x can be perfectly discrete
exact_x = False
random_seed = 19937
# scaling factor for QUBO
eq_scaling_val = 1/8
# number of reads for Simulated annealing (SA) or Quantum annealing (QA)
num_reads = 1000
# sampler type must be one of {'SA', 'QA'}
sampler_type = 'SA'
# setup A, x, b, A_discrete, bit_value
output = get_laplace_1D(
N, num_bits,
fixed_point=fixed_point, exact_x=exact_x, random_seed=random_seed
)
A = output['A']
true_x = output['x']
true_b = output['b']
A_discrete = output['A_discrete']
bit_value = output['bit_value']
# Solve A*x=b by `numpy.linalg.lstsq`
np_x = np.linalg.lstsq(A, true_b, rcond=None)[0]
# Solve A_discrete*q=b problem as BQM optimization
# through simulated annealing or quantum annealing
Q = bl_lstsq.get_qubo(A_discrete, true_b, eq_scaling_val=eq_scaling_val)
if sampler_type == 'QA':
try:
sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
_sampler_args = {}
if 'num_reads' in sampler.parameters:
_sampler_args['num_reads'] = num_reads
if 'answer_mode' in sampler.parameters:
_sampler_args['answer_mode'] = 'raw'
sampleset = sampler.sample_qubo(Q, **_sampler_args)
except ValueError:
warnings.warn('Cannot access QPU, use \
SimulatedAnnealingSampler instead.')
sampler = SimulatedAnnealingSampler()
sampleset = sampler.sample_qubo(Q, num_reads=num_reads)
elif sampler_type == 'SA':
sampler = SimulatedAnnealingSampler()
sampleset = sampler.sample_qubo(Q, num_reads=num_reads)
else:
raise(ValueError("The sampler_type is wrong, \
please enter 'SA' or 'QA'"))
# Solve A_discrete*q=b by brute force
# Warning: this may take a lot of time!
best_q, best_x, min_norm = bl_lstsq.bruteforce(A_discrete, true_b, bit_value)
# Prepare for showing results and plotting
# convert sampleset and its aggregate version to dataframe
sampleset_pd = sampleset.to_pandas_dataframe()
sampleset_pd_agg = sampleset.aggregate().to_pandas_dataframe()
num_states = len(sampleset_pd_agg)
num_b_entry = len(true_b)
num_x_entry = len(true_x)
num_q_entry = A_discrete.shape[1]
# concatnate `sampleset_pd` and `x_at_each_read`
x_at_each_read = pd.DataFrame(
np.row_stack(
[(sampleset_pd.iloc[i][:num_q_entry]).values.reshape(
(num_x_entry, -1)) @ bit_value
for i in range(num_reads)]
),
columns=['x' + str(i) for i in range(num_x_entry)]
)
sampleset_pd = pd.concat([sampleset_pd, x_at_each_read], axis=1)
sampleset_pd.rename(
columns=lambda c: c if isinstance(c, str) else 'q'+str(c),
inplace=True
)
# concatnate `sampleset_pd_agg` and `x_at_each_state`
x_at_each_state = pd.DataFrame(
np.row_stack(
[(sampleset_pd_agg.iloc[i][:num_q_entry]).values.reshape(
(num_x_entry, -1)) @ bit_value
for i in range(num_states)]
),
columns=['x' + str(i) for i in range(num_x_entry)]
)
sampleset_pd_agg = pd.concat([sampleset_pd_agg, x_at_each_state], axis=1)
sampleset_pd_agg.rename(
columns=lambda c: c if isinstance(c, str) else 'q'+str(c),
inplace=True
)
# lowest energy state x and q
lowest_q = sampleset_pd_agg.sort_values(
'energy').iloc[0, :num_q_entry].values
lowest_x = bl_lstsq.q2x(lowest_q, bit_value)
# frequently occurring x and q
frequent_q = sampleset_pd_agg.sort_values(
'num_occurrences', ascending=False).iloc[0, :num_q_entry].values
frequent_x = bl_lstsq.q2x(frequent_q, bit_value)
# calculate expected x from x
expected_x = sampleset_pd_agg.apply(
lambda row: row.iloc[-num_x_entry:]
* (row.num_occurrences / num_reads),
axis=1
).sum().values
# calculate excepted x from q
tmp_q = sampleset_pd_agg.apply(
lambda row: row.iloc[:num_q_entry]
* (row.num_occurrences / num_reads),
axis=1
).sum() > 0.5 # bool
expected_x_discrete = bl_lstsq.q2x(tmp_q, bit_value)
# Show results
print('='*50)
print('true x:', true_x)
print('true b:', true_b)
print('bit value:', bit_value)
print('='*50)
print('# numpy solver')
print('np_x: ', np_x)
print('b:', A @ np_x)
print('2-norm:', np.linalg.norm(A @ np_x - true_b))
print('='*50)
print('# brute force')
print('best x:', best_x)
print('best q:', best_q)
print('b:', A @ best_x)
print('2-norm:', min_norm)
print('='*50)
print('# Simulated annealing/Quantum annealing')
print('lowest energy state x:')
print(lowest_x)
print('lowest energy state q:')
print(lowest_q)
print('b:', A @ lowest_x)
print('2-norm:', np.linalg.norm(A @ lowest_x - true_b))
print('-'*50)
print('most frequently occurring x:')
print(frequent_x)
print('most frequently occurring q:')
print(frequent_q)
print('b:', A @ frequent_x)
print('2-norm:', np.linalg.norm(A @ frequent_x - true_b))
print('-'*50)
print('expected x (from real value):')
print(expected_x)
print('b:', A @ expected_x)
print('2-norm:', np.linalg.norm(A @ expected_x - true_b))
print('-'*50)
print('expected x (from discrete value):')
print(expected_x_discrete)
print('b:', A @ expected_x_discrete)
print('2-norm:', np.linalg.norm(A @ expected_x_discrete - true_b))
print('-'*50)
print('Sample set:')
print(sampleset_pd_agg.sort_values('num_occurrences', ascending=False))
print('='*50)
# Plot histogram
axes = sampleset_pd.hist(
figsize=(8, 6), bins=30,
column=['x' + str(i) for i in range(num_x_entry)],
)
axes = axes.ravel()
for i in range(num_x_entry):
ax = axes[i]
ax.set_ylabel('counts')
plt.tight_layout()
plt.show()
| [
"supon3060@gmail.com"
] | supon3060@gmail.com |
6ae24ce5a38fadb2705eb4cf461cc71939a0abca | a9063fd669162d4ce0e1d6cd2e35974274851547 | /test/test_group_member.py | cc3e314546f87473aa1601c72e04b3590d97de44 | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.group_member import GroupMember # noqa: E501
from swagger_client.rest import ApiException
class TestGroupMember(unittest.TestCase):
    """Unit-test stubs for the generated ``GroupMember`` model."""

    def setUp(self):
        # No fixtures are needed for this generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testGroupMember(self):
        """Test GroupMember"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.group_member.GroupMember()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"github@rootalley.com"
] | github@rootalley.com |
be15dd2c3d2b11acfae14de866b23bd69f9750e6 | dde8b97eee29cd6af17082cf84773d50bea7ca42 | /WHAnalysis/Configuration/test/CRAB/patTuple_standard_MC2_cfg.py | 1d4fadcebe1c85cd57ad983209b0173401d0ad8c | [] | no_license | calabria/WHAnalysis | 557cee96fe1dfe221a3a76f99b92f59c0800a8eb | 6cdcc0b73d94261f5ff7822b8bf5e48bc08268ae | refs/heads/master | 2021-01-23T13:36:11.593683 | 2014-04-12T10:39:44 | 2014-04-12T10:39:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,798 | py | ## import skeleton process
from PhysicsTools.PatAlgos.patTemplate_cfg import *
from WHAnalysis.Configuration.customizePAT import *
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.load("RecoTauTag.Configuration.RecoPFTauTag_cff")
## ------------------------------------------------------
# NOTE: you can use a bunch of core tools of PAT to
# taylor your PAT configuration; for a few examples
# uncomment the lines below
## ------------------------------------------------------
from PhysicsTools.PatAlgos.tools.coreTools import *
#--------------------------------------------------------------------------------
from PhysicsTools.PatAlgos.tools.jetTools import *
jec = [ 'L1FastJet', 'L2Relative', 'L3Absolute' ]
#if not isMC:
# jec.extend([ 'L2L3Residual' ])
addJetCollection(process, cms.InputTag('ak5PFJets'),
'AK5', 'PF',
doJTA = True,
doBTagging = True,
jetCorrLabel = ('AK5PF', cms.vstring(jec)),
doType1MET = False,
doL1Cleaning = True,
doL1Counters = False,
genJetCollection = cms.InputTag("ak5GenJets"),
doJetID = True,
jetIdLabel = "ak5",
outputModule = ''
)
#--------------------------------------------------------------------------------
process.load('JetMETCorrections.Configuration.DefaultJEC_cff')
#--------------------------------------------------------------------------------
#
# configure Jet Energy Corrections
#
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.jec = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0)
),
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_Jec11V2_AK5PF'),
label = cms.untracked.string('AK5PF')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_Jec11V2_AK5Calo'),
label = cms.untracked.string('AK5Calo')
)
),
connect = cms.string('sqlite_fip:TauAnalysis/Configuration/data/Jec11V2.db')
)
process.es_prefer_jec = cms.ESPrefer('PoolDBESSource', 'jec')
#--------------------------------------------------------------------------------
process.load('RecoJets.Configuration.RecoPFJets_cff')
process.kt6PFJets.doRhoFastjet = True
process.kt6PFJets.Rho_EtaMax = cms.double(4.4)
#process.kt6PFJets.Ghost_EtaMax = cms.double(5.0)
process.ak5PFJets.doAreaFastjet = True
process.ak5PFJets.Rho_EtaMax = cms.double(4.4)
#process.ak5PFJets.Ghost_EtaMax = cms.double(5.0)
## re-run kt4PFJets within lepton acceptance to compute rho
process.load('RecoJets.JetProducers.kt4PFJets_cfi')
process.kt6PFJetsCentral = process.kt4PFJets.clone( rParam = 0.6, doRhoFastjet = True )
process.kt6PFJetsCentral.Rho_EtaMax = cms.double(2.5)
process.fjSequence = cms.Sequence(process.kt6PFJets+process.ak5PFJets+process.kt6PFJetsCentral)
## remove certain objects from the default sequence
#removeAllPATObjectsBut(process, ['Muons', 'Electrons', 'Taus', 'METs'])
# removeSpecificPATObjects(process, ['Electrons', 'Muons', 'Taus'])
from PhysicsTools.PatAlgos.tools.tauTools import *
switchToPFTauHPS(process) # For HPS Taus
#switchToPFTauHPSpTaNC(process) # For HPS TaNC Taus
# require scraping filter
process.scrapingVeto = cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False),
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.2)
)
addSelectedPFlowParticle(process)
# Embed user-defined variables (vertex-related quantities) into the pat::Tau
# collection via the project-local TausUserEmbedded producer.
process.tauVariables = cms.EDProducer('TausUserEmbedded',
    tauTag = cms.InputTag("patTaus"),
    vertexTag = cms.InputTag("offlinePrimaryVerticesWithBS")
    )
# Same embedding for pat::Muon objects.
process.muonVariables = cms.EDProducer('MuonsUserEmbedded',
    muonTag = cms.InputTag("patMuons"),
    vertexTag = cms.InputTag("offlinePrimaryVerticesWithBS")
    )
# Electron embedding additionally evaluates the MVA electron ID, loading
# one BDT weight file per (subdetector, pT-range) category (0-5).
process.electronVariables = cms.EDProducer('ElectronsUserEmbedder',
    electronTag = cms.InputTag("patElectrons"),
    vertexTag = cms.InputTag("offlinePrimaryVerticesWithBS"),
    isMC = cms.bool(True),
    doMVA = cms.bool(True),
    inputFileName0 = cms.FileInPath("UserCode/MitPhysics/data/ElectronMVAWeights/Subdet0LowPt_NoIPInfo_BDTG.weights.xml"),
    inputFileName1 = cms.FileInPath("UserCode/MitPhysics/data/ElectronMVAWeights/Subdet1LowPt_NoIPInfo_BDTG.weights.xml"),
    inputFileName2 = cms.FileInPath("UserCode/MitPhysics/data/ElectronMVAWeights/Subdet2LowPt_NoIPInfo_BDTG.weights.xml"),
    inputFileName3 = cms.FileInPath("UserCode/MitPhysics/data/ElectronMVAWeights/Subdet0HighPt_NoIPInfo_BDTG.weights.xml"),
    inputFileName4 = cms.FileInPath("UserCode/MitPhysics/data/ElectronMVAWeights/Subdet1HighPt_NoIPInfo_BDTG.weights.xml"),
    inputFileName5 = cms.FileInPath("UserCode/MitPhysics/data/ElectronMVAWeights/Subdet2HighPt_NoIPInfo_BDTG.weights.xml"),
    )
## let it run
process.p = cms.Path(
process.scrapingVeto *
process.PFTau *
process.fjSequence *
process.patDefaultSequence *
process.muonVariables *
process.electronVariables *
process.tauVariables
)
################################################################################################
### P r e p a r a t i o n o f t h e P A T O b j e c t s f r o m A O D ###
################################################################################################
## pat sequences to be loaded:
#process.load("PhysicsTools.PFCandProducer.PF2PAT_cff")
process.load("PhysicsTools.PatAlgos.patSequences_cff")
#process.load("PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cff")
# load the coreTools of PAT
from PhysicsTools.PatAlgos.tools.metTools import *
addTcMET(process, 'TC')
addPfMET(process, 'PF')
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## modify the final pat sequence: keep only electrons + METS (muons are needed for met corrections)
process.load("RecoEgamma.EgammaIsolationAlgos.egammaIsolationSequence_cff")
#process.patElectronIsolation = cms.Sequence(process.egammaIsolationSequence)
process.patElectrons.isoDeposits = cms.PSet()
process.patElectrons.userIsolation = cms.PSet()
process.patElectrons.addElectronID = cms.bool(True)
process.patElectrons.electronIDSources = cms.PSet(
simpleEleId95relIso= cms.InputTag("simpleEleId95relIso"),
simpleEleId90relIso= cms.InputTag("simpleEleId90relIso"),
simpleEleId85relIso= cms.InputTag("simpleEleId85relIso"),
simpleEleId80relIso= cms.InputTag("simpleEleId80relIso"),
simpleEleId70relIso= cms.InputTag("simpleEleId70relIso"),
simpleEleId60relIso= cms.InputTag("simpleEleId60relIso"),
simpleEleId95cIso= cms.InputTag("simpleEleId95cIso"),
simpleEleId90cIso= cms.InputTag("simpleEleId90cIso"),
simpleEleId85cIso= cms.InputTag("simpleEleId85cIso"),
simpleEleId80cIso= cms.InputTag("simpleEleId80cIso"),
simpleEleId70cIso= cms.InputTag("simpleEleId70cIso"),
simpleEleId60cIso= cms.InputTag("simpleEleId60cIso"),
)
##
process.patElectrons.addGenMatch = cms.bool(False)
process.patElectrons.embedGenMatch = cms.bool(False)
#process.patElectrons.usePV = cms.bool(False)
##
process.load("ElectroWeakAnalysis.WENu.simpleEleIdSequence_cff")
# you have to tell the ID that it is data
process.simpleEleId95relIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId90relIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId85relIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId80relIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId70relIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId60relIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId95cIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId90cIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId85cIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId80cIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId70cIso.dataMagneticFieldSetUp = cms.bool(True)
process.simpleEleId60cIso.dataMagneticFieldSetUp = cms.bool(True)
#
process.patElectronIDs = cms.Sequence(process.simpleEleIdSequence)
process.makePatElectrons = cms.Sequence(process.patElectronIDs*process.patElectrons)
# process.makePatMuons may be needed depending on how you calculate the MET
#process.makePatCandidates = cms.Sequence(process.makePatElectrons+process.makePatMETs)
#process.patDefaultSequence = cms.Sequence(process.makePatCandidates)
##
## ################################################################################
## remove MC matching from the default sequence
#removeMCMatching(process, ['All'])
#runOnData(process)
addPFMuonIsolation(process,process.patMuons)
addPFElectronIsolation(process,process.patElectrons)
#
#process.GlobalTag.globaltag = "START41_V0::All" ## (according to https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions)
# ##
process.source.fileNames = [ ##
#'/store/mc/Summer11/DYJetsToLL_TuneZ2_M-50_7TeV-madgraph-tauola/AODSIM/PU_S4_START42_V11-v1/0000/FED85C0E-A89C-E011-A90D-E0CB4E19F9B7.root',
#'file:vh120_emt_emu_events.root'
'/store/mc/Fall11/WH_ZH_TTH_HToTauTau_M-120_7TeV-pythia6-tauola/AODSIM/PU_S6_START42_V14B-v1/0000/0870F5FC-B9F8-E011-B97A-E0CB4EA0A8EA.root',
'/store/mc/Fall11/WH_ZH_TTH_HToTauTau_M-120_7TeV-pythia6-tauola/AODSIM/PU_S6_START42_V14B-v1/0000/12D19FBC-9EF8-E011-8FA3-90E6BAE8CC1C.root',
'/store/mc/Fall11/WH_ZH_TTH_HToTauTau_M-120_7TeV-pythia6-tauola/AODSIM/PU_S6_START42_V14B-v1/0000/167A26E4-A7F8-E011-B653-00261834B5B1.root',
'/store/mc/Fall11/WH_ZH_TTH_HToTauTau_M-120_7TeV-pythia6-tauola/AODSIM/PU_S6_START42_V14B-v1/0000/0E9F1594-96F8-E011-9F0A-E0CB4E1A118D.root',
] ## (e.g. 'file:AOD.root')
# ##
process.maxEvents.input = -1 ## (e.g. -1 to run on all events)
# ##
process.out.outputCommands = [ 'keep *'
] ## (e.g. taken from PhysicsTools/PatAlgos/python/patEventContent_cff.py)
# ##
process.out.fileName = '/lustre/cms/store/user/calabria/Data/PAT2011_NoSkim_DATA_New/patTuple_WH120_lustre_2.root' ## (e.g. 'myTuple.root')
# ##
process.options.wantSummary = False ## (to suppress the long output at the end of the job)
| [
"cesare.calabria23@gmail.com"
] | cesare.calabria23@gmail.com |
80678ceb51847e3b26747328eba3de5a01e8f40b | 27c94d7e040902d3cdadd5862b15e67ec2ee4b6e | /xautodl/models/__init__.py | 5f57daf2c399af9b12b76b52aea6db637943ebb6 | [
"MIT"
] | permissive | D-X-Y/AutoDL-Projects | 8a0779a7710d809af2b052787928d8d34c14d0d9 | f46486e21b71ae6459a700be720d7648b5429569 | refs/heads/main | 2023-08-13T10:53:49.550889 | 2022-04-24T22:18:16 | 2022-04-24T22:18:16 | 168,538,768 | 989 | 197 | MIT | 2022-04-24T22:16:21 | 2019-01-31T14:30:50 | Python | UTF-8 | Python | false | false | 12,249 | py | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
from os import path as osp
from typing import List, Text
import torch
__all__ = [
"change_key",
"get_cell_based_tiny_net",
"get_search_spaces",
"get_cifar_models",
"get_imagenet_models",
"obtain_model",
"obtain_search_model",
"load_net_from_checkpoint",
"CellStructure",
"CellArchitectures",
]
# useful modules
from xautodl.config_utils import dict2config
from .SharedUtils import change_key
from .cell_searchs import CellStructure, CellArchitectures
# Cell-based NAS Models
def get_cell_based_tiny_net(config):
    """Build a cell-based NAS network (search model or inference model) from `config`.

    `config` may be a plain dict (converted via dict2config) or a config
    namespace; dispatch is on `config.super_type` and `config.name`.
    Raises ValueError for unknown names and NotImplementedError for
    'infer.nasnet-cifar'.
    """
    if isinstance(config, dict):
        config = dict2config(config, None)  # to support the argument being a dict
    super_type = getattr(config, "super_type", "basic")
    group_names = ["DARTS-V1", "DARTS-V2", "GDAS", "SETN", "ENAS", "RANDOM", "generic"]
    # Weight-sharing super-networks for the NAS-Bench-201 topology space.
    if super_type == "basic" and config.name in group_names:
        from .cell_searchs import nas201_super_nets as nas_super_nets
        try:
            return nas_super_nets[config.name](
                config.C,
                config.N,
                config.max_nodes,
                config.num_classes,
                config.space,
                config.affine,
                config.track_running_stats,
            )
        # NOTE(review): bare `except:` falls back to the legacy 5-argument
        # constructor; it also swallows AttributeError when `config` lacks
        # affine/track_running_stats -- consider narrowing to
        # (TypeError, AttributeError). Left as-is to preserve behavior.
        except:
            return nas_super_nets[config.name](
                config.C, config.N, config.max_nodes, config.num_classes, config.space
            )
    # Size-search (channel-candidate) super-network with a fixed topology.
    elif super_type == "search-shape":
        from .shape_searchs import GenericNAS301Model
        genotype = CellStructure.str2structure(config.genotype)
        return GenericNAS301Model(
            config.candidate_Cs,
            config.max_num_Cs,
            genotype,
            config.num_classes,
            config.affine,
            config.track_running_stats,
        )
    # NASNet-style (DARTS search space) super-network.
    elif super_type == "nasnet-super":
        from .cell_searchs import nasnet_super_nets as nas_super_nets
        return nas_super_nets[config.name](
            config.C,
            config.N,
            config.steps,
            config.multiplier,
            config.stem_multiplier,
            config.num_classes,
            config.space,
            config.affine,
            config.track_running_stats,
        )
    # Inference network for a fixed genotype (given directly or as a string).
    elif config.name == "infer.tiny":
        from .cell_infers import TinyNetwork
        if hasattr(config, "genotype"):
            genotype = config.genotype
        elif hasattr(config, "arch_str"):
            genotype = CellStructure.str2structure(config.arch_str)
        else:
            raise ValueError(
                "Can not find genotype from this config : {:}".format(config)
            )
        return TinyNetwork(config.C, config.N, genotype, config.num_classes)
    # Inference network with per-stage channel counts (possibly "a:b:c" string).
    elif config.name == "infer.shape.tiny":
        from .shape_infers import DynamicShapeTinyNet
        if isinstance(config.channels, str):
            channels = tuple([int(x) for x in config.channels.split(":")])
        else:
            channels = config.channels
        genotype = CellStructure.str2structure(config.genotype)
        return DynamicShapeTinyNet(channels, genotype, config.num_classes)
    elif config.name == "infer.nasnet-cifar":
        from .cell_infers import NASNetonCIFAR
        raise NotImplementedError
    else:
        raise ValueError("invalid network name : {:}".format(config.name))
# obtain the search space, i.e., a dict mapping the operation name into a python-function for this op
def get_search_spaces(xtype, name) -> List[Text]:
    """Return the operation set ("cell"/"tss") or size candidates ("sss")
    for the named search space; raise ValueError for anything else."""
    if xtype in ("cell", "tss"):  # the topology search space
        from .cell_operations import SearchSpaceNames

        assert name in SearchSpaceNames, "invalid name [{:}] in {:}".format(
            name, SearchSpaceNames.keys()
        )
        return SearchSpaceNames[name]
    if xtype == "sss":  # the size search space
        if name not in ("nats-bench", "nats-bench-size"):
            raise ValueError("Invalid name : {:}".format(name))
        return {"candidates": [8, 16, 24, 32, 40, 48, 56, 64], "numbers": 5}
    raise ValueError("invalid search-space type is {:}".format(xtype))
def get_cifar_models(config, extra_path=None):
    """Build a CIFAR network from `config`.

    `config.super_type` selects between hand-designed models ("basic") and
    NAS-derived inference models ("infer-<mode>"). `extra_path` optionally
    points to a checkpoint from which the genotype is reloaded (only used
    by the "infer-nasnet.cifar" mode). Raises ValueError on unknown types.
    """
    super_type = getattr(config, "super_type", "basic")
    if super_type == "basic":
        from .CifarResNet import CifarResNet
        from .CifarDenseNet import DenseNet
        from .CifarWideResNet import CifarWideResNet
        if config.arch == "resnet":
            return CifarResNet(
                config.module, config.depth, config.class_num, config.zero_init_residual
            )
        elif config.arch == "densenet":
            return DenseNet(
                config.growthRate,
                config.depth,
                config.reduction,
                config.class_num,
                config.bottleneck,
            )
        elif config.arch == "wideresnet":
            return CifarWideResNet(
                config.depth, config.wide_factor, config.class_num, config.dropout
            )
        else:
            raise ValueError("invalid module type : {:}".format(config.arch))
    elif super_type.startswith("infer"):
        from .shape_infers import InferWidthCifarResNet
        from .shape_infers import InferDepthCifarResNet
        from .shape_infers import InferCifarResNet
        from .cell_infers import NASNetonCIFAR
        # super_type is expected to look like "infer-<mode>".
        assert len(super_type.split("-")) == 2, "invalid super_type : {:}".format(
            super_type
        )
        infer_mode = super_type.split("-")[1]
        if infer_mode == "width":
            return InferWidthCifarResNet(
                config.module,
                config.depth,
                config.xchannels,
                config.class_num,
                config.zero_init_residual,
            )
        elif infer_mode == "depth":
            return InferDepthCifarResNet(
                config.module,
                config.depth,
                config.xblocks,
                config.class_num,
                config.zero_init_residual,
            )
        elif infer_mode == "shape":
            return InferCifarResNet(
                config.module,
                config.depth,
                config.xblocks,
                config.xchannels,
                config.class_num,
                config.zero_init_residual,
            )
        elif infer_mode == "nasnet.cifar":
            genotype = config.genotype
            if extra_path is not None:  # reload genotype by extra_path
                if not osp.isfile(extra_path):
                    raise ValueError("invalid extra_path : {:}".format(extra_path))
                # NOTE(review): torch.load unpickles -- only use trusted files.
                xdata = torch.load(extra_path)
                current_epoch = xdata["epoch"]
                genotype = xdata["genotypes"][current_epoch - 1]
            # Fall back to legacy config field names when C/N are absent.
            C = config.C if hasattr(config, "C") else config.ichannel
            N = config.N if hasattr(config, "N") else config.layers
            return NASNetonCIFAR(
                C, N, config.stem_multi, config.class_num, genotype, config.auxiliary
            )
        else:
            raise ValueError("invalid infer-mode : {:}".format(infer_mode))
    else:
        raise ValueError("invalid super-type : {:}".format(super_type))
def get_imagenet_models(config):
    """Build an ImageNet network from `config`.

    "basic" super_type yields hand-designed ResNet / MobileNetV2; an
    "infer-shape" super_type yields NAS-searched inference variants.
    Raises ValueError on unknown arch / mode / super-type.
    """
    super_type = getattr(config, "super_type", "basic")
    if super_type == "basic":
        from .ImageNet_ResNet import ResNet
        from .ImageNet_MobileNetV2 import MobileNetV2
        if config.arch == "resnet":
            return ResNet(
                config.block_name,
                config.layers,
                config.deep_stem,
                config.class_num,
                config.zero_init_residual,
                config.groups,
                config.width_per_group,
            )
        elif config.arch == "mobilenet_v2":
            return MobileNetV2(
                config.class_num,
                config.width_multi,
                config.input_channel,
                config.last_channel,
                "InvertedResidual",
                config.dropout,
            )
        else:
            raise ValueError("invalid arch : {:}".format(config.arch))
    elif super_type.startswith("infer"):  # NAS searched architecture
        # super_type is expected to look like "infer-<mode>".
        assert len(super_type.split("-")) == 2, "invalid super_type : {:}".format(
            super_type
        )
        infer_mode = super_type.split("-")[1]
        if infer_mode == "shape":
            from .shape_infers import InferImagenetResNet
            from .shape_infers import InferMobileNetV2
            if config.arch == "resnet":
                return InferImagenetResNet(
                    config.block_name,
                    config.layers,
                    config.xblocks,
                    config.xchannels,
                    config.deep_stem,
                    config.class_num,
                    config.zero_init_residual,
                )
            elif config.arch == "MobileNetV2":
                return InferMobileNetV2(
                    config.class_num, config.xchannels, config.xblocks, config.dropout
                )
            else:
                raise ValueError("invalid arch-mode : {:}".format(config.arch))
        else:
            raise ValueError("invalid infer-mode : {:}".format(infer_mode))
    else:
        raise ValueError("invalid super-type : {:}".format(super_type))
# Try to obtain the network by config.
def obtain_model(config, extra_path=None):
    """Dispatch to the dataset-specific model builder.

    `extra_path` is only forwarded to the CIFAR builder (optional genotype
    checkpoint); a ValueError is raised for unrecognized datasets.
    """
    dataset = config.dataset
    if dataset == "cifar":
        return get_cifar_models(config, extra_path)
    if dataset == "imagenet":
        return get_imagenet_models(config)
    raise ValueError("invalid dataset in the model config : {:}".format(config))
def obtain_search_model(config):
    """Build a shape-search (width/depth/shape) super-network from `config`.

    Dispatch is on `config.dataset`, then `config.arch` and
    `config.search_mode`; unsupported combinations raise ValueError.
    """
    if config.dataset == "cifar":
        if config.arch == "resnet":
            from .shape_searchs import SearchWidthCifarResNet
            from .shape_searchs import SearchDepthCifarResNet
            from .shape_searchs import SearchShapeCifarResNet
            if config.search_mode == "width":
                return SearchWidthCifarResNet(
                    config.module, config.depth, config.class_num
                )
            elif config.search_mode == "depth":
                return SearchDepthCifarResNet(
                    config.module, config.depth, config.class_num
                )
            elif config.search_mode == "shape":
                return SearchShapeCifarResNet(
                    config.module, config.depth, config.class_num
                )
            else:
                raise ValueError("invalid search mode : {:}".format(config.search_mode))
        elif config.arch == "simres":
            from .shape_searchs import SearchWidthSimResNet
            # simres only supports width search.
            if config.search_mode == "width":
                return SearchWidthSimResNet(config.depth, config.class_num)
            else:
                raise ValueError("invalid search mode : {:}".format(config.search_mode))
        else:
            raise ValueError(
                "invalid arch : {:} for dataset [{:}]".format(
                    config.arch, config.dataset
                )
            )
    elif config.dataset == "imagenet":
        from .shape_searchs import SearchShapeImagenetResNet
        # ImageNet search only supports the joint "shape" mode.
        assert config.search_mode == "shape", "invalid search-mode : {:}".format(
            config.search_mode
        )
        if config.arch == "resnet":
            return SearchShapeImagenetResNet(
                config.block_name, config.layers, config.deep_stem, config.class_num
            )
        else:
            raise ValueError("invalid model config : {:}".format(config))
    else:
        raise ValueError("invalid dataset in the model config : {:}".format(config))
def load_net_from_checkpoint(checkpoint):
    """Rebuild a model from a checkpoint file and load its weights.

    The checkpoint is expected to contain the keys "model-config" and
    "base-model" (state dict). Returns the restored model.
    """
    # NOTE(review): `assert` is stripped under `python -O`; an explicit
    # FileNotFoundError would be safer, but callers may catch AssertionError.
    assert osp.isfile(checkpoint), "checkpoint {:} does not exist".format(checkpoint)
    # Rebinds the name: `checkpoint` is a path above, a dict below.
    # torch.load unpickles -- only load trusted checkpoint files.
    checkpoint = torch.load(checkpoint)
    model_config = dict2config(checkpoint["model-config"], None)
    model = obtain_model(model_config)
    model.load_state_dict(checkpoint["base-model"])
    return model
| [
"280835372@qq.com"
] | 280835372@qq.com |
956ba6af02f0df809334dadc3cfd857eb38648f2 | a9ce7176631ebc3bb8188d6aa3c2be09137fb43a | /migrate/0002_add_column_locationrating_hand_aligned.py | 3c4d8d3b146229be084bab19514da5cf584021a0 | [
"MIT"
] | permissive | andrewhead/Search-Task-Analysis | 0081c8c0ad6682c7e4a87aad1af4a57d18287137 | ef73745a760b5c2ec7060488219bb29237c26464 | refs/heads/master | 2020-05-21T20:04:36.299697 | 2016-09-19T00:39:06 | 2016-09-19T00:39:06 | 60,795,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from playhouse.migrate import migrate
from peewee import BooleanField
logger = logging.getLogger('data')
def forward(migrator):
migrate(
migrator.add_column('locationrating', 'hand_aligned', BooleanField(default=False)),
)
| [
"head.andrewm@gmail.com"
] | head.andrewm@gmail.com |
c765e708dfb60a128b9ad5a48c0653d87f25b641 | aac5982c8dcf26221419086fb90c399b9f4324ef | /DFTB/MolecularIntegrals/hmi_continuum.py | 291a9f0fcdc3962b4c49d702ff72f7bc88311c0f | [] | no_license | by-student-2017/DFTBaby-0.1.0-31Jul2019 | 99184d3fa2976d4e02f7f1bddee97e56526d9365 | 92cb73f1a6472f88588986561349d7f2ad1b1c15 | refs/heads/master | 2022-12-12T00:12:50.449505 | 2020-09-01T21:05:59 | 2020-09-01T21:05:59 | 290,116,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | #!/usr/bin/env python
from DFTB.MolecularIntegrals import settings
from DFTB.MolecularIntegrals.BasissetFreeDFT import BasissetFreeDFT
import numpy as np
def hmi_continuum(l, m, E):
    """
    compute continuum orbitals of the hydrogen molecular ion H2+

    Parameters
    ----------
    l,m        :  angular quantum numbers of asymptotic solution
                  e.g.  l=0,m=0   s-orbital
                        l=1,m=+1  px-orbital
    E          :  energy (in a.u.) of continuum orbital, E = 1/2 k^2
    """
    # H2^+ geometry: two protons symmetric about the origin,
    # internuclear distance of 2 bohr
    bond_length = 2.0
    half_bond = bond_length / 2.0
    atomlist = [
        (1, (0.0, 0.0, -half_bond)),
        (1, (0.0, 0.0, +half_bond)),
    ]
    # resolution of the multicenter grids used for the continuum orbitals
    settings.radial_grid_factor = 120  # controls size of radial grid
    settings.lebedev_order = 25  # controls size of angular grid
    solver = BasissetFreeDFT(atomlist, None, charge=+1)

    # One-electron system: there is no other electron density and no
    # occupied orbital, so both callbacks return zero everywhere.
    def zero_density(x, y, z):
        return 0 * x

    def zero_homo(x, y, z):
        return 0 * x

    delta, phi = solver.solveScatteringProblem(zero_density, zero_homo, E, l, m)
if __name__ == "__main__":
    import sys
    import os.path

    # Command-line driver: expects the three positional arguments l, m, E.
    args = sys.argv[1:]
    if len(args) < 3:
        usage = """
        Usage:
        %s l m E
        compute the continuum orbital of H2^+ (hydrogen molecular ion)
        Parameters:
        l,m - integers, -l <= m <= l, angular quantum numbers
        of asymptotic solution
        E - float, energy of continuum orbital is E = 1/2 k^2
        """ % os.path.basename(sys.argv[0])
        # print(...) works under both Python 2 and 3; the original
        # `print usage` statement is Python-2-only syntax.
        print(usage)
        # sys.exit instead of the bare exit() builtin: exit() is provided
        # by the `site` module and is not guaranteed in all interpreters.
        sys.exit(-1)
    l = int(args[0])
    m = int(args[1])
    E = float(args[2])
    hmi_continuum(l, m, E)
| [
"studentsctest@gmail.com"
] | studentsctest@gmail.com |
310e89e57e8b49add71c684c7faba652eff81f6b | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/hIndex_20200730204441.py | 7a49df44b99367a612d1fa52a7d722fd611b7328 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | def Hindex(citations):
result = 0
citations.sort()
for i in range(len(citations)-1,0,-1):
cnt = len(citations) -i
if citations[i] >
print('i',i,'cnt',cnt)
Hindex([3,0,6,1,5]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
36807336601be9e76fa772e435e3ca35fe6b9a9f | 7f523c407d45d116860eff67f079e807f2b53339 | /src/third_party/capstone/bindings/python/capstone/ppc.py | 6ab177db1b88c484534778579eefa4aa0e8e5be7 | [
"MIT",
"BSD-3-Clause",
"NCSA"
] | permissive | 0vercl0k/rp | a352c96bfe3715eb9ce8c5942831123e65289dac | b24e7f58a594aaf0ce3771745bf06862f6ecc074 | refs/heads/master | 2023-08-30T08:03:14.842828 | 2023-08-09T00:41:00 | 2023-08-09T00:41:00 | 3,554,173 | 1,557 | 239 | MIT | 2023-08-09T00:41:02 | 2012-02-26T19:26:33 | C++ | UTF-8 | Python | false | false | 1,321 | py | # Capstone Python bindings, by Nguyen Anh Quynnh <aquynh@gmail.com>
import ctypes
from . import copy_ctypes_list
from .ppc_const import *
# define the API
class PpcOpMem(ctypes.Structure):
    """Memory operand of a PPC instruction (mirrors capstone's cs_ppc_op.mem)."""
    _fields_ = (
        ('base', ctypes.c_uint),   # base register id (values from ppc_const)
        ('disp', ctypes.c_int32),  # signed displacement added to the base register
    )
class PpcOpCrx(ctypes.Structure):
    """Condition-register (CRx) operand (mirrors capstone's cs_ppc_op.crx)."""
    _fields_ = (
        ('scale', ctypes.c_uint),  # scale applied to the CR field
        ('reg', ctypes.c_uint),    # condition register id
        ('cond', ctypes.c_uint),   # condition code tested on that register
    )
class PpcOpValue(ctypes.Union):
    """Payload union for one operand; the valid member is selected by the
    enclosing PpcOp's ``type`` field."""
    _fields_ = (
        ('reg', ctypes.c_uint),   # register id
        ('imm', ctypes.c_int64),  # immediate value
        ('mem', PpcOpMem),        # memory reference (base + displacement)
        ('crx', PpcOpCrx),        # condition-register reference
    )
class PpcOp(ctypes.Structure):
    """One PPC instruction operand: a ``type`` discriminator plus the
    matching payload inside the ``value`` union (exposed via properties)."""
    _fields_ = (
        ('type', ctypes.c_uint),  # operand kind; selects which union member is valid
        ('value', PpcOpValue),
    )
    @property
    def imm(self):
        """Immediate payload (meaningful when ``type`` selects an immediate)."""
        return self.value.imm
    @property
    def reg(self):
        """Register payload (meaningful when ``type`` selects a register)."""
        return self.value.reg
    @property
    def mem(self):
        """Memory payload: base register plus displacement."""
        return self.value.mem
    @property
    def crx(self):
        """Condition-register payload."""
        return self.value.crx
class CsPpc(ctypes.Structure):
    """PPC-specific instruction detail (mirrors capstone's cs_ppc struct).
    Field order/types define the C ABI layout and must not change."""
    _fields_ = (
        ('bc', ctypes.c_uint),          # branch condition code
        ('bh', ctypes.c_uint),          # branch hint
        ('update_cr0', ctypes.c_bool),  # True if the instruction updates CR0
        ('op_count', ctypes.c_uint8),   # number of valid entries in `operands`
        ('operands', PpcOp * 8),        # fixed-size array; only first op_count are valid
    )
def get_arch_info(a):
    """Unpack a CsPpc detail record into (bc, bh, update_cr0, operands),
    copying only the first ``op_count`` operands out of the ctypes array."""
    valid_operands = copy_ctypes_list(a.operands[:a.op_count])
    return (a.bc, a.bh, a.update_cr0, valid_operands)
| [
"noreply@github.com"
] | 0vercl0k.noreply@github.com |
602750c19d6f198161d370a3d29d26e2b4708df9 | 7410903c6cd5ef35c592af00c934fb21c369cbf2 | /00_Code/01_LeetCode/781_RabbitsinForest.py | d63e85b18090554a3b65338b566a90711bd3ff96 | [
"MIT"
] | permissive | KartikKannapur/Algorithms | f4e4726170599db0622d18e8c06a382e9bce9e77 | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | refs/heads/master | 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | """
In a forest, each rabbit has some color. Some subset of rabbits (possibly all of them) tell you how many other rabbits have the same color as them. Those answers are placed in an array.
Return the minimum number of rabbits that could be in the forest.
Examples:
Input: answers = [1, 1, 2]
Output: 5
Explanation:
The two rabbits that answered "1" could both be the same color, say red.
The rabbit that answered "2" can't be red or the answers would be inconsistent.
Say the rabbit that answered "2" was blue.
Then there should be 2 other blue rabbits in the forest that didn't answer into the array.
The smallest possible number of rabbits in the forest is therefore 5: 3 that answered plus 2 that didn't.
Input: answers = [10, 10, 10]
Output: 11
Input: answers = []
Output: 0
"""
class Solution:
def numRabbits(self, answers):
"""
:type answers: List[int]
:rtype: int
"""
"""
Method 1:
"""
hasSeen = {}
result = 0
for num in answers:
if num not in hasSeen:
result += (num + 1)
hasSeen[num] = num
else:
hasSeen[num] -= 1
if hasSeen[num] == 0:
del hasSeen[num]
return result
| [
"kartikkannapur@gmail.com"
] | kartikkannapur@gmail.com |
376b7555eaeb64dd1701034102e96eb1f46a0cd8 | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/fleet/azext_fleet/vendored_sdks/v2022_07_02_preview/operations/_fleets_operations.py | 1017f9c132abcef964e35bd610d30a18a06d7af6 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 51,536 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# Generic return type used by the optional per-call `cls` response hook.
T = TypeVar("T")
# Signature of that hook: (pipeline response, deserialized body, headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by all request-builder functions below;
# client-side parameter validation is turned off.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str,
    fleet_name: str,
    subscription_id: str,
    *,
    if_match: Optional[str] = None,
    if_none_match: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request for Fleets create-or-update.

    ``if_match`` / ``if_none_match`` are emitted as ``If-Match`` /
    ``If-None-Match`` headers only when they are not ``None``.
    (AutoRest-generated code -- regenerate rather than hand-edit.)
    """
    # `or {}` treats an explicitly passed None the same as absent.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied values win; otherwise fall back to this module's API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "fleetName": _SERIALIZER.url(
            "fleet_name", fleet_name, "str", max_length=63, min_length=1, pattern=r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
        ),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if if_match is not None:
        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
    if if_none_match is not None:
        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
    resource_group_name: str, fleet_name: str, subscription_id: str, *, if_match: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PATCH request for Fleets update.

    ``if_match`` is emitted as the ``If-Match`` header (optimistic
    concurrency) only when it is not ``None``.
    (AutoRest-generated code -- regenerate rather than hand-edit.)
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied values win; otherwise fall back to this module's API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "fleetName": _SERIALIZER.url(
            "fleet_name", fleet_name, "str", max_length=63, min_length=1, pattern=r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
        ),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if if_match is not None:
        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(resource_group_name: str, fleet_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request that retrieves a single Fleet resource.
    (AutoRest-generated code -- regenerate rather than hand-edit.)
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied values win; otherwise fall back to this module's API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "fleetName": _SERIALIZER.url(
            "fleet_name", fleet_name, "str", max_length=63, min_length=1, pattern=r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
        ),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
    resource_group_name: str, fleet_name: str, subscription_id: str, *, if_match: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for a Fleet resource.

    ``if_match`` is emitted as the ``If-Match`` header only when not ``None``.
    (AutoRest-generated code -- regenerate rather than hand-edit.)
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied values win; otherwise fall back to this module's API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "fleetName": _SERIALIZER.url(
            "fleet_name", fleet_name, "str", max_length=63, min_length=1, pattern=r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
        ),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if if_match is not None:
        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request that lists Fleets in one resource group.
    (AutoRest-generated code -- regenerate rather than hand-edit.)
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied values win; otherwise fall back to this module's API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request that lists all Fleets in a subscription.
    (AutoRest-generated code -- regenerate rather than hand-edit.)
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied values win; otherwise fall back to this module's API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/fleets")
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_credentials_request(
    resource_group_name: str, fleet_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request for the Fleet listCredentials action.
    (AutoRest-generated code -- regenerate rather than hand-edit.)
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied values win; otherwise fall back to this module's API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}/listCredentials",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "fleetName": _SERIALIZER.url(
            "fleet_name", fleet_name, "str", max_length=63, min_length=1, pattern=r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
        ),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class FleetsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.containerservice.v2022_07_02_preview.ContainerServiceClient`'s
        :attr:`fleets` attribute.
    """
    # Alias to the versioned models module so callers can reach the
    # request/response model classes via `client.fleets.models`.
    models = _models
    def __init__(self, *args, **kwargs):
        """Store the shared client, config and (de)serializer handed over by
        the service client; each may arrive positionally or by keyword."""
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        fleet_name: str,
        parameters: Union[_models.Fleet, IO],
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        **kwargs: Any
    ) -> _models.Fleet:
        """Send the initial PUT of the create-or-update long-running operation
        and return the Fleet deserialized from the immediate 200/201 response.
        Polling to completion is handled by :meth:`begin_create_or_update`."""
        # Map the "expected" HTTP failure codes onto azure-core exception
        # types; callers may extend/override via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Fleet]
        content_type = content_type or "application/json"
        # Streams/bytes are sent as-is; model instances are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "Fleet")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            fleet_name=fleet_name,
            subscription_id=self._config.subscription_id,
            if_match=if_match,
            if_none_match=if_none_match,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Anything other than 200 (updated) / 201 (created) is an error.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("Fleet", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("Fleet", pipeline_response)
        # Optional caller hook receives the raw response as well.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}"}  # type: ignore
    # typing-only overload: `parameters` supplied as a Fleet model instance.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        fleet_name: str,
        parameters: _models.Fleet,
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Fleet]:
        """Creates or updates a Fleet.

        Creates or updates a Fleet.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param fleet_name: The name of the Fleet resource. Required.
        :type fleet_name: str
        :param parameters: The Fleet to create or update. Required.
        :type parameters: ~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet
        :param if_match: Omit this value to always overwrite the current resource. Specify the
         last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is
         None.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new resource to be created and prevent updating an
         existing resource. Other values will result in a 412 Pre-condition Failed response. Default
         value is None.
        :type if_none_match: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Fleet or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # typing-only overload: `parameters` supplied as a binary stream (IO).
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        fleet_name: str,
        parameters: IO,
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Fleet]:
        """Creates or updates a Fleet.

        Creates or updates a Fleet.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param fleet_name: The name of the Fleet resource. Required.
        :type fleet_name: str
        :param parameters: The Fleet to create or update. Required.
        :type parameters: IO
        :param if_match: Omit this value to always overwrite the current resource. Specify the
         last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is
         None.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new resource to be created and prevent updating an
         existing resource. Other values will result in a 412 Pre-condition Failed response. Default
         value is None.
        :type if_none_match: str
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Fleet or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # runtime implementation backing both overloads above
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        fleet_name: str,
        parameters: Union[_models.Fleet, IO],
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        **kwargs: Any
    ) -> LROPoller[_models.Fleet]:
        """Creates or updates a Fleet.

        Creates or updates a Fleet.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param fleet_name: The name of the Fleet resource. Required.
        :type fleet_name: str
        :param parameters: The Fleet to create or update. Is either a model type or a IO type.
         Required.
        :type parameters: ~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet or IO
        :param if_match: Omit this value to always overwrite the current resource. Specify the
         last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is
         None.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new resource to be created and prevent updating an
         existing resource. Other values will result in a 412 Pre-condition Failed response. Default
         value is None.
        :type if_none_match: str
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Fleet or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Fleet]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # A continuation token means we are re-attaching to an LRO already in
        # flight, so the initial PUT is skipped.
        if cont_token is None:
            raw_result = self._create_or_update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                fleet_name=fleet_name,
                parameters=parameters,
                if_match=if_match,
                if_none_match=if_none_match,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        # Deserialize the final polled response, optionally routed through `cls`.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize("Fleet", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling=True -> ARM default poller; False -> no polling; otherwise a
        # caller-provided PollingMethod instance.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}"}  # type: ignore
    # typing-only overload: `parameters` supplied as a FleetPatch model.
    @overload
    def update(
        self,
        resource_group_name: str,
        fleet_name: str,
        if_match: Optional[str] = None,
        parameters: Optional[_models.FleetPatch] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Fleet:
        """Patches a fleet resource.

        Patches a fleet resource.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param fleet_name: The name of the Fleet resource. Required.
        :type fleet_name: str
        :param if_match: Omit this value to always overwrite the current resource. Specify the
         last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is
         None.
        :type if_match: str
        :param parameters: The properties of a Fleet to update. Default value is None.
        :type parameters: ~azure.mgmt.containerservice.v2022_07_02_preview.models.FleetPatch
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Fleet or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # typing-only overload: `parameters` supplied as a binary stream (IO).
    @overload
    def update(
        self,
        resource_group_name: str,
        fleet_name: str,
        if_match: Optional[str] = None,
        parameters: Optional[IO] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Fleet:
        """Patches a fleet resource.

        Patches a fleet resource.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param fleet_name: The name of the Fleet resource. Required.
        :type fleet_name: str
        :param if_match: Omit this value to always overwrite the current resource. Specify the
         last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is
         None.
        :type if_match: str
        :param parameters: The properties of a Fleet to update. Default value is None.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Fleet or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        fleet_name: str,
        if_match: Optional[str] = None,
        parameters: Optional[Union[_models.FleetPatch, IO]] = None,
        **kwargs: Any
    ) -> _models.Fleet:
        """Patches a fleet resource.

        Patches a fleet resource.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param fleet_name: The name of the Fleet resource. Required.
        :type fleet_name: str
        :param if_match: Omit this value to always overwrite the current resource. Specify the
         last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is
         None.
        :type if_match: str
        :param parameters: The properties of a Fleet to update. Is either a model type or a IO type.
         Default value is None.
        :type parameters: ~azure.mgmt.containerservice.v2022_07_02_preview.models.FleetPatch or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Fleet or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP error statuses onto azure-core exception types; callers may
        # extend or override the mapping through the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Fleet]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw stream/bytes bodies are passed through untouched; model input is
        # serialized to its JSON wire form (or omitted entirely when None).
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            if parameters is not None:
                _json = self._serialize.body(parameters, "FleetPatch")
            else:
                _json = None

        request = build_update_request(
            resource_group_name=resource_group_name,
            fleet_name=fleet_name,
            subscription_id=self._config.subscription_id,
            if_match=if_match,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        # Substitute the URL path parameters ({subscriptionId}, ...) before sending.
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the ARM error payload for a richer exception.
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Fleet", pipeline_response)

        # A caller-supplied "cls" hook gets the raw response instead of the model.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}"}  # type: ignore
@distributed_trace
def get(self, resource_group_name: str, fleet_name: str, **kwargs: Any) -> _models.Fleet:
"""Gets a Fleet.
Gets a Fleet.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param fleet_name: The name of the Fleet resource. Required.
:type fleet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Fleet or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.Fleet]
request = build_get_request(
resource_group_name=resource_group_name,
fleet_name=fleet_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Fleet", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}"} # type: ignore
    def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, fleet_name: str, if_match: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request of the long-running delete operation.

        Returns ``None`` (or the result of the caller-supplied ``cls`` hook);
        ``begin_delete`` wraps this in an LROPoller that tracks completion.
        """
        # Map HTTP error statuses onto azure-core exception types; callers may
        # extend or override the mapping through the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_delete_request(
            resource_group_name=resource_group_name,
            fleet_name=fleet_name,
            subscription_id=self._config.subscription_id,
            if_match=if_match,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # 200/202/204 are all acceptable first responses for an async ARM delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}"}  # type: ignore
    @distributed_trace
    def begin_delete(
        self, resource_group_name: str, fleet_name: str, if_match: Optional[str] = None, **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes a Fleet.

        Deletes a Fleet.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param fleet_name: The name of the Fleet resource. Required.
        :type fleet_name: str
        :param if_match: Omit this value to always overwrite the current resource. Specify the
         last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is
         None.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # With a continuation token the initial DELETE was already issued in a
        # previous session, so it is skipped and the poller is rehydrated below.
        if cont_token is None:
            raw_result = self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                fleet_name=fleet_name,
                if_match=if_match,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete has no body; only the optional "cls" hook produces a value.
            if cls:
                return cls(pipeline_response, None, {})

        # polling=True -> standard ARM polling that follows the Location header
        # to the terminal state; polling=False -> single-shot; otherwise the
        # caller provided their own PollingMethod.
        if polling is True:
            polling_method = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            )  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}"}  # type: ignore
    @distributed_trace
    def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Fleet"]:
        """Lists fleets in the specified subscription and resource group.

        Lists fleets in the specified subscription and resource group.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Fleet or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.FleetListResult]

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Closure over _headers/_params/api_version: builds either the first-page
        # request or a follow-up request to the server-supplied next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_resource_group.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        # Pull the page items and the link to the next page out of one response.
        def extract_data(pipeline_response):
            deserialized = self._deserialize("FleetListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetch one page; raises on any non-200 so iteration stops with an error.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(get_next, extract_data)

    list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets"}  # type: ignore
    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Fleet"]:
        """Lists fleets in the specified subscription.

        Lists fleets in the specified subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Fleet or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_07_02_preview.models.Fleet]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.FleetListResult]

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Closure over _headers/_params/api_version: builds either the first-page
        # request or a follow-up request to the server-supplied next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        # Pull the page items and the link to the next page out of one response.
        def extract_data(pipeline_response):
            deserialized = self._deserialize("FleetListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetch one page; raises on any non-200 so iteration stops with an error.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/fleets"}  # type: ignore
@distributed_trace
def list_credentials(
self, resource_group_name: str, fleet_name: str, **kwargs: Any
) -> _models.FleetCredentialResults:
"""Lists the user credentials of a Fleet.
Lists the user credentials of a Fleet.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param fleet_name: The name of the Fleet resource. Required.
:type fleet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FleetCredentialResults or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_07_02_preview.models.FleetCredentialResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-07-02-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.FleetCredentialResults]
request = build_list_credentials_request(
resource_group_name=resource_group_name,
fleet_name=fleet_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_credentials.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("FleetCredentialResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_credentials.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/fleets/{fleetName}/listCredentials"} # type: ignore
| [
"noreply@github.com"
] | VSChina.noreply@github.com |
fc470ad6da33016499e446b9ece69470a9b8d9a7 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20200414/codes/output/code090.py | 1bc796c03085d8ba177161a12962daf89860121c | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 180 | py | import pygal
# Build a pygal line chart with two series; dots_size sets the marker radius.
chart = pygal.Line()
chart.add('line', [.0002, .0005, .00035], dots_size=4)
chart.add('line', [.0004, .0009, .001], dots_size=12)
# is_unicode=True makes render() return str (SVG markup) instead of bytes.
print(chart.render(is_unicode=True))
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
a11a26a952f57d707df35bd758411f0eb76a9b4d | 9f84d91a8ae3df53b07fe3267992fba00a99ac9e | /torch_geometric/transforms/add_remaining_self_loops.py | 4150f30254ba8d1d07584b91f4e382795bd524ef | [
"MIT"
] | permissive | pyg-team/pytorch_geometric | ebea601eae228f3905465b5c2349d3fb3bb5cb26 | a52af694b8ce6a80811e20966fe6d08a3e7511fe | refs/heads/master | 2023-08-31T04:13:40.943308 | 2023-08-30T12:48:42 | 2023-08-30T12:48:42 | 106,024,057 | 6,775 | 1,563 | MIT | 2023-09-14T17:10:18 | 2017-10-06T16:03:03 | Python | UTF-8 | Python | false | false | 2,087 | py | from typing import Optional, Union
from torch import Tensor
from torch_geometric.data import Data, HeteroData
from torch_geometric.data.datapipes import functional_transform
from torch_geometric.transforms import BaseTransform
from torch_geometric.utils import add_remaining_self_loops
@functional_transform('add_remaining_self_loops')
class AddRemainingSelfLoops(BaseTransform):
    r"""Adds remaining self-loops to the given homogeneous or heterogeneous
    graph (functional name: :obj:`add_remaining_self_loops`).

    Args:
        attr (str, optional): The name of the attribute of edge weights
            or multi-dimensional edge features to pass to
            :meth:`torch_geometric.utils.add_remaining_self_loops`.
            (default: :obj:`"edge_weight"`)
        fill_value (float or Tensor or str, optional): How to generate edge
            features for the inserted self-loops (in case :obj:`attr != None`).
            A :obj:`float` or :class:`torch.Tensor` is used directly as the
            self-loop feature; a :obj:`str` aggregates the features of all
            edges pointing to the respective node with the given reduction
            (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
            :obj:`"mul"`). (default: :obj:`1.`)
    """
    def __init__(self, attr: Optional[str] = 'edge_weight',
                 fill_value: Union[float, Tensor, str] = 1.0):
        self.attr = attr
        self.fill_value = fill_value

    def forward(
        self,
        data: Union[Data, HeteroData],
    ) -> Union[Data, HeteroData]:
        # Only non-bipartite stores that actually carry an edge_index get
        # self-loops; everything else is left untouched.
        for store in data.edge_stores:
            if not store.is_bipartite() and 'edge_index' in store:
                store.edge_index, edge_weight = add_remaining_self_loops(
                    store.edge_index,
                    getattr(store, self.attr, None),
                    fill_value=self.fill_value,
                    num_nodes=store.size(0),
                )
                setattr(store, self.attr, edge_weight)
        return data
| [
"noreply@github.com"
] | pyg-team.noreply@github.com |
2fc6e64bdf4d69197a40ec1baedf053a5dbf7047 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/anp.py | 615ee34b3b608f8f3a4e5a96c301c57a86ea8e13 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed by lone double-quote tokens, space-joined.

    ``lineRemaining`` is a list of whitespace-split tokens. When the first and
    last tokens are each exactly ``"``, the tokens between them are printed
    joined by single spaces; an empty quoted payload prints a blank line.
    Anything not quote-delimited is silently ignored.

    Fixed: the original used Python 2 ``print`` statements, which are syntax
    errors under Python 3.
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: everything between the quote tokens
            print(' '.join(lineRemaining[1:-1]))
        else:
            print()
def main(fileName):
    """Interpret the file line by line: every line must start with 'aNP'.

    For each line, the remaining tokens are handed to :func:`printFunction`;
    the first malformed line prints ``ERROR`` and aborts. Blank lines are
    skipped (the original indexed ``data[0]`` and crashed on them); the
    Python 2 ``print`` statement was also modernized to the py3 function.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if not data:
                # Skip blank lines instead of raising IndexError.
                continue
            if data[0] == 'aNP':
                printFunction(data[1:])
            else:
                print('ERROR')
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
081dcfaf35eab2dc448a2db49ecc3d1ae03e2589 | 31681488e69da3c7e00b0eda28e5cb720ef2299c | /liteiclink/serwb/datapath.py | dbdb5ea0b5f764e6f447754d0c099fba7da78b88 | [
"BSD-2-Clause"
] | permissive | zsipos/liteiclink | 4e9bdf6a819f490461cb33d0837247041203071d | 864cd831f3475dffd1c92d6d4a1b86608680bcf2 | refs/heads/master | 2021-07-08T07:43:10.897604 | 2020-01-28T09:40:17 | 2020-01-28T09:40:17 | 245,119,569 | 0 | 0 | NOASSERTION | 2020-03-05T09:25:16 | 2020-03-05T09:25:15 | null | UTF-8 | Python | false | false | 6,030 | py | # This file is Copyright (c) 2017-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
from migen import *
from migen.genlib.io import *
from migen.genlib.misc import BitSlip, WaitTimer
from litex.soc.interconnect import stream
from litex.soc.cores.code_8b10b import Encoder, Decoder
from liteiclink.serwb.scrambler import Scrambler, Descrambler
def K(x, y):
    """Pack a K.x.y control-symbol code: *y* into bits [7:5], *x* into bits [4:0].

    Used e.g. as ``K(28, 5)`` for the 8b/10b comma character fed to the encoder.
    """
    return x | (y << 5)
class _8b10bEncoder(Module):
    """Stream adapter around a 4-lane 8b/10b encoder.

    Sink: 32-bit data word plus one K (control-symbol) flag per byte lane.
    Source: the 40-bit line-coded word (10 bits per lane).
    """
    def __init__(self):
        self.sink = sink = stream.Endpoint([("d", 32), ("k", 4)])
        self.source = source = stream.Endpoint([("data", 40)])

        # # #

        # CEInserter gates the encoder's running-disparity state so it only
        # advances when a word is actually transferred.
        encoder = CEInserter()(Encoder(4, True))
        self.submodules += encoder

        # control
        self.comb += [
            source.valid.eq(sink.valid),
            sink.ready.eq(source.ready),
            encoder.ce.eq(source.valid & source.ready)
        ]

        # datapath: lane i encodes byte i (with its K flag) into output bits [10i:10i+10]
        for i in range(4):
            self.comb += [
                encoder.k[i].eq(sink.k[i]),
                encoder.d[i].eq(sink.d[8*i:8*(i+1)]),
                source.data[10*i:10*(i+1)].eq(encoder.output[i])
            ]
class _8b10bDecoder(Module):
    """Stream adapter around four parallel 8b/10b decoders.

    Sink: a 40-bit line-coded word. Source: the decoded 32-bit data word plus
    one K (control-symbol) flag per byte lane.
    """
    def __init__(self):
        self.sink = sink = stream.Endpoint([("data", 40)])
        self.source = source = stream.Endpoint([("d", 32), ("k", 4)])

        # # #

        # One decoder per 10-bit lane; CEInserter keeps their internal state
        # frozen unless a word is actually transferred.
        decoders = [CEInserter()(Decoder(True)) for _ in range(4)]
        self.submodules += decoders

        # control
        self.comb += [
            source.valid.eq(sink.valid),
            sink.ready.eq(source.ready)
        ]
        self.comb += [decoders[i].ce.eq(source.valid & source.ready) for i in range(4)]

        # datapath: lane i decodes input bits [10i:10i+10] into byte i + K flag
        for i in range(4):
            self.comb += [
                decoders[i].input.eq(sink.data[10*i:10*(i+1)]),
                source.k[i].eq(decoders[i].k),
                source.d[8*i:8*(i+1)].eq(decoders[i].d)
            ]
class _Bitslip(Module):
    """Rotate the 40-bit stream by ``value`` bit positions.

    Used by the receiver to realign the deserialized word to the 8b/10b
    symbol boundary found during link training.
    """
    def __init__(self):
        # Number of bit positions to slip (0..39 fits in 6 bits).
        self.value = value = Signal(6)
        self.sink = sink = stream.Endpoint([("data", 40)])
        self.source = source = stream.Endpoint([("data", 40)])

        # # #

        # CEInserter gates the BitSlip shift register so it only advances on
        # an actual transfer.
        bitslip = CEInserter()(BitSlip(40))
        self.submodules += bitslip

        # control
        self.comb += [
            source.valid.eq(sink.valid),
            sink.ready.eq(source.ready),
            bitslip.value.eq(value),
            bitslip.ce.eq(source.valid & source.ready)
        ]

        # datapath
        self.comb += [
            bitslip.i.eq(sink.data),
            source.data.eq(bitslip.o)
        ]
class TXDatapath(Module):
    """Transmit datapath: (scramble) -> 8b/10b encode -> width-convert to PHY.

    Control inputs:
      - ``comma``: when set, a K28.5 comma character is sent instead of data
        (used for link alignment/training).
      - ``idle``: when set, an all-zero word is sent downstream of the encoder.
    """
    def __init__(self, phy_dw, with_scrambling=True):
        self.idle = idle = Signal()
        self.comma = comma = Signal()
        self.sink = sink = stream.Endpoint([("data", 32)])
        self.source = source = stream.Endpoint([("data", phy_dw)])

        # # #

        # scrambler
        if with_scrambling:
            self.submodules.scrambler = scrambler = Scrambler()

        # line coding
        self.submodules.encoder = encoder = _8b10bEncoder()

        # converter: 40-bit encoded words down to the PHY data width
        self.submodules.converter = converter = stream.Converter(40, phy_dw)

        # dataflow: comma injection takes priority over (scrambled) payload;
        # idle overrides everything with a constant zero word.
        if with_scrambling:
            self.comb += [
                sink.connect(scrambler.sink),
                If(comma,
                    encoder.sink.valid.eq(1),
                    encoder.sink.k.eq(1),
                    encoder.sink.d.eq(K(28,5))
                ).Else(
                    scrambler.source.connect(encoder.sink)
                )
            ]
        else:
            self.comb += [
                If(comma,
                    encoder.sink.valid.eq(1),
                    encoder.sink.k.eq(1),
                    encoder.sink.d.eq(K(28,5))
                ).Else(
                    sink.connect(encoder.sink, omit={"data"}),
                    encoder.sink.d.eq(sink.data)
                ),
            ]
        self.comb += [
            If(idle,
                converter.sink.valid.eq(1),
                converter.sink.data.eq(0)
            ).Else(
                encoder.source.connect(converter.sink),
            ),
            converter.source.connect(source)
        ]
class RXDatapath(Module):
    """Receive datapath: width-convert from PHY -> bitslip -> 8b/10b decode
    -> (descramble), plus idle and comma detection for link training.

    Status outputs:
      - ``idle``: asserted after ~32 consecutive all-zero/all-one words
        (electrical idle on the line).
      - ``comma``: asserted when the last decoded word was a K28.5 comma.
    """
    def __init__(self, phy_dw, with_scrambling=True):
        # Bit alignment applied before decoding, set during link training.
        self.bitslip_value = bitslip_value = Signal(6)
        self.sink = sink = stream.Endpoint([("data", phy_dw)])
        self.source = source = stream.Endpoint([("data", 32)])
        self.idle = idle = Signal()
        self.comma = comma = Signal()

        # # #

        # converter: PHY data width up to 40-bit encoded words
        self.submodules.converter = converter = stream.Converter(phy_dw, 40)

        # bitslip
        self.submodules.bitslip = bitslip = _Bitslip()
        self.comb += bitslip.value.eq(bitslip_value)

        # line coding
        self.submodules.decoder = decoder = _8b10bDecoder()

        # descrambler
        if with_scrambling:
            self.submodules.descrambler = descrambler = Descrambler()

        # dataflow
        self.comb += [
            sink.connect(converter.sink),
            converter.source.connect(bitslip.sink),
            bitslip.source.connect(decoder.sink)
        ]
        if with_scrambling:
            self.comb += [
                decoder.source.connect(descrambler.sink),
                descrambler.source.connect(source)
            ]
        else:
            self.comb += [
                decoder.source.connect(source, omit={"d", "k"}),
                source.data.eq(decoder.source.d)
            ]

        # idle decoding: count consecutive all-0 / all-1 raw words; the timer
        # elapses (and "idle" asserts) after 32 of them.
        idle_timer = WaitTimer(32)
        self.submodules += idle_timer
        self.sync += [
            If(converter.source.valid,
                idle_timer.wait.eq((converter.source.data == 0) | (converter.source.data == (2**40-1)))
            ),
            idle.eq(idle_timer.done)
        ]

        # comma decoding: flag a decoded K28.5 control character.
        self.sync += \
            If(decoder.source.valid,
                comma.eq((decoder.source.k == 1) & (decoder.source.d == K(28, 5)))
            )
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
3fe587d3479f35c248d556caac968306f606b220 | 2881dcaa58b2acbb56fe7ecdf30f1f31ec53798f | /sliding-window/max-distinct-substring/max-distinct-substring-iterative.py | 188babf47d7c25a5c63ecc49803d8c18766a160d | [] | no_license | aratik711/grokking-the-coding-interview | 2ec8791c5c1f65a752e795bded4f66b79bf8e3cc | 95a3c477d3ebd49c2d1f9d51394b61680f05a38b | refs/heads/main | 2023-07-03T10:04:39.613221 | 2021-08-11T03:08:11 | 2021-08-11T03:08:11 | 343,046,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | """
Given a string, find the length of the longest substring in it with no more than K distinct characters.
Example 1:
Input: String="araaci", K=2
Output: 4
Explanation: The longest substring with no more than '2' distinct characters is "araa".
Example 2:
Input: String="araaci", K=1
Output: 2
Explanation: The longest substring with no more than '1' distinct characters is "aa".
Example 3:
Input: String="cbbebi", K=3
Output: 5
Explanation: The longest substrings with no more than '3' distinct characters are "cbbeb" & "bbebi".
Time complexity O(n*n)
"""
def longest_substring_with_k_distinct(str, k):
    """Return the length of the longest substring with at most *k* distinct chars.

    Classic O(n) sliding-window: grow the window on the right, and shrink it
    from the left whenever it holds more than *k* distinct characters.

    The previous O(n^2) version was wrong: it recorded a length one character
    *after* reaching k distinct characters (overcounting when that character
    was a new, (k+1)-th distinct one), never recorded windows that ran to the
    end of the string (e.g. "aabba", k=2 returned 4 instead of 5), and crashed
    with ValueError on max([]) when the string had fewer than k distinct chars.

    :param str: input string (parameter name kept for API compatibility,
        even though it shadows the builtin)
    :param k: maximum number of distinct characters allowed; k <= 0 yields 0
    """
    if k <= 0:
        return 0
    window_start = 0
    max_length = 0
    char_frequency = {}
    for window_end in range(len(str)):
        right_char = str[window_end]
        char_frequency[right_char] = char_frequency.get(right_char, 0) + 1
        # Shrink from the left until at most k distinct chars remain.
        while len(char_frequency) > k:
            left_char = str[window_start]
            char_frequency[left_char] -= 1
            if char_frequency[left_char] == 0:
                del char_frequency[left_char]
            window_start += 1
        max_length = max(max_length, window_end - window_start + 1)
    return max_length
str = "cbbebi"
K = 3
print(longest_substring_with_k_distinct(str, K))
| [
"arati.kulkarni@phonepe.com"
] | arati.kulkarni@phonepe.com |
caa8a343b37b913dc10f18a9cd8223fb7b89c06a | e87f369bf5642d25990d7e1b72d9fda9eab39fea | /invoke_commands/release.py | 0c8d2359956e60429dabfae22afe5d3a59e6511d | [
"MIT"
] | permissive | rajk-apps/riki | 612fc4716b842562447d9f7163cb8681e7e1e7c3 | 58257bffe7d7f00fc0cb8dc266d783c00cc16070 | refs/heads/master | 2022-10-05T21:48:17.285899 | 2022-09-18T16:31:31 | 2022-09-18T16:31:31 | 162,050,032 | 1 | 2 | MIT | 2020-07-15T21:19:40 | 2018-12-16T23:19:49 | JavaScript | UTF-8 | Python | false | false | 1,329 | py | from invoke import task
import io
from .vars import mymodule
@task
def new(c):
    """Build an sdist for the current module version, validate it with twine,
    and upload it to PyPI using token authentication."""
    version = mymodule.__version__
    c.run("python setup.py sdist")
    c.run("twine check dist/*")
    # __token__ user: the actual API token is read from the TWINE_PASSWORD env var.
    c.run(
        f"twine upload dist/*{version}.tar.gz -u __token__ -p $TWINE_PASSWORD"
    )
@task
def tag(c):
    """Create and push an annotated git tag for the current package version.

    Only the ``master`` branch may be tagged, and only if the tag does not
    already exist. The release notes are taken from
    ``docs_config/current_release.rst``, archived under
    ``docs_config/release_notes/<tag>.rst``, attached to the tag message, and
    the staging file is then emptied. Otherwise a message is printed and
    nothing happens.

    Improvements over the original: StringIO buffers are context-managed
    (``f2`` was never closed), string formatting is consistently f-strings,
    and the nested conditionals are flattened into guard clauses; all printed
    messages are unchanged.
    """
    version = mymodule.__version__

    # Determine the currently checked-out branch.
    with io.StringIO() as buf:
        c.run("git rev-parse --abbrev-ref HEAD", out_stream=buf)
        branch = buf.getvalue().strip()

    if branch != "master":
        print("only master branch can be tagged")
        return

    tag_version = f"v{version}"

    # Collect existing tags; skip if this version is already tagged.
    with io.StringIO() as buf:
        c.run("git tag", out_stream=buf)
        tags = buf.getvalue().split()
    print(tags)
    if tag_version in tags:
        print(f"{tag_version} version already tagged")
        return

    current_release_path = "docs_config/current_release.rst"
    with open(current_release_path) as fp:
        notes = fp.read()
    # Archive the notes alongside the tag they belong to.
    with open(f"docs_config/release_notes/{tag_version}.rst", "w") as fp:
        fp.write(notes)
    c.run(f"git tag -a {tag_version} -m '{notes}'")
    # Clear the staging notes now that they are archived with the tag.
    with open(current_release_path, "w") as fp:
        fp.write("")
    c.run("git push --tags")
| [
"endremborza@gmail.com"
] | endremborza@gmail.com |
84bb2ef6d0e866019e761834acd91b17ecfa1556 | c35b8c8ece7757943d93748fbdc6f4d54539daa6 | /poloniex/logger.py | d4d0f0e9d5aa72ff43545d70fd87b16508855ef5 | [
"MIT"
] | permissive | absortium/poloniex-api | 0b674a6fe11c60263f596049d274a7f45095b989 | e1786e8edf9116990dc2291f343ed965e9d0f5ae | refs/heads/master | 2020-12-25T16:24:52.723504 | 2017-09-27T21:13:48 | 2017-09-27T21:13:48 | 51,717,220 | 99 | 40 | null | 2017-09-27T21:13:49 | 2016-02-14T22:27:07 | Python | UTF-8 | Python | false | false | 1,273 | py | import inspect
import logging
from functools import wraps
import pp
__author__ = 'andrew.shvv@gmail.com'
def get_prev_method_name():
    # Frame 0 is this function, frame 1 is the wrapper that calls us, frame 2
    # is the wrapper's caller; index 3 of a stack record is the function name.
    # NOTE(review): this hard-codes a call depth of exactly two — it only
    # yields a meaningful name when invoked from inside pretty_wrapper.
    return inspect.stack()[2][3]
def pretty_wrapper(func):
    """Wrap a logging method so its message is framed with the calling
    function's name on top and a dashed rule underneath; non-string messages
    are pretty-printed via ``pp.fmt``."""
    @wraps(func)
    def decorator(msg, *args, **kwargs):
        header = "Func: %s\n" % get_prev_method_name()
        body = msg if type(msg) == str else pp.fmt(msg)
        footer = "\n+ " + "- " * 30 + "+\n"
        func(header + body + footer, *args, **kwargs)

    return decorator
def wrap_logger(logger):
    """Replace the logger's emit methods with pretty-printing wrappers,
    mutating the logger in place and returning it."""
    for method_name in ("info", "debug", "warning", "exception"):
        setattr(logger, method_name, pretty_wrapper(getattr(logger, method_name)))
    return logger
def getLogger(name, level=logging.DEBUG):
# create logger
logger = logging.getLogger(name)
logger = wrap_logger(logger)
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('\nLevel: %(levelname)s - %(name)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.setLevel(level)
return logger
| [
"andrew.shvv@gmail.com"
] | andrew.shvv@gmail.com |
8464fa813408dff6410bbc7f7c32ecb9dba9d7bc | 222367d17e0567a5d02a8391bc6954a57989b3eb | /main.py | 5dc926504d8fec28d4e21b9c0ed8f902f7240a38 | [] | no_license | turian/hydra-notebook | 8fe6a38aad2dda75ea4425b824fb527e8ca5d090 | faf499dd0c6ad0da75e9f03898fc08b731505502 | refs/heads/master | 2022-11-10T11:37:18.824841 | 2020-06-21T00:09:22 | 2020-06-21T00:09:22 | 273,799,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import hydra.experimental
hydra.experimental.initialize(config_path="conf")
#hydra.experimental.initialize_with_module(module="module", config_path="conf")
cfg=hydra.experimental.compose(config_name="config.yaml")
cfg=hydra.experimental.compose(config_name="config.yaml")
import module
| [
"turian@gmail.com"
] | turian@gmail.com |
882fbb1f343f673995324914119c91a1e80e22b5 | 039c5b793ace774bb815f4061a273ff098efd475 | /in_dev/send/sender/migrations/0002_documents.py | f567f03c6f4c0c553e1c78268b62e5823994e086 | [] | no_license | zzyzx4/soft | b7872a1c1e2dc91912f22aaaf96f2cedaf1423c1 | 264c399ddef2b55efd8a1a8b796320f72c6dec7c | refs/heads/master | 2022-12-16T20:50:45.512689 | 2019-07-01T11:38:12 | 2019-07-01T11:38:12 | 184,214,960 | 0 | 0 | null | 2022-12-08T05:07:18 | 2019-04-30T07:38:24 | null | UTF-8 | Python | false | false | 958 | py | # Generated by Django 2.2.1 on 2019-05-15 10:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sender', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Documents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Название')),
('description', models.CharField(max_length=100, verbose_name='Описание')),
('document', models.FileField(upload_to='Документы//%Y/%m/%d/%t')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Документ1',
'verbose_name_plural': 'Документы1',
},
),
]
| [
"dastik0101@gmail.com"
] | dastik0101@gmail.com |
436534c28628cdbcb5a0a11c881988de9e4ff2d4 | 93115f6d768793e70f496cf9c00e20f35d7b0978 | /manage.py | 233bdae7d0dba27fea07f41ff64191b1b6425849 | [] | no_license | RedSnip8/venu_menu.py | d884b888135edd8fee4e6a39ddd082e41a4a6109 | 59aa957c62288770eab9dbbfd859f7f751dab3bf | refs/heads/master | 2020-07-06T17:39:37.351890 | 2019-08-21T05:25:05 | 2019-08-21T05:25:05 | 203,093,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'venumenu.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"FCipolone@gmail.com"
] | FCipolone@gmail.com |
d863aa960bd859d9f48e63cb47e8b52f47519960 | 739b531f456ef13c04e437239a08c4ffac6b49f5 | /jl/bin/digg-tool | 4440f8b121458ed4421524ca4b973cba2460a0c9 | [] | no_license | bcampbell/journalisted | e2ec3a6f48bdf0bec4e6e5245c7975c23e77f07d | 0df05a829825e67c35e2963c1a6d53db5872e203 | refs/heads/master | 2021-01-21T05:01:59.155762 | 2016-06-14T04:18:13 | 2016-06-14T04:18:13 | 306,575 | 6 | 1 | null | 2013-07-19T04:37:45 | 2009-09-14T13:00:58 | PHP | UTF-8 | Python | false | false | 2,846 | #!/usr/bin/env python2.4
# 2008-03-19 BenC Initial version
#
# Scraper which looks for references to newspaper articles
# on digg.com and loads the number of diggs, comments etc
# into our database, populating the article_commentlink table.
#
import sys
from datetime import datetime
from optparse import OptionParser
sys.path.append( "../pylib" )
from digg import *
from JL import DB,ukmedia,CommentLink
# scraperfront used to map urls to article srcids
sys.path.append( "../scraper" )
import scrapefront
APPKEY = 'http://www.scumways.com'
domains = [
'independent.co.uk',
'dailymail.co.uk',
'mailonsunday.co.uk',
'express.co.uk',
'dailyexpress.co.uk',
'guardian.co.uk',
'mirror.co.uk',
'sundaymirror.co.uk',
'telegraph.co.uk',
'scotsman.com',
'ft.com',
'theherald.co.uk',
'thesun.co.uk',
'timesonline.co.uk',
'bbc.co.uk'
]
digg = Digg(APPKEY)
def FetchFromDigg( domain, total=500 ):
"""Try and find 'numentries' stories on Digg with the given domain"""
entries = []
got = 0
while got < total:
count = total-got
if count > 100:
count = 100
errcnt = 0
while 1:
try:
stories = digg.getStories( offset=got,count=count, domain=domain )
break
except Exception,err:
if isinstance( err, KeyboardInterrupt ):
raise
errcnt += 1
if errcnt >= 3:
ukmedia.DBUG( "digg-tool: ABORTING - too many errors\n" )
raise
print >>sys.stderr, sys.exc_info()
ukmedia.DBUG( "digg-tool: Retrying... (%d)\n" % (errcnt) )
if total > int(stories.total):
total = int(stories.total)
count = int( stories.count )
got += count
ukmedia.DBUG2( "digg-tool: %s: got %d/%d\n" % (domain,got,total) )
for s in stories:
e = {
'url': s.link,
'score': s.diggs,
'num_comments': s.comments,
'comment_url': s.href,
'source': 'digg',
# 'submitted': datetime.fromtimestamp( int( s.submit_date ) ),
}
entries.append(e)
return entries
def LoadEntries( conn, entries ):
"""Load fetched digg entries into the database"""
stats = CommentLink.Stats()
c = conn.cursor()
for e in entries:
srcid = scrapefront.CalcSrcID( e['url'] )
if not srcid:
# not handled
stats.not_handled += 1
continue
e['srcid'] = srcid
if CommentLink.AddCommentLink( conn, e ):
stats.matched += 1
else:
stats.missing += 1
return stats
def DoDomain( conn, domain ):
"""Fetch digg entries for domain and try to load them into db"""
entries = FetchFromDigg( domain )
stats = LoadEntries( conn, entries )
ukmedia.DBUG( "digg-tool: %s: %s\n" %( domain,stats.Report() ) )
return stats
def main():
conn = DB.Connect()
overallstats = CommentLink.Stats()
for d in domains:
stats = DoDomain( conn, d )
overallstats.Accumulate( stats )
ukmedia.DBUG( "digg-tool: overall: %s" % (overallstats.Report()) )
if __name__ == "__main__":
main()
| [
"ben@scumways.com"
] | ben@scumways.com | |
18ac3c356a6997616ffd316d126b96694524b264 | d73409535734a788af83a9b2b2e32dd1b979d5d2 | /proxySTAR_V3/certbot/certbot/tests/util.py | a36f0f6acfe177d279e2ea8b860f81c8a0fa4ebd | [
"Apache-2.0",
"MIT"
] | permissive | mami-project/lurk | adff1fb86cb3e478fe1ded4cbafa6a1e0b93bfdd | 98c293251e9b1e9c9a4b02789486c5ddaf46ba3c | refs/heads/master | 2022-11-02T07:28:22.708152 | 2019-08-24T19:28:58 | 2019-08-24T19:28:58 | 88,050,138 | 2 | 2 | NOASSERTION | 2022-10-22T15:46:11 | 2017-04-12T12:38:33 | Python | UTF-8 | Python | false | false | 8,987 | py | """Test utilities.
.. warning:: This module is not part of the public API.
"""
import multiprocessing
import os
import pkg_resources
import shutil
import tempfile
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import mock
import OpenSSL
from six.moves import reload_module # pylint: disable=import-error
from acme import jose
from certbot import constants
from certbot import interfaces
from certbot import storage
from certbot import util
from certbot.display import util as display_util
def vector_path(*names):
"""Path to a test vector."""
return pkg_resources.resource_filename(
__name__, os.path.join('testdata', *names))
def load_vector(*names):
"""Load contents of a test vector."""
# luckily, resource_string opens file in binary mode
return pkg_resources.resource_string(
__name__, os.path.join('testdata', *names))
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
else: # pragma: no cover
raise ValueError("Loader could not be recognized based on extension")
def load_cert(*names):
"""Load certificate."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate(loader, load_vector(*names))
def load_comparable_cert(*names):
"""Load ComparableX509 cert."""
return jose.ComparableX509(load_cert(*names))
def load_csr(*names):
"""Load certificate request."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate_request(loader, load_vector(*names))
def load_comparable_csr(*names):
"""Load ComparableX509 certificate request."""
return jose.ComparableX509(load_csr(*names))
def load_rsa_private_key(*names):
"""Load RSA private key."""
loader = _guess_loader(names[-1], serialization.load_pem_private_key,
serialization.load_der_private_key)
return jose.ComparableRSAKey(loader(
load_vector(*names), password=None, backend=default_backend()))
def load_pyopenssl_private_key(*names):
"""Load pyOpenSSL private key."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_privatekey(loader, load_vector(*names))
def skip_unless(condition, reason): # pragma: no cover
"""Skip tests unless a condition holds.
This implements the basic functionality of unittest.skipUnless
which is only available on Python 2.7+.
:param bool condition: If ``False``, the test will be skipped
:param str reason: the reason for skipping the test
:rtype: callable
:returns: decorator that hides tests unless condition is ``True``
"""
if hasattr(unittest, "skipUnless"):
return unittest.skipUnless(condition, reason)
elif condition:
return lambda cls: cls
else:
return lambda cls: None
def make_lineage(config_dir, testfile):
"""Creates a lineage defined by testfile.
This creates the archive, live, and renewal directories if
necessary and creates a simple lineage.
:param str config_dir: path to the configuration directory
:param str testfile: configuration file to base the lineage on
:returns: path to the renewal conf file for the created lineage
:rtype: str
"""
lineage_name = testfile[:-len('.conf')]
conf_dir = os.path.join(
config_dir, constants.RENEWAL_CONFIGS_DIR)
archive_dir = os.path.join(
config_dir, constants.ARCHIVE_DIR, lineage_name)
live_dir = os.path.join(
config_dir, constants.LIVE_DIR, lineage_name)
for directory in (archive_dir, conf_dir, live_dir,):
if not os.path.exists(directory):
os.makedirs(directory)
sample_archive = vector_path('sample-archive')
for kind in os.listdir(sample_archive):
shutil.copyfile(os.path.join(sample_archive, kind),
os.path.join(archive_dir, kind))
for kind in storage.ALL_FOUR:
os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)),
os.path.join(live_dir, '{0}.pem'.format(kind)))
conf_path = os.path.join(config_dir, conf_dir, testfile)
with open(vector_path(testfile)) as src:
with open(conf_path, 'w') as dst:
dst.writelines(
line.replace('MAGICDIR', config_dir) for line in src)
return conf_path
def patch_get_utility(target='zope.component.getUtility'):
"""Patch zope.component.getUtility to use a special mock IDisplay.
The mock IDisplay works like a regular mock object, except it also
also asserts that methods are called with valid arguments.
:param str target: path to patch
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
return mock.patch(target, new_callable=_create_get_utility_mock)
class FreezableMock(object):
"""Mock object with the ability to freeze attributes.
This class works like a regular mock.MagicMock object, except
attributes and behavior can be set and frozen so they cannot be
changed during tests.
If a func argument is provided to the constructor, this function
is called first when an instance of FreezableMock is called,
followed by the usual behavior defined by MagicMock. The return
value of func is ignored.
"""
def __init__(self, frozen=False, func=None):
self._frozen_set = set() if frozen else set(('freeze',))
self._func = func
self._mock = mock.MagicMock()
self._frozen = frozen
def freeze(self):
"""Freeze object preventing further changes."""
self._frozen = True
def __call__(self, *args, **kwargs):
if self._func is not None:
self._func(*args, **kwargs)
return self._mock(*args, **kwargs)
def __getattribute__(self, name):
if name == '_frozen':
try:
return object.__getattribute__(self, name)
except AttributeError:
return False
elif name == '_frozen_set' or name in self._frozen_set:
return object.__getattribute__(self, name)
else:
return getattr(object.__getattribute__(self, '_mock'), name)
def __setattr__(self, name, value):
if self._frozen:
return setattr(self._mock, name, value)
elif name != '_frozen_set':
self._frozen_set.add(name)
return object.__setattr__(self, name, value)
def _create_get_utility_mock():
display = FreezableMock()
for name in interfaces.IDisplay.names(): # pylint: disable=no-member
if name != 'notification':
frozen_mock = FreezableMock(frozen=True, func=_assert_valid_call)
setattr(display, name, frozen_mock)
display.freeze()
return mock.MagicMock(return_value=display)
def _assert_valid_call(*args, **kwargs):
assert_args = [args[0] if args else kwargs['message']]
assert_kwargs = {}
assert_kwargs['default'] = kwargs.get('default', None)
assert_kwargs['cli_flag'] = kwargs.get('cli_flag', None)
assert_kwargs['force_interactive'] = kwargs.get('force_interactive', False)
# pylint: disable=star-args
display_util.assert_valid_call(*assert_args, **assert_kwargs)
class TempDirTestCase(unittest.TestCase):
"""Base test class which sets up and tears down a temporary directory"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def lock_and_call(func, lock_path):
"""Grab a lock for lock_path and call func.
:param callable func: object to call after acquiring the lock
:param str lock_path: path to file or directory to lock
"""
# Reload module to reset internal _LOCKS dictionary
reload_module(util)
# start child and wait for it to grab the lock
cv = multiprocessing.Condition()
cv.acquire()
child_args = (cv, lock_path,)
child = multiprocessing.Process(target=hold_lock, args=child_args)
child.start()
cv.wait()
# call func and terminate the child
func()
cv.notify()
cv.release()
child.join()
assert child.exitcode == 0
def hold_lock(cv, lock_path): # pragma: no cover
"""Acquire a file lock at lock_path and wait to release it.
:param multiprocessing.Condition cv: condition for synchronization
:param str lock_path: path to the file lock
"""
from certbot import lock
if os.path.isdir(lock_path):
my_lock = lock.lock_dir(lock_path)
else:
my_lock = lock.LockFile(lock_path)
cv.acquire()
cv.notify()
cv.wait()
my_lock.release()
| [
"diego.deaguilarcanellas@telefonica.com"
] | diego.deaguilarcanellas@telefonica.com |
827f27bc483bfd708ef207bef5a1bb0e2f15d46e | 2990b0841b63f300a722107933c01c7237a7976b | /all_xuef/程序员练级+Never/Fun_Projects/cs173-python/cs173-python-master/design2/python/james-tests/emptyfun.py | 2066cff1b0af808ea95c1998937555d5cae7171d | [] | no_license | xuefengCrown/Files_01_xuef | 8ede04751689e0495e3691fc5d8682da4d382b4d | 677329b0189149cb07e7ba934612ad2b3e38ae35 | refs/heads/master | 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | def foo():
pass
print("calling foo...");
foo()
print("done");
| [
"643472092@qq.com"
] | 643472092@qq.com |
35ada6f333d683b572c085d7576aed98123320ff | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/secdev_scapy/scapy-master/scapy/modules/queso.py | 9c38f42735e3c27a6b92186d7b3e5755b162164d | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,987 | py | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Clone of queso OS fingerprinting
"""
from scapy.data import KnowledgeBase
from scapy.config import conf
from scapy.layers.inet import IP,TCP
from scapy.error import warning
from scapy.volatile import RandInt
from scapy.sendrecv import sr
#from
conf.queso_base ="/etc/queso.conf"
#################
## Queso stuff ##
#################
def quesoTCPflags(flags):
if flags == "-":
return "-"
flv = "FSRPAUXY"
v = 0
for i in flags:
v |= 2**flv.index(i)
return "%x" % v
class QuesoKnowledgeBase(KnowledgeBase):
def lazy_init(self):
try:
f = open(self.filename)
except IOError:
return
self.base = {}
p = None
try:
for l in f:
l = l.strip()
if not l or l[0] == ';':
continue
if l[0] == '*':
if p is not None:
p[""] = name
name = l[1:].strip()
p = self.base
continue
if l[0] not in list("0123456"):
continue
res = l[2:].split()
res[-1] = quesoTCPflags(res[-1])
res = " ".join(res)
if not p.has_key(res):
p[res] = {}
p = p[res]
if p is not None:
p[""] = name
except:
self.base = None
warning("Can't load queso base [%s]", self.filename)
f.close()
queso_kdb = QuesoKnowledgeBase(conf.queso_base)
def queso_sig(target, dport=80, timeout=3):
p = queso_kdb.get_base()
ret = []
for flags in ["S", "SA", "F", "FA", "SF", "P", "SEC"]:
ans, unans = sr(IP(dst=target)/TCP(dport=dport,flags=flags,seq=RandInt()),
timeout=timeout, verbose=0)
if len(ans) == 0:
rs = "- - - -"
else:
s,r = ans[0]
rs = "%i" % (r.seq != 0)
if not r.ack:
r += " 0"
elif r.ack-s.seq > 666:
rs += " R" % 0
else:
rs += " +%i" % (r.ack-s.seq)
rs += " %X" % r.window
rs += " %x" % r.payload.flags
ret.append(rs)
return ret
def queso_search(sig):
p = queso_kdb.get_base()
sig.reverse()
ret = []
try:
while sig:
s = sig.pop()
p = p[s]
if p.has_key(""):
ret.append(p[""])
except KeyError:
pass
return ret
@conf.commands.register
def queso(*args,**kargs):
"""Queso OS fingerprinting
queso(target, dport=80, timeout=3)"""
return queso_search(queso_sig(*args, **kargs))
| [
"659338505@qq.com"
] | 659338505@qq.com |
784a4de2014ccf41f5665df5f57e6d53f1f2f561 | fd65851c7977176cfa69056ea5d63ca529e74271 | /components/diagnostics/diagnose_me/component.py | 3578702ae14a0c8acd4a14d5fa8bc69b61c74c57 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] | permissive | NikeNano/pipelines | dad9f45267a7f4c495a30880dd6fe1570f26fa64 | 73804f8928ce671839d34800627b6d3ea9f820a7 | refs/heads/master | 2022-01-29T21:24:43.693120 | 2021-11-20T18:18:35 | 2021-11-20T18:18:35 | 221,051,451 | 1 | 1 | Apache-2.0 | 2021-04-23T20:07:11 | 2019-11-11T19:11:29 | Python | UTF-8 | Python | false | false | 8,302 | py | # Copyright 2020 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, NamedTuple, Optional
def run_diagnose_me(
bucket: str,
execution_mode: str,
project_id: str,
target_apis: str,
quota_check: list = None,
) -> NamedTuple('Outputs', [('bucket', str), ('project_id', str)]):
""" Performs environment verification specific to this pipeline.
args:
bucket:
string name of the bucket to be checked. Must be of the format
gs://bucket_root/any/path/here/is/ignored where any path beyond root
is ignored.
execution_mode:
If set to HALT_ON_ERROR will case any error to raise an exception.
This is intended to stop the data processing of a pipeline. Can set
to False to only report Errors/Warnings.
project_id:
GCP project ID which is assumed to be the project under which
current pod is executing.
target_apis:
String consisting of a comma separated list of apis to be verified.
quota_check:
List of entries describing how much quota is required. Each entry
has three fields: region, metric and quota_needed. All
string-typed.
Raises:
RuntimeError: If configuration is not setup properly and
HALT_ON_ERROR flag is set.
"""
# Installing pip3 and kfp, since the base image 'google/cloud-sdk:279.0.0'
# does not come with pip3 pre-installed.
import subprocess
subprocess.run([
'curl', 'https://bootstrap.pypa.io/get-pip.py', '-o', 'get-pip.py'
],
capture_output=True)
subprocess.run(['apt-get', 'install', 'python3-distutils', '--yes'],
capture_output=True)
subprocess.run(['python3', 'get-pip.py'], capture_output=True)
subprocess.run(['python3', '-m', 'pip', 'install', 'kfp>=0.1.31', '--quiet'],
capture_output=True)
import sys
from kfp.cli.diagnose_me import gcp
config_error_observed = False
quota_list = gcp.get_gcp_configuration(
gcp.Commands.GET_QUOTAS, human_readable=False
)
if quota_list.has_error:
print('Failed to retrieve project quota with error %s\n' % (quota_list.stderr))
config_error_observed = True
else:
# Check quota.
quota_dict = {} # Mapping from region to dict[metric, available]
for region_quota in quota_list.json_output:
quota_dict[region_quota['name']] = {}
for quota in region_quota['quotas']:
quota_dict[region_quota['name']][quota['metric']
] = quota['limit'] - quota['usage']
quota_check = [] or quota_check
for single_check in quota_check:
if single_check['region'] not in quota_dict:
print(
'Regional quota for %s does not exist in current project.\n' %
(single_check['region'])
)
config_error_observed = True
else:
if quota_dict[single_check['region']][single_check['metric']
] < single_check['quota_needed']:
print(
'Insufficient quota observed for %s at %s: %s is needed but only %s is available.\n'
% (
single_check['metric'], single_check['region'],
str(single_check['quota_needed']
), str(quota_dict[single_check['region']][single_check['metric']])
)
)
config_error_observed = True
# Get the project ID
# from project configuration
project_config = gcp.get_gcp_configuration(
gcp.Commands.GET_GCLOUD_DEFAULT, human_readable=False
)
if not project_config.has_error:
auth_project_id = project_config.parsed_output['core']['project']
print(
'GCP credentials are configured with access to project: %s ...\n' %
(project_id)
)
print('Following account(s) are active under this pipeline:\n')
subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'])
print('\n')
else:
print(
'Project configuration is not accessible with error %s\n' %
(project_config.stderr),
file=sys.stderr
)
config_error_observed = True
if auth_project_id != project_id:
print(
'User provided project ID %s does not match the configuration %s\n' %
(project_id, auth_project_id),
file=sys.stderr
)
config_error_observed = True
# Get project buckets
get_project_bucket_results = gcp.get_gcp_configuration(
gcp.Commands.GET_STORAGE_BUCKETS, human_readable=False
)
if get_project_bucket_results.has_error:
print(
'could not retrieve project buckets with error: %s' %
(get_project_bucket_results.stderr),
file=sys.stderr
)
config_error_observed = True
# Get the root of the user provided bucket i.e. gs://root.
bucket_root = '/'.join(bucket.split('/')[0:3])
print(
'Checking to see if the provided GCS bucket\n %s\nis accessible ...\n' %
(bucket)
)
if bucket_root in get_project_bucket_results.json_output:
print(
'Provided bucket \n %s\nis accessible within the project\n %s\n' %
(bucket, project_id)
)
else:
print(
'Could not find the bucket %s in project %s' % (bucket, project_id) +
'Please verify that you have provided the correct GCS bucket name.\n' +
'Only the following buckets are visible in this project:\n%s' %
(get_project_bucket_results.parsed_output),
file=sys.stderr
)
config_error_observed = True
# Verify APIs that are required are enabled
api_config_results = gcp.get_gcp_configuration(gcp.Commands.GET_APIS)
api_status = {}
if api_config_results.has_error:
print(
'could not retrieve API status with error: %s' %
(api_config_results.stderr),
file=sys.stderr
)
config_error_observed = True
print('Checking APIs status ...')
for item in api_config_results.parsed_output:
api_status[item['config']['name']] = item['state']
# printing the results in stdout for logging purposes
print('%s %s' % (item['config']['name'], item['state']))
# Check if target apis are enabled
api_check_results = True
for api in target_apis.replace(' ', '').split(','):
if 'ENABLED' != api_status.get(api, 'DISABLED'):
api_check_results = False
print(
'API \"%s\" is not accessible or not enabled. To enable this api go to '
% (api) +
'https://console.cloud.google.com/apis/library/%s?project=%s' %
(api, project_id),
file=sys.stderr
)
config_error_observed = True
if 'HALT_ON_ERROR' in execution_mode and config_error_observed:
raise RuntimeError(
'There was an error in your environment configuration.\n' +
'Note that resolving such issues generally require a deep knowledge of Kubernetes.\n'
+ '\n' +
'We highly recommend that you recreate the cluster and check "Allow access ..." \n'
+
'checkbox during cluster creation to have the cluster configured automatically.\n'
+
'For more information on this and other troubleshooting instructions refer to\n'
+ 'our troubleshooting guide.\n' + '\n' +
'If you have intentionally modified the cluster configuration, you may\n'
+
'bypass this error by removing the execution_mode HALT_ON_ERROR flag.\n'
)
return (project_id, bucket)
if __name__ == '__main__':
import kfp.components as comp
comp.func_to_container_op(
run_diagnose_me,
base_image='google/cloud-sdk:279.0.0',
output_component_file='component.yaml',
)
| [
"noreply@github.com"
] | NikeNano.noreply@github.com |
24a66109c4bda7c5668b7766a6f938bbafb68128 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/46/usersdata/74/18966/submittedfiles/funcoes1.py | d635ffd151a958560fc49a52a3187bbec41da256 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | # -*- coding: utf-8 -*-
from __future__ import division
def crescente (lista):
i = 0
cont = 0
while (len(lista)-1)>=i:
if lista[i]<lista[i+1]:
cont=cont+1
i = i+1
if (len(lista)-1)==cont:
return 'S'
else:
return 'N'
def decrescente (lista1):
j = 0
cont1 = 0
while (len(lista1)-1)>=j:
if lista1[i]<lista1[j+1]:
cont1=cont1+1
j = j+1
if (len(lista1)-1)==cont1:
return 'S'
else:
return 'N'
def ciguais (lista2):
k = 0
cont2 = 0
while (len(lista2)-1)>=k:
if lista2[k]==lista2[k+1]:
cont2 = cont2+1
k = k+1
if cont2>0:
return 'S'
else:
return'N'
n = input('Digite o tamanho do vetor? ')
x = 1
y = 1
z = 1
a = []
b = []
c = []
while n>=x:
a.append(input('Digite os valores do vetor A: ')
x = x+1
while n>=y:
b.append(input('Digite os valores do vetor B: ')
y = y+1
while n>=z:
c.append(input('Digite os valores do vetor C: ')
z = z+1
crescente(a)
decrescente(a)
ciguais(a)
crescente(b)
decrescente(b)
ciguais(b)
crescente(c)
decrescente(c)
ciguais(c)
#escreva o programa principal
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
3323cd8116e2956ac0d1007bb69f9f4a201104df | a2bbd69fe69ec9a5737565b3b7325b5dcaaecf53 | /main/page/pe_add_product.py | 074f11ce24862b59e11eba0772961cbb3a95c473 | [] | no_license | SamWithWorld/selenium-2 | d945a03492548e8ee59bbb06d8c3bdb8593d8c54 | a575d7b3962a2754e69acb99cd48fe13dc62c6e5 | refs/heads/master | 2022-09-27T09:31:28.978249 | 2015-03-12T07:03:22 | 2015-03-12T07:03:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,693 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
import os, time, sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'))
from base import BasePage
class addProduct(BasePage):
url = "https://www.tokopedia.com/product-add.pl"
#locators
_pname_loc = (By.ID, 'p-name')
_pdep1_loc = (By.ID, 'p-dep-1')
_pdep2_loc = (By.ID, 'p-dep-2')
_pdep3_loc = (By.ID, 'p-dep-3')
_pminorder_loc = (By.ID, 'p-min-order')
_pprice_loc = (By.ID, 'p-price')
_pweight_loc = (By.ID, 'p-weight')
_puploadto_loc = (By.ID, 'p-upload-to')
_mustinsurance_loc = (By.ID, 'must_insurance')
_pcondition_loc = (By.ID, 'p-condition')
_returnable_loc = (By.ID, 'returnable')
_pdescription_loc = (By.ID, 'p-description')
_submit_loc = (By.ID, 's-save-prod')
# dictionary
dict = {
"index_url" : "http://www.tokopedia.com/",
"email" : "tkpd.qc+18@gmail.com",
"password" : "imtokopedia91"
}
def open(self, url):
self.driver.get(url)
time.sleep(2)
def go_to_add_product(self):
self.open(self.dict['index_url'] + 'product-add.pl')
def add_to_product(self):
self.go_to_add_product()
try:
self.driver.find_element(By.ID, "p-name").send_keys("Product AB")
time.sleep(4)
self.choose_category()
self.driver.find_element(By.ID, "p-min-order").clear()
self.driver.find_element(By.ID, "p-min-order").send_keys(randint(1, 5))
self.driver.find_element(By.ID, "p-price").send_keys(randint(5000, 10000))
self.driver.find_element(By.ID, "p-weight").send_keys(randint(100, 250))
self.choose_upload_to()
self.driver.find_element(By.ID, "s-save-prod").submit()
except Exception as inst:
print(inst)
def choose_category(self):
try:
time.sleep(6)
self.driver.execute_script("document.getElementById('p-dep-1').style.display = '';")
time.sleep(6)
list_category_first = self.driver.find_elements(By.XPATH, "//select[@id='p-dep-1']/option")
i = 0
while i < len(list_category_first):
if i == randint(0, len(list_category_first)-1):
list_category_first[i].click()
break
i += 1
time.sleep(6)
self.driver.execute_script("document.getElementById('p-dep-2').style.display = '';")
time.sleep(6)
list_category_second = self.driver.find_elements(By.XPATH, "//select[@id='p-dep-2']/option")
i = 0
while i < len(list_category_second):
if i == randint(0, len(list_category_second)-1):
list_category_second[i].click()
break
i += 1
time.sleep(6)
self.driver.execute_script("document.getElementById('p-dep-3').style.display = '';")
time.sleep(6)
list_category_third = self.driver.find_elements(By.XPATH, "//select[@id='p-dep-3']/option")
i = 0
while i < len(list_category_third):
if i == randint(0, len(list_category_third)-1):
list_category_third[i].click()
break
i += 1
except Exception as inst:
print(inst)
def choose_upload_to(self):
try:
time.sleep(6)
self.driver.execute_script("document.getElementById('p-upload-to').style.display = '';")
wait = WebDriverWait(self.driver, 10)
element = wait.until(EC.element_to_be_clickable((By.ID,'p-upload-to')))
time.sleep(6)
list_upload_to = self.driver.find_elements(By.XPATH, "//select[@id='p-upload-to']/option")
list_upload_to[0].click()
time.sleep(6)
self.driver.execute_script("document.getElementById('p-menu-id').style.display = '';")
time.sleep(6)
list_etalase = self.driver.find_elements(By.XPATH, "//select[@id='p-menu-id']/option")
i = 0
while i < len(list_etalase):
if i == randint(0, len(list_etalase)-1):
list_etalase[i].click()
break
i += 1
except Exception as inst:
print(inst) | [
"herman.wahyudi02@gmail.com"
] | herman.wahyudi02@gmail.com |
4b4679abcadd364adbc3b56bf6980cb1b8789d12 | 0a037e4ee03c5afbf6f58b7293fefab1cc6998cf | /project_2/RollingDice.py | 2c4e7dc005788ebed16ca65fa51fc394a5f1cded | [] | no_license | mingyyy/crash_course | 6ac2a41b14c821e96e3938047cb056ad2ce99280 | dad9f9b37ef3093dad25a0cb7fddf0e65fed3571 | refs/heads/master | 2020-04-24T14:24:43.283617 | 2019-12-25T07:43:05 | 2019-12-25T07:43:05 | 172,019,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | from random import randint
import pygal
class Die():
    """A die with a configurable number of faces (six by default)."""

    def __init__(self, num_sides=6):
        # How many faces this die has.
        self.num_sides = num_sides

    def roll(self):
        """Return a uniformly random face value in [1, num_sides]."""
        return randint(1, self.num_sides)
# --- Simulation parameters -------------------------------------------------
n = 10000  # number of rolls to simulate
m = 2  # number of dice rolled per trial (also the minimum possible total)
d1 = 8  # sides on the first die
d2 = 8  # sides on the second die
die1 = Die(d1)
die2 = Die(d2)
# Roll both dice n times and record the total of each roll.
results = []
for roll_num in range(n):
    results.append(die1.roll() + die2.roll())
# print(results)
# Tally how often each possible total (2 .. d1+d2, inclusive) occurred;
# freq[k] is the count for total k+2.
freq = []
for value in range(2, die1.num_sides + die2.num_sides + 1):
    freq.append(results.count(value))
# print(freq)
# visualize the results
hist = pygal.Bar()
hist.title = f"Results of rolling two D{die1.num_sides} {n} times"
# 15-6, list comprehension
# x-axis labels run from the minimum total (1*m == 2) to d1+d2.
hist.x_labels = [i for i in range(1*m, d1+d2+1)]
hist.x_title = "Results"
hist.y_title = "Frequency of Result"
hist.add(f'D{d1} + D{d2}', freq)
# save to the current folder, open the svg with a browser
hist.render_to_file(f'dice_visual_{m}{d1}{d2}.svg')
"j.yanming@gmail.com"
] | j.yanming@gmail.com |
b520f7db7409a921734c2c6347f277e85a5b12c0 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/6e4c690a37f2d01a3d6449cfbabcbf45b96d0310-<fetch_file>-fix.py | 0abe5d7bd70377b7a62337b97e29848916cd98bb | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py |
def fetch_file(self, in_path, out_path):
    """Copy a remote Windows file (or directory marker) to the local host.

    Streams the remote file over WinRM in base64-encoded chunks of
    ``buffer_size`` bytes; a remote directory is signalled by the literal
    marker ``[DIR]`` and recreated locally instead of being read.
    Raises AnsibleError if any chunk transfer fails.
    """
    super(Connection, self).fetch_file(in_path, out_path)
    in_path = self._shell._unquote(in_path)
    # Normalise to forward slashes for the local filesystem path.
    out_path = out_path.replace('\\', '/')
    display.vvv(('FETCH "%s" TO "%s"' % (in_path, out_path)), host=self._winrm_host)
    buffer_size = (2 ** 19)  # 512 KiB per remote read
    makedirs_safe(os.path.dirname(out_path))
    out_file = None
    try:
        offset = 0
        while True:
            try:
                # PowerShell snippet: seek to `offset`, read one buffer,
                # emit it base64-encoded; emit "[DIR]" for directories.
                script = ('\n $path = "%(path)s"\n If (Test-Path -Path $path -PathType Leaf)\n {\n $buffer_size = %(buffer_size)d\n $offset = %(offset)d\n\n $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)\n $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null\n $buffer = New-Object -TypeName byte[] $buffer_size\n $bytes_read = $stream.Read($buffer, 0, $buffer_size)\n if ($bytes_read -gt 0) {\n $bytes = $buffer[0..($bytes_read - 1)]\n [System.Convert]::ToBase64String($bytes)\n }\n $stream.Close() > $null\n }\n ElseIf (Test-Path -Path $path -PathType Container)\n {\n Write-Host "[DIR]";\n }\n Else\n {\n Write-Error "$path does not exist";\n Exit 1;\n }\n ' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset))
                display.vvvvv(('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset)), host=self._winrm_host)
                cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
                result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
                if (result.status_code != 0):
                    raise IOError(to_native(result.std_err))
                if (result.std_out.strip() == '[DIR]'):
                    # Remote path is a directory: nothing to decode.
                    data = None
                else:
                    data = base64.b64decode(result.std_out.strip())
                if (data is None):
                    makedirs_safe(out_path)
                    break
                else:
                    if (not out_file):
                        # If a local directory already exists under this
                        # name, silently skip writing the file.
                        if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
                            break
                        out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
                    out_file.write(data)
                    # A short read means we reached end-of-file.
                    if (len(data) < buffer_size):
                        break
                    offset += len(data)
            except Exception:
                traceback.print_exc()
                raise AnsibleError(('failed to transfer file to "%s"' % out_path))
    finally:
        if out_file:
            out_file.close()
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
fbe2fb38035c098f1729f5fc9c642d658ef7bf9e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/signal/testcase/firstcases/testcase4_004.py | e8b43073fbae46b2b65501337b4b3f9efaa91db6 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,165 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities: drive the Signal app (org.thoughtcrime.securesms)
# on an Android 4.4 emulator, collecting JaCoCo coverage through the
# app's instrumentation runner.  noReset keeps app state between runs.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'org.thoughtcrime.securesms',
    'appActivity' : 'org.thoughtcrime.securesms.ConversationListActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'org.thoughtcrime.securesms/org.thoughtcrime.securesms.JacocoInstrumentation',
    'noReset' : True
    }
def command(cmd, timeout=5):
    """Run *cmd* in a shell, wait *timeout* seconds, then terminate it.

    Output is captured (and discarded) so the child cannot block on a
    full pipe.  Always returns None.
    """
    proc = subprocess.Popen(
        cmd,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        shell=True,
    )
    time.sleep(timeout)
    proc.terminate()
    return
def getElememt(driver, str) :
    """Look up a UiAutomator element, retrying up to five times.

    Each miss sleeps one second.  After the retries are exhausted, tap
    the screen once (to dismiss any overlay) and try a final time,
    letting the exception propagate to the caller.
    """
    attempts = 5
    while attempts > 0:
        try:
            found = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
            attempts -= 1
        else:
            return found
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str)
def getElememtBack(driver, str1, str2) :
    """Find an element by *str1* (2 tries), falling back to *str2* (5 tries).

    Each miss sleeps one second.  As a last resort, tap the screen and
    look up *str2* one final time, letting the exception propagate.
    """
    for selector, tries in ((str1, 2), (str2, 5)):
        for _ in range(tries):
            try:
                found = driver.find_element_by_android_uiautomator(selector)
            except NoSuchElementException:
                time.sleep(1)
            else:
                return found
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str2)
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe between two points given as fractions of the screen size.

    A failed swipe is retried exactly once after a one-second pause.
    """
    dims = driver.get_window_size()
    sx = int(dims["width"] * startxper)
    sy = int(dims["height"] * startyper)
    ex = int(dims["width"] * endxper)
    ey = int(dims["height"] * endyper)
    try:
        driver.swipe(start_x=sx, start_y=sy, end_x=ex, end_y=ey, duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=sx, start_y=sy, end_x=ex, end_y=ey, duration=2000)
    return
# testcase004
# Python 2 script body: replays a recorded UI interaction sequence
# against Signal, then dumps JaCoCo coverage in the `finally` block.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    # Recorded tap/long-press sequence (UiAutomator selectors).
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(4)
    element = getElememtBack(driver, "new UiSelector().text(\"R322\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.thoughtcrime.securesms:id/sms_failed_indicator\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"4 min\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"R322\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(4)
    element = getElememt(driver, "new UiSelector().resourceId(\"org.thoughtcrime.securesms:id/contact_photo_image\").className(\"android.widget.ImageView\")")
    TouchAction(driver).long_press(element).release().perform()
except Exception, e:
    # Any UI failure marks the case FAILed but still runs the coverage dump.
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    # Ask the instrumented app to flush its JaCoCo/EMMA coverage file.
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"4_004\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    # If the test navigated away from Signal, force-stop whatever app is
    # left in the foreground so the next case starts clean.
    if (cpackage != 'org.thoughtcrime.securesms'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
fa8c20fc650e966a7c439fcf78f72ccfa51bcfd0 | 6669b132eb482f95c1f40d35ecae14a544fe9197 | /tree/no872.py | 2b4c2ee97838a1e8a4aef4db93b4708475064212 | [] | no_license | markdannel/leetcode | 94dade2e5a286d04075e70e48015459ea6ac383a | 6a2ac436599ecebc527efe0d6bfe0f6f825311fb | refs/heads/master | 2021-06-06T20:56:34.868122 | 2020-10-21T12:16:56 | 2020-10-21T12:16:56 | 140,668,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | # 请考虑一颗二叉树上所有的叶子,这些叶子的值按从左到右的顺序排列形成一个 叶值序列 。
# 3
# / \
# 5 1
# / \ / \
# 6 2 9 8
# / \
# 7 4
# 举个例子,如上图所示,给定一颗叶值序列为 (6, 7, 4, 9, 8) 的树。
# 如果有两颗二叉树的叶值序列是相同,那么我们就认为它们是 叶相似 的。
# 如果给定的两个头结点分别为 root1 和 root2 的树是叶相似的,则返回 true;否则返回 false 。
# 提示:
# 给定的两颗树可能会有 1 到 200 个结点。
# 给定的两颗树上的值介于 0 到 200 之间。
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node holding a value and two optional children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """LeetCode 872: two trees are "leaf-similar" when their left-to-right
    leaf value sequences are identical."""

    def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
        """Return True iff both trees yield the same leaf value sequence."""
        def leaves(node):
            # Depth-first walk collecting only leaf values, left to right.
            if node is None:
                return []
            if node.left is None and node.right is None:
                return [node.val]
            return leaves(node.left) + leaves(node.right)
        return leaves(root1) == leaves(root2)
return fetchLeafNode(root1) == fetchLeafNode(root2) | [
"wistion@foxmail.com"
] | wistion@foxmail.com |
2c79ac5a234ab0c1909ec5ed4fb2e050dfc7c112 | 9c58a1f594e18cee20128f2c8dad8257429b10d1 | /custom_business_reports/report/mapm_pbl_sales.py | d00089e3779c08973a081d7404eb9b5b448fc350 | [] | no_license | gastonfeng/Odoo-eBay-Amazon | e8919768b2a1500209f209ee3aecc7f2fb10cda7 | a9c4a8a7548b19027bc0fd904f8ae9249248a293 | refs/heads/master | 2022-04-05T00:23:50.483430 | 2020-02-19T04:58:56 | 2020-02-19T04:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | # -*- coding: utf-8 -*-
from odoo import tools
from odoo import api, fields, models
class MAMPPBLSales(models.Model):
    """Read-only Odoo reporting model backed by a SQL view over confirmed
    sale-order lines whose item code starts with ``MAPM-PBL-``."""
    _name = "mapm.pbl.sales.report"
    _description = "mapm.pbl.sales.report"
    # _auto=False: no table is created; the model is fed by the view below.
    _auto = False
    _rec_name = 'date'
    _order = 'date desc'

    order_id = fields.Many2one('sale.order', 'Order', readonly=True)
    item_id = fields.Char('Item', readonly=True)
    name = fields.Char('LAD', readonly=True)
    price_total = fields.Float('Price total', readonly=True)
    date = fields.Datetime('Date Order', readonly=True)

    @api.model_cr
    def init(self):
        # self._table = sale_report
        # Recreate the backing view on every module init/update.
        tools.drop_view_if_exists(self.env.cr, self._table)
        # date_order is shifted by -4h, presumably to the shop's local
        # timezone — TODO confirm against the deployment timezone.
        qry = """CREATE or REPLACE VIEW mapm_pbl_sales_report as (
            SELECT row_number() OVER () AS id, sol.order_id as order_id, so.date_order - '4 hour'::interval as date,
            sol.item_id, sol.name as name, sol.price_total as price_total
            FROM public.sale_order_line sol
            LEFT JOIN sale_order so ON sol.order_id = so.id
            WHERE so.state IN ('sale','done') AND sol.item_id LIKE 'MAPM-PBL-%'
            ORDER BY order_id, item_id
            )"""
        self.env.cr.execute(qry)
| [
"yjm@mail.ru"
] | yjm@mail.ru |
d3a937c8bafdeeaad095658e1450109e72bcd7bc | 62530422360aa0cb294cb208cbc7d21d282b18b5 | /test-1b/input_utils.py | 830a1d3799cca92271006521d459574d5937f330 | [] | no_license | ag8/capsule-b | d560d75d6204a41f4c8526a84fbdae614c9e47ff | fb3bdc9ebb66890fc3f6d06fd6d8e3335ae882f9 | refs/heads/master | 2021-04-15T08:35:27.283053 | 2018-04-17T05:55:08 | 2018-04-17T05:55:08 | 126,885,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,061 | py | import os, time
# import scipy
import numpy as np
import tensorflow as tf
import collections
from config import cfg
def load_mnist(path=None):
    """Load the raw MNIST idx files from *path*.

    When *path* is None, ``cfg.dataset`` is read at call time (the
    previous eager default froze it at import time).  Files are read via
    ``np.fromfile(path)`` directly, which fixes two defects in the old
    code: four file handles were opened in *text* mode and never closed.

    Returns:
        (trX, trY, teX, teY):
            trX: float64 array (60000, 28, 28, 1), scaled to [0, 1]
            trY: int32 array (60000,)
            teX: float64 array (10000, 28, 28, 1), scaled to [0, 1]
            teY: int32 array (10000,)
    """
    if path is None:
        path = cfg.dataset

    def _idx_images(name, count):
        # Skip the 16-byte idx3 header, reshape to NHWC, normalize to [0, 1].
        # np.float64 replaces the np.float alias removed in NumPy >= 1.24.
        raw = np.fromfile(os.path.join(path, name), dtype=np.uint8)
        return raw[16:].reshape((count, 28, 28, 1)).astype(np.float64) / 255.

    def _idx_labels(name, count):
        # Skip the 8-byte idx1 header.
        raw = np.fromfile(os.path.join(path, name), dtype=np.uint8)
        return raw[8:].reshape(count).astype(np.int32)

    trX = _idx_images('train-images-idx3-ubyte', 60000)
    trY = _idx_labels('train-labels-idx1-ubyte', 60000)
    teX = _idx_images('t10k-images-idx3-ubyte', 10000)
    teY = _idx_labels('t10k-labels-idx1-ubyte', 10000)
    return trX, trY, teX, teY
def load_mmnist(path, samples_tr=200000, samples_te=10000):
    """Load the multi-MNIST (36x36, two-digit) split from raw binary files.

    Each file in *path* is named after its dictionary key.  The returned
    dict has 28 entries:
      * ``trX`` plus thirteen test image sets (``te0X`` .. ``te8X`` and
        the rotated variants ``teR30X``/``teR60X``/``teR30RX``/``teR60RX``):
        float32 arrays of shape (N, 36, 36, 1), scaled to [0, 1].
      * the matching ``*Y`` label arrays: int32 of shape (N, 2).

    The previous implementation repeated the same three lines 28 times;
    this version produces identical keys and arrays from one loop.
    """
    test_sets = ['te0', 'te1', 'te2', 'te3', 'te4', 'te5', 'te6', 'te7',
                 'te8', 'teR30', 'teR60', 'teR30R', 'teR60R']

    def _images(name, count):
        # uint8 pixels -> float32 NHWC in [0, 1].
        raw = np.fromfile(file=os.path.join(path, name), dtype=np.uint8)
        return raw.reshape([count, 36, 36, 1]).astype(np.float32) / 255.

    def _labels(name, count):
        # Two int32 digit labels per sample.
        raw = np.fromfile(file=os.path.join(path, name), dtype=np.int32)
        return raw.reshape([count, 2])

    mnist = {}
    mnist['trX'] = _images('trX', samples_tr)
    mnist['trY'] = _labels('trY', samples_tr)
    for prefix in test_sets:
        mnist[prefix + 'X'] = _images(prefix + 'X', samples_te)
        mnist[prefix + 'Y'] = _labels(prefix + 'Y', samples_te)
    return mnist
| [
"andrew2000g@gmail.com"
] | andrew2000g@gmail.com |
dd6825da044bffc2f8a198b2c0760f36a3143b3c | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startPyquil591.py | 331ab5964b4d9cb39ebddab5d46ce4593416f25a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | # qubit number=4
# total number=16
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Assemble the fixed 4-qubit demo circuit (16 numbered ops)."""
    gates = [
        H(1),        # number=2
        H(2),        # number=3
        H(3),        # number=4
        Y(3),        # number=5
        SWAP(1,0),   # number=6
        SWAP(1,0),   # number=7
        X(1),        # number=8
        CNOT(0,1),   # number=10
        X(1),        # number=11
        H(1),        # number=13
        CZ(0,1),     # number=14
        H(1),        # number=15
    ]
    prog = Program()  # circuit begin
    for gate in gates:
        prog += gate
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count occurrences of each measured bitstring.

    Args:
        bitstrings: iterable of hashable measurement outcomes,
            e.g. '0101' strings.

    Returns:
        dict mapping each distinct bitstring to its count.
    """
    d = {}
    for l in bitstrings:
        # dict.get with a default replaces the explicit None check.
        d[l] = d.get(l, 0) + 1
    return d
if __name__ == '__main__':
    # Build the circuit and run it 1024 times on a 4-qubit QVM.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # Stack per-qubit result arrays into per-shot rows, then join each
    # row of bits into a single string such as '0101'.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # Persist the outcome histogram to the data directory.
    writefile = open("../data/startPyquil591.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
c5aec8a31ec607c4e166febd65af3588cb70872c | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-drds/aliyunsdkdrds/request/v20190123/DescribeDrdsInstanceDbMonitorRequest.py | 255282dcb1a0e419a4b6e5416b68bade5ef1804e | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 2,093 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class DescribeDrdsInstanceDbMonitorRequest(RpcRequest):
    """Generated RPC request for the Drds 2019-01-23
    ``DescribeDrdsInstanceDbMonitor`` API: fetch monitoring data for one
    database of a DRDS instance over a time window."""

    def __init__(self):
        RpcRequest.__init__(self, 'Drds', '2019-01-23', 'DescribeDrdsInstanceDbMonitor','Drds')
        self.set_method('POST')
        # Regional endpoint resolution is optional on older core SDKs,
        # hence the hasattr guards.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # Generated query-parameter accessors: each getter/setter pair maps
    # directly onto one request query parameter of the same name.
    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self,EndTime):
        self.add_query_param('EndTime',EndTime)

    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self,StartTime):
        self.add_query_param('StartTime',StartTime)

    def get_DrdsInstanceId(self):
        return self.get_query_params().get('DrdsInstanceId')

    def set_DrdsInstanceId(self,DrdsInstanceId):
        self.add_query_param('DrdsInstanceId',DrdsInstanceId)

    def get_DbName(self):
        return self.get_query_params().get('DbName')

    def set_DbName(self,DbName):
        self.add_query_param('DbName',DbName)

    def get_Key(self):
        return self.get_query_params().get('Key')

    def set_Key(self,Key):
        self.add_query_param('Key',Key)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
557cf22a3c0b34d01c3207e7ec3e3456814d148d | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-rms/huaweicloudsdkrms/v1/model/configuration_aggregator_resp.py | 7e3277665b36f94cf9e6f237eff00d1bee94f5fc | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 9,329 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ConfigurationAggregatorResp:
    """Generated Huawei Cloud RMS model for a resource-aggregator response.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'aggregator_name': 'str',
        'aggregator_id': 'str',
        'aggregator_urn': 'str',
        'aggregator_type': 'str',
        'account_aggregation_sources': 'AccountAggregationSource',
        'updated_at': 'str',
        'created_at': 'str'
    }

    attribute_map = {
        'aggregator_name': 'aggregator_name',
        'aggregator_id': 'aggregator_id',
        'aggregator_urn': 'aggregator_urn',
        'aggregator_type': 'aggregator_type',
        'account_aggregation_sources': 'account_aggregation_sources',
        'updated_at': 'updated_at',
        'created_at': 'created_at'
    }

    def __init__(self, aggregator_name=None, aggregator_id=None, aggregator_urn=None, aggregator_type=None, account_aggregation_sources=None, updated_at=None, created_at=None):
        """ConfigurationAggregatorResp

        The model defined in huaweicloud sdk

        :param aggregator_name: Resource aggregator name.
        :type aggregator_name: str
        :param aggregator_id: Resource aggregator ID.
        :type aggregator_id: str
        :param aggregator_urn: Resource aggregator identifier (URN).
        :type aggregator_urn: str
        :param aggregator_type: Aggregator type.
        :type aggregator_type: str
        :param account_aggregation_sources:
        :type account_aggregation_sources: :class:`huaweicloudsdkrms.v1.AccountAggregationSource`
        :param updated_at: Time the resource aggregator was last updated.
        :type updated_at: str
        :param created_at: Time the resource aggregator was created.
        :type created_at: str
        """

        self._aggregator_name = None
        self._aggregator_id = None
        self._aggregator_urn = None
        self._aggregator_type = None
        self._account_aggregation_sources = None
        self._updated_at = None
        self._created_at = None
        self.discriminator = None

        # All fields are optional in the response payload.
        if aggregator_name is not None:
            self.aggregator_name = aggregator_name
        if aggregator_id is not None:
            self.aggregator_id = aggregator_id
        if aggregator_urn is not None:
            self.aggregator_urn = aggregator_urn
        if aggregator_type is not None:
            self.aggregator_type = aggregator_type
        if account_aggregation_sources is not None:
            self.account_aggregation_sources = account_aggregation_sources
        if updated_at is not None:
            self.updated_at = updated_at
        if created_at is not None:
            self.created_at = created_at

    @property
    def aggregator_name(self):
        """Gets the aggregator_name of this ConfigurationAggregatorResp.

        Resource aggregator name.

        :return: The aggregator_name of this ConfigurationAggregatorResp.
        :rtype: str
        """
        return self._aggregator_name

    @aggregator_name.setter
    def aggregator_name(self, aggregator_name):
        """Sets the aggregator_name of this ConfigurationAggregatorResp.

        Resource aggregator name.

        :param aggregator_name: The aggregator_name of this ConfigurationAggregatorResp.
        :type aggregator_name: str
        """
        self._aggregator_name = aggregator_name

    @property
    def aggregator_id(self):
        """Gets the aggregator_id of this ConfigurationAggregatorResp.

        Resource aggregator ID.

        :return: The aggregator_id of this ConfigurationAggregatorResp.
        :rtype: str
        """
        return self._aggregator_id

    @aggregator_id.setter
    def aggregator_id(self, aggregator_id):
        """Sets the aggregator_id of this ConfigurationAggregatorResp.

        Resource aggregator ID.

        :param aggregator_id: The aggregator_id of this ConfigurationAggregatorResp.
        :type aggregator_id: str
        """
        self._aggregator_id = aggregator_id

    @property
    def aggregator_urn(self):
        """Gets the aggregator_urn of this ConfigurationAggregatorResp.

        Resource aggregator identifier (URN).

        :return: The aggregator_urn of this ConfigurationAggregatorResp.
        :rtype: str
        """
        return self._aggregator_urn

    @aggregator_urn.setter
    def aggregator_urn(self, aggregator_urn):
        """Sets the aggregator_urn of this ConfigurationAggregatorResp.

        Resource aggregator identifier (URN).

        :param aggregator_urn: The aggregator_urn of this ConfigurationAggregatorResp.
        :type aggregator_urn: str
        """
        self._aggregator_urn = aggregator_urn

    @property
    def aggregator_type(self):
        """Gets the aggregator_type of this ConfigurationAggregatorResp.

        Aggregator type.

        :return: The aggregator_type of this ConfigurationAggregatorResp.
        :rtype: str
        """
        return self._aggregator_type

    @aggregator_type.setter
    def aggregator_type(self, aggregator_type):
        """Sets the aggregator_type of this ConfigurationAggregatorResp.

        Aggregator type.

        :param aggregator_type: The aggregator_type of this ConfigurationAggregatorResp.
        :type aggregator_type: str
        """
        self._aggregator_type = aggregator_type

    @property
    def account_aggregation_sources(self):
        """Gets the account_aggregation_sources of this ConfigurationAggregatorResp.

        :return: The account_aggregation_sources of this ConfigurationAggregatorResp.
        :rtype: :class:`huaweicloudsdkrms.v1.AccountAggregationSource`
        """
        return self._account_aggregation_sources

    @account_aggregation_sources.setter
    def account_aggregation_sources(self, account_aggregation_sources):
        """Sets the account_aggregation_sources of this ConfigurationAggregatorResp.

        :param account_aggregation_sources: The account_aggregation_sources of this ConfigurationAggregatorResp.
        :type account_aggregation_sources: :class:`huaweicloudsdkrms.v1.AccountAggregationSource`
        """
        self._account_aggregation_sources = account_aggregation_sources

    @property
    def updated_at(self):
        """Gets the updated_at of this ConfigurationAggregatorResp.

        Time the resource aggregator was last updated.

        :return: The updated_at of this ConfigurationAggregatorResp.
        :rtype: str
        """
        return self._updated_at

    @updated_at.setter
    def updated_at(self, updated_at):
        """Sets the updated_at of this ConfigurationAggregatorResp.

        Time the resource aggregator was last updated.

        :param updated_at: The updated_at of this ConfigurationAggregatorResp.
        :type updated_at: str
        """
        self._updated_at = updated_at

    @property
    def created_at(self):
        """Gets the created_at of this ConfigurationAggregatorResp.

        Time the resource aggregator was created.

        :return: The created_at of this ConfigurationAggregatorResp.
        :rtype: str
        """
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Sets the created_at of this ConfigurationAggregatorResp.

        Time the resource aggregator was created.

        :param created_at: The created_at of this ConfigurationAggregatorResp.
        :type created_at: str
        """
        self._created_at = created_at

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ConfigurationAggregatorResp):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
373c8a72664191552bc59b1eb1c3eda9b042f144 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_suntan.py | f7092f8f48d34221b8bcda80a4bb30ba17516561 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py |
#calss header
class _SUNTAN():
def __init__(self,):
self.name = "SUNTAN"
self.definitions = [u'pleasantly brown skin caused by being in hot sun: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
971a4fe1130c67c67676450d48c44d9c0423faff | ae12996324ff89489ded4c10163f7ff9919d080b | /LeetCodePython/LargestColorValueInaDirectedGraph.py | c4c26114bd04fca86a8152f91f6b3614eb7fbe72 | [] | no_license | DeanHe/Practice | 31f1f2522f3e7a35dc57f6c1ae74487ad044e2df | 3230cda09ad345f71bb1537cb66124ec051de3a5 | refs/heads/master | 2023-07-05T20:31:33.033409 | 2023-07-01T18:02:32 | 2023-07-01T18:02:32 | 149,399,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | """
There is a directed graph of n colored nodes and m edges. The nodes are numbered from 0 to n - 1.
You are given a string colors where colors[i] is a lowercase English letter representing the color of the ith node in this graph (0-indexed). You are also given a 2D array edges where edges[j] = [aj, bj] indicates that there is a directed edge from node aj to node bj.
A valid path in the graph is a sequence of nodes x1 -> x2 -> x3 -> ... -> xk such that there is a directed edge from xi to xi+1 for every 1 <= i < k. The color value of the path is the number of nodes that are colored the most frequently occurring color along that path.
Return the largest color value of any valid path in the given graph, or -1 if the graph contains a cycle.
Example 1:
Input: colors = "abaca", edges = [[0,1],[0,2],[2,3],[3,4]]
Output: 3
Explanation: The path 0 -> 2 -> 3 -> 4 contains 3 nodes that are colored "a" (red in the above image).
Example 2:
Input: colors = "a", edges = [[0,0]]
Output: -1
Explanation: There is a cycle from 0 to 0.
Constraints:
n == colors.length
m == edges.length
1 <= n <= 10^5
0 <= m <= 10^5
colors consists of lowercase English letters.
0 <= aj, bj < n
hints:
1 Use topological sort.
2 let dp[u][c] := the maximum count of vertices with color c of any path starting from vertex u.
"""
from collections import defaultdict, deque
from typing import List
class LargestColorValueInaDirectedGraph:
    """LeetCode 1857: largest color value along any path in a DAG."""

    def largestPathValue(self, colors: str, edges: List[List[int]]) -> int:
        """Return the max count of one color on any path, or -1 on a cycle.

        Kahn topological sort with dp[u][c] = the maximum number of
        c-colored nodes on any path ending at u.  If fewer than n nodes
        are ever dequeued, the graph has a cycle.

        Args:
            colors: colors[i] is the lowercase color of node i.
            edges: directed edges [a, b] meaning a -> b.
        """
        res = visited = 0
        n = len(colors)
        dp = [[0] * 26 for _ in range(n)]
        in_deg = defaultdict(int)
        graph = defaultdict(list)
        for s, e in edges:
            graph[s].append(e)
            in_deg[e] += 1
        q = deque()
        for i in range(n):
            if in_deg[i] == 0:
                q.append(i)
        while q:
            cur = q.popleft()
            color = ord(colors[cur]) - ord('a')
            # (removed stray debug print(color) left in the hot loop)
            dp[cur][color] += 1
            res = max(res, dp[cur][color])
            visited += 1
            for nb in graph[cur]:
                # Propagate the best per-color counts to each successor.
                for nb_color in range(26):
                    dp[nb][nb_color] = max(dp[nb][nb_color], dp[cur][nb_color])
                in_deg[nb] -= 1
                if in_deg[nb] == 0:
                    q.append(nb)
        return res if visited == n else -1
| [
"tengda.he@gmail.com"
] | tengda.he@gmail.com |
0c074324b2e5f243477d914e86865f85b1de2e3d | 62179a165ec620ba967dbc20016e890978fbff50 | /nncf/tensorflow/quantization/init_range.py | 2d6bff0a405aa4fd16e1ccf112bfcdd01394e7a9 | [
"Apache-2.0"
] | permissive | openvinotoolkit/nncf | 91fcf153a96f85da166aacb7a70ca4941e4ba4a4 | c027c8b43c4865d46b8de01d8350dd338ec5a874 | refs/heads/develop | 2023-08-24T11:25:05.704499 | 2023-08-23T14:44:05 | 2023-08-23T14:44:05 | 263,687,600 | 558 | 157 | Apache-2.0 | 2023-09-14T17:06:41 | 2020-05-13T16:41:05 | Python | UTF-8 | Python | false | false | 11,386 | py | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from copy import deepcopy
from itertools import islice
from typing import List
import numpy as np
import tensorflow as tf
from nncf.common.logging.progress_bar import ProgressBar
from nncf.common.quantization.initialization.range import RangeInitCollectorParams
from nncf.common.quantization.initialization.range import RangeInitConfig
from nncf.common.quantization.initialization.range import RangeInitParams
from nncf.common.quantization.structs import QuantizerGroup
from nncf.common.scopes import should_consider_scope
from nncf.common.tensor_statistics.collectors import ReductionShape
from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
from nncf.config.schemata.defaults import MAX_PERCENTILE
from nncf.config.schemata.defaults import MIN_PERCENTILE
from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS
from nncf.tensorflow.layers.data_layout import get_channel_axis
from nncf.tensorflow.layers.operation import InputType
from nncf.tensorflow.layers.wrapper import NNCFWrapper
from nncf.tensorflow.quantization.layers import FakeQuantize
from nncf.tensorflow.tensor_statistics.collectors import TFMeanMinMaxStatisticCollector
from nncf.tensorflow.tensor_statistics.collectors import TFMeanPercentileStatisticCollector
from nncf.tensorflow.tensor_statistics.collectors import TFMedianMADStatisticCollector
from nncf.tensorflow.tensor_statistics.collectors import TFMinMaxStatisticCollector
from nncf.tensorflow.tensor_statistics.collectors import TFMixedMinMaxStatisticCollector
from nncf.tensorflow.tensor_statistics.collectors import TFPercentileStatisticCollector
from nncf.tensorflow.tensor_statistics.reduction import get_reduction_shape_activations
from nncf.tensorflow.tensor_statistics.reduction import get_reduction_shape_weights
from nncf.tensorflow.tensor_statistics.statistics import tf_convert_stat_to_min_max_tensor_stat
class TFRangeInitParams(RangeInitParams):
    """Range-initialization parameters resolved against a TF Keras graph."""

    def get_max_num_init_steps(self) -> int:
        """Number of data-loader batches covering the largest configured
        ``num_init_samples`` (global or per-layer)."""
        sample_counts = [cfg.num_init_samples for cfg in self.per_layer_range_init_configs]
        if self.global_init_config is not None:
            sample_counts.append(self.global_init_config.num_init_samples)
        batch_size = self.init_range_data_loader.batch_size
        return math.ceil(max(sample_counts) / batch_size)

    def get_init_config_for_quantization_point(self, layer: tf.keras.layers.Layer, input_type: str) -> RangeInitConfig:
        """Pick the init config for one quantization point (weight or activation)."""
        if input_type == InputType.WEIGHTS:
            return self.get_init_config_for_scope_and_group(layer.name, QuantizerGroup.WEIGHTS)
        # Activation FQ layers are named "<node>/fake_quantize"; strip the suffix
        # to recover the node name used by the scope matchers.
        return self.get_init_config_for_scope_and_group(
            layer.name.replace("/fake_quantize", ""), QuantizerGroup.ACTIVATIONS
        )

    def get_init_config_for_scope_and_group(self, node_name: str, group: QuantizerGroup) -> RangeInitConfig:
        """Resolve the per-layer config matching ``node_name``/``group``,
        falling back to the global config; raise on ambiguity or no match."""
        matches = [
            RangeInitConfig(cfg.init_type, cfg.num_init_samples, cfg.init_type_specific_params)
            for cfg in self.per_layer_range_init_configs
            if should_consider_scope(node_name, ignored_scopes=cfg.ignored_scopes, target_scopes=cfg.target_scopes)
            and (group == cfg.target_group or cfg.target_group is None)
        ]
        if len(matches) > 1:
            raise ValueError(
                "Location {} matches more than one per-layer initialization parameter "
                "definition!".format(str(node_name))
            )
        if len(matches) == 1:
            return matches[0]
        if self.global_init_config is not None:
            return deepcopy(self.global_init_config)
        raise ValueError(
            "Location {} does not match any per-layer initialization parameter definition!".format(str(node_name))
        )
class RangeInitializer:
    """Collects activation/weight statistics over a calibration dataset and
    uses them to initialize the min/max ranges of every NNCF quantizer in a
    TF Keras model (both FakeQuantize activation layers and per-weight ops).
    """

    def __init__(self, range_init_params: TFRangeInitParams):
        self.range_init_params = range_init_params
        # Calibration data loader and the number of batches needed to cover
        # the largest configured num_init_samples.
        self.dataset = range_init_params.init_range_data_loader
        self.num_steps = range_init_params.get_max_num_init_steps()
        # All operation classes registered as NNCF quantizers; used to pick
        # quantization ops out of a wrapped layer's weight ops.
        self.nncf_quantization_operation_classes = NNCF_QUANTIZATION_OPERATIONS.registry_dict.values()

    @staticmethod
    def generate_stat_collector(
        reduction_shape: ReductionShape,
        collector_params: RangeInitCollectorParams,
        init_config: RangeInitConfig,
        num_samples_to_collect_override: int = None,
    ) -> TensorStatisticCollectorBase:
        """Build the statistic collector matching ``init_config.init_type``.

        :param reduction_shape: axes over which statistics are reduced.
        :param collector_params: per-quantizer flags (abs-max, per-sample, ...).
        :param init_config: chosen range-init config (type + sample count).
        :param num_samples_to_collect_override: when given, replaces the
            config's ``num_init_samples`` (e.g. a batch count).
        :raises ValueError: on an unknown ``init_type``.
        """
        range_type = init_config.init_type
        num_samples = init_config.num_init_samples
        if num_samples_to_collect_override is not None:
            num_samples = num_samples_to_collect_override
        if range_type == "min_max":
            return TFMinMaxStatisticCollector(collector_params.use_abs_max, reduction_shape, num_samples)
        if range_type == "mixed_min_max":
            return TFMixedMinMaxStatisticCollector(
                collector_params.use_per_sample_stats(per_sample_stats=True),
                collector_params.use_abs_max,
                collector_params.use_means_of_mins,
                collector_params.use_means_of_maxs,
                reduction_shape,
                num_samples,
            )
        if range_type == "mean_min_max":
            return TFMeanMinMaxStatisticCollector(
                collector_params.use_per_sample_stats(per_sample_stats=True),
                collector_params.use_abs_max,
                reduction_shape,
                num_samples,
            )
        if range_type == "threesigma":
            return TFMedianMADStatisticCollector(reduction_shape, num_samples)
        if range_type == "percentile":
            min_percentile = init_config.init_type_specific_params.get("min_percentile", MIN_PERCENTILE)
            max_percentile = init_config.init_type_specific_params.get("max_percentile", MAX_PERCENTILE)
            return TFPercentileStatisticCollector([min_percentile, max_percentile], reduction_shape, num_samples)
        if range_type == "mean_percentile":
            min_percentile = init_config.init_type_specific_params.get("min_percentile", MIN_PERCENTILE)
            max_percentile = init_config.init_type_specific_params.get("max_percentile", MAX_PERCENTILE)
            return TFMeanPercentileStatisticCollector([min_percentile, max_percentile], reduction_shape, num_samples)
        raise ValueError(f"Range type {range_type} is not supported.")

    def _register_layer_statistics(self, layer: tf.keras.layers.Layer, layer_statistics: list, handles: list):
        """Hook a collector onto one FakeQuantize (activation) layer and
        disable the quantizer while statistics are gathered."""
        channel_axes = get_channel_axis(InputType.INPUTS, "", layer)
        init_config = self.range_init_params.get_init_config_for_quantization_point(layer, InputType.INPUTS)
        is_weights = False
        collector_params = RangeInitCollectorParams(is_weights, layer.mode, layer.per_channel)
        # Only these init types aggregate statistics per input sample.
        per_sample_stats = init_config.init_type in ["mixed_min_max", "mean_min_max"]
        reduction_shape = get_reduction_shape_activations(
            layer, channel_axes, collector_params.use_per_sample_stats(per_sample_stats)
        )
        # Batches, not samples: the collector sees one call per batch.
        num_batches = int(np.ceil(init_config.num_init_samples / self.dataset.batch_size))
        collector = RangeInitializer.generate_stat_collector(
            reduction_shape, collector_params, init_config, num_batches
        )
        handles.append(layer.register_hook_pre_quantizer(collector.register_input))
        layer.enabled = False
        layer_statistics.append((layer, collector))

    def _register_op_statistics(self, layer: tf.keras.layers.Layer, op_statistics: list, handles: list):
        """Hook collectors onto every NNCF quantization op attached to the
        wrapped layer's weights; disable each op during collection."""
        for weight_attr, ops in layer.weights_attr_ops.items():
            for op_name, op in ops.items():
                if op.__class__ in self.nncf_quantization_operation_classes:
                    channel_axes = get_channel_axis(InputType.WEIGHTS, weight_attr, layer)
                    init_config = self.range_init_params.get_init_config_for_quantization_point(
                        layer, InputType.WEIGHTS
                    )
                    is_weights = True
                    collector_params = RangeInitCollectorParams(is_weights, op.mode, op.per_channel)
                    reduction_shape = get_reduction_shape_weights(layer, weight_attr, channel_axes, op.per_channel)
                    # No need to store extra statistics in memory since weights won't change during range init
                    num_batches = 1
                    collector = RangeInitializer.generate_stat_collector(
                        reduction_shape, collector_params, init_config, num_batches
                    )
                    handles.append(op.register_hook_pre_call(collector.register_input))
                    op.enabled = False
                    op_statistics.append((layer, op_name, op, collector))

    def run(self, model: tf.keras.Model) -> None:
        """Run range initialization over ``model`` in place.

        Registers collectors, feeds ``num_steps`` calibration batches through
        the model, applies the gathered min/max ranges to every quantizer,
        removes the hooks, and finally runs one batch with quantization
        re-enabled.
        """
        layer_statistics = []
        op_statistics = []
        handles = []
        for layer in model.layers:
            if isinstance(layer, FakeQuantize):
                self._register_layer_statistics(layer, layer_statistics, handles)
            elif isinstance(layer, NNCFWrapper):
                self._register_op_statistics(layer, op_statistics, handles)
        # Forward passes only; the pre-quantizer hooks feed the collectors.
        for x, _ in ProgressBar(
            islice(self.dataset, self.num_steps), total=self.num_steps, desc="Collecting tensor statistics/data"
        ):
            model(x, training=False)
        for layer, collector in layer_statistics:
            target_stat = collector.get_statistics()
            minmax_stats = tf_convert_stat_to_min_max_tensor_stat(target_stat)
            layer.apply_range_initialization(tf.squeeze(minmax_stats.min_values), tf.squeeze(minmax_stats.max_values))
            layer.enabled = True
        for layer, op_name, op, collector in op_statistics:
            weights = layer.get_operation_weights(op_name)
            target_stat = collector.get_statistics()
            minmax_stats = tf_convert_stat_to_min_max_tensor_stat(target_stat)
            # Squeeze only multi-dimensional stats; a 1-D per-channel vector
            # must keep its shape.
            min_values = minmax_stats.min_values
            if len(min_values.shape) != 1:
                min_values = tf.squeeze(min_values)
            max_values = minmax_stats.max_values
            if len(max_values.shape) != 1:
                max_values = tf.squeeze(max_values)
            op.apply_range_initialization(weights, min_values, max_values)
            op.enabled = True
        for handle in handles:
            handle.remove()
        # One warm-up pass with quantization enabled (builds quantized graph state).
        for x, _ in self.dataset:
            model(x, training=False)
            break
| [
"noreply@github.com"
] | openvinotoolkit.noreply@github.com |
f182b0390f018af7205452f0f5f35f9e85f0130c | 625daac7e73b98935f9fe93e647eb809b48b712e | /Challenges/checkEqualFrequency.py | beb465b5ca868a5cbcf16ba1eb2e11564169cb94 | [] | no_license | aleksaa01/codefights-codesignal | 19b2d70779cc60f62511b6f88ae5d049451eac82 | a57a5589ab2c9d9580ef44900ea986c826b23051 | refs/heads/master | 2022-03-15T04:46:40.356440 | 2019-12-08T15:41:37 | 2019-12-08T15:41:37 | 112,034,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
Given array of integers, check whether each integer, that occurs in it,
is contained there the same number of times as any other integer from the
given array.
"""
def checkEqualFrequency(inputArray):
    """Return True iff every distinct integer in inputArray occurs the same
    number of times (an empty array counts as True).

    The previous version repeated list.count() per distinct value (O(n*k))
    and blindly returned True for any array longer than 40000 elements,
    which is simply wrong for such inputs.  A single Counter pass is O(n)
    and correct for every size.
    """
    from collections import Counter
    # All values share one frequency iff the set of frequencies has <= 1 element.
    return len(set(Counter(inputArray).values())) <= 1
| [
"some12curious@gmail.com"
] | some12curious@gmail.com |
89c9bc62e1e65ff6b930e0fdc27eb8ab0c1b18fe | 054043e4b151459235c63cca32fc54e16ad4d619 | /manage.py | c6bdf3fd0df120d52341cc15a0106cc2bc7cff07 | [] | no_license | venugopalgodavarthi/model-26-6-2021 | 82c19d67cff39979ddefdd1b65004d55a3ad9afe | b742218f95ec393832c17c3201171789bf0bb4d0 | refs/heads/main | 2023-06-01T09:41:44.132863 | 2021-06-26T13:09:15 | 2021-06-26T13:09:15 | 380,504,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless the caller has
    # already configured one in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pro30.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` keeps the original
        # traceback chained for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the management command given on the CLI (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"venugopalgodavarthi@gmail.com"
] | venugopalgodavarthi@gmail.com |
9e046b04afc103d5c8c01bdaa9d96e966cffef3f | dbbdf35bff726681ae34ad08eeda5f30929e2ae9 | /math/0x00-linear_algebra/12-bracin_the_elements.py | 22bcbb1c3b72f1ab9f9c4b303ab98863b5e86b99 | [] | no_license | jorgezafra94/holbertonschool-machine_learning | 0b7f61c954e5d64b1f91ec14c261527712243e98 | 8ad4c2594ff78b345dbd92e9d54d2a143ac4071a | refs/heads/master | 2023-02-03T20:19:36.544390 | 2020-12-21T21:49:10 | 2020-12-21T21:49:10 | 255,323,504 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/env python3
"""
using wise-element operations
"""
def np_elementwise(mat1, mat2):
    """Element-wise arithmetic on two NumPy arrays.

    Relies on NumPy's overloaded operators (broadcasting rules apply).

    Returns:
        tuple: (sum, difference, product, quotient), in that order.
    """
    return (mat1 + mat2, mat1 - mat2, mat1 * mat2, mat1 / mat2)
| [
"947@holbertonschool.com"
] | 947@holbertonschool.com |
55a62710c495ee1c00662cc4bad352b248617cd1 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/servicebus/azure-servicebus/azure/servicebus/aio/management/_utils.py | 0660b05a3a050fa3f60899b59ea31785903c1750 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 6,533 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import cast
from xml.etree.ElementTree import ElementTree
import urllib.parse as urlparse
from ...management import _constants as constants
from ...management._handle_response_error import _handle_response_error
# This module defines functions get_next_template and extract_data_template.
# Application code uses functools.partial to substantialize their params and builds an
# azure.core.async_paging.AsyncItemPaged instance with the two substantialized functions.
# The following is an ATOM feed XML list of QueueDescription with page size = 2.
# Tag <feed> has 2 (the page size) children <entry> tags.
# Tag <link rel="next" .../> tells the link to the next page.
# The whole XML will be deserialized into an XML ElementTree.
# Then model class QueueDescriptionFeed deserializes the ElementTree into a QueueDescriptionFeed instance.
# (QueueDescriptionFeed is defined in file ../../management/_generated/models/_models.py and _models_py3.py)
# Function get_next_template gets the next page of XML data like this one and returns the ElementTree.
# Function extract_data_template deserialize data from the ElementTree and provide link to the next page.
# azure.core.async_paging.AsyncItemPaged orchestrates the data flow between them.
# <feed xmlns="http://www.w3.org/2005/Atom">
# <title type="text">Queues</title>
# <id>https://servicebusname.servicebus.windows.net/$Resources/queues?$skip=0&$top=2&api-version=2017-04</id>
# <updated>2020-06-30T23:49:41Z</updated>
# <link rel="self" href="https://servicebusname.servicebus.windows.net/$Resources/queues?
# $skip=0&$top=2&api-version=2017-04"/>
# <link rel="next" href="https://servicebusname.servicebus.windows.net/$Resources/queues?
# %24skip=2&%24top=2&api-version=2017-04"/>
#
# <entry xml:base="https://servicebusname.servicebus.windows.net/$Resources/queues?
# $skip=0&$top=2&api-version=2017-04">
# <id>https://servicebusname.servicebus.windows.net/5?api-version=2017-04</id>
# <title type="text">5</title>
# <published>2020-06-05T00:24:34Z</published>
# <updated>2020-06-25T05:57:29Z</updated>
# <author>
# <name>servicebusname</name>
# </author>
# <link rel="self" href="../5?api-version=2017-04"/>
# <content type="application/xml">
# <QueueDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
# xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
# ...
# </QueueDescription>
# </content>
# </entry>
# <entry xml:base="https://servicebusname.servicebus.windows.net/$Resources/queues?
# $skip=0&$top=2&api-version=2017-04">
# <id>https://servicebusname.servicebus.windows.net/6?api-version=2017-04</id>
# <title type="text">6</title>
# <published>2020-06-15T19:49:35Z</published>
# <updated>2020-06-15T19:49:35Z</updated>
# <author>
# <name>servicebusname</name>
# </author>
# <link rel="self" href="../6?api-version=2017-04"/>
# <content type="application/xml">
# <QueueDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
# xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
# ...
# </QueueDescription>
# </content>
# </entry>
# </feed>
async def extract_data_template(feed_class, convert, feed_element):
    """Deserialize one ATOM feed page and return ``(next_link, iterator)``.

    This function is partialized by the caller and handed to
    ``azure.core.async_paging.AsyncItemPaged``: the iterator yields this
    page's entries (passed through ``convert`` when one is supplied), and
    ``next_link`` — taken from the second ``<link>`` tag when present —
    drives the fetch of the following page; ``None`` stops paging.
    """
    feed = feed_class.deserialize(feed_element)
    if convert:
        entities = [convert(entry) for entry in feed.entry]
    else:
        entities = list(feed.entry)
    links = feed.link
    # When the response carries two <link> tags, the second one is the
    # next-page link; otherwise this is the last page.
    next_link = links[1].href if links and len(links) == 2 else None
    return next_link, iter(entities)
async def extract_rule_data_template(feed_class, convert, feed_element):
    """Special version of function extract_data_template for Rule.
    Pass both the XML entry element and the rule instance to function `convert`. Rule needs to extract
    KeyValue from XML Element and set to Rule model instance manually. The autorest/msrest serialization/deserialization
    doesn't work for this special part.
    After autorest is enhanced, this method can be removed.
    Refer to autorest issue https://github.com/Azure/autorest/issues/3535
    """
    deserialized = feed_class.deserialize(feed_element)
    next_link = None
    # The second <link> tag, when present, carries the next-page URL.
    if deserialized.link and len(deserialized.link) == 2:
        next_link = deserialized.link[1].href
    if deserialized.entry:
        # Pair each raw XML <entry> element with its deserialized model so
        # `convert` can pull the rule's KeyValue data out of the raw XML.
        list_of_entities = [
            convert(*x) if convert else x
            for x in zip(
                feed_element.findall(constants.ATOM_ENTRY_TAG), deserialized.entry
            )
        ]
    else:
        list_of_entities = []
    return next_link, iter(list_of_entities)
async def get_next_template(
    list_func, *args, start_index=0, max_page_size=100, **kwargs
):
    """Call list_func to get the XML data and deserialize it to XML ElementTree.
    azure.core.async_paging.AsyncItemPaged will call a partial function created
    from `extract_data_template` on the returned XML ElementTree to extract the
    page's items and the next-page link.
    """
    api_version = constants.API_VERSION
    if args[0]: # It's next link. It's None for the first page.
        # Recover the paging state ($skip/$top) and api-version from the
        # next-page URL so this request continues where the last one ended.
        queries = urlparse.parse_qs(urlparse.urlparse(args[0]).query)
        start_index = int(queries[constants.LIST_OP_SKIP][0])
        max_page_size = int(queries[constants.LIST_OP_TOP][0])
        api_version = queries[constants.API_VERSION_PARAM_NAME][0]
    # Translate service-level HTTP errors into the package's exceptions.
    with _handle_response_error():
        feed_element = cast(
            ElementTree,
            await list_func(
                skip=start_index, top=max_page_size, api_version=api_version, **kwargs
            ),
        )
    return feed_element
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
90cda9b891ed7f996babd72cf4f9b5c8bf58a64a | 87003211b07881fa747fe4fca0aa07f437f7b553 | /savanna/openstack/common/rpc/service.py | e9610c88090594de826ab67f1864a9421a96a933 | [
"Apache-2.0"
] | permissive | lookmee/savanna | 1215492c48173ec8e7423edb2896fcd2b7cbfa83 | 33cbdf6ef01e07fabe63bdbefb949012a9aadada | refs/heads/master | 2020-12-31T02:14:07.151864 | 2013-11-15T17:43:26 | 2013-11-15T17:43:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,882 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from savanna.openstack.common.gettextutils import _ # noqa
from savanna.openstack.common import log as logging
from savanna.openstack.common import rpc
from savanna.openstack.common.rpc import dispatcher as rpc_dispatcher
from savanna.openstack.common import service
LOG = logging.getLogger(__name__)
class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host.
    """

    def __init__(self, host, topic, manager=None, serializer=None):
        super(Service, self).__init__()
        self.host = host
        self.topic = topic
        self.serializer = serializer
        # When no manager is supplied, the service itself handles RPC calls.
        if manager is None:
            self.manager = self
        else:
            self.manager = manager

    def start(self):
        """Create the RPC connection and start consuming on three queues:
        the shared topic queue, the host-specific queue, and a fanout queue.
        """
        super(Service, self).start()
        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)
        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
                                                  self.serializer)
        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, dispatcher, fanout=False)
        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, dispatcher, fanout=False)
        self.conn.create_consumer(self.topic, dispatcher, fanout=True)
        # Hook to allow the manager to do other initializations after
        # the rpc connection is created.
        if callable(getattr(self.manager, 'initialize_service_hook', None)):
            self.manager.initialize_service_hook(self)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

    def stop(self):
        """Close the RPC connection, then delegate to the base-class stop."""
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()
| [
"slukjanov@mirantis.com"
] | slukjanov@mirantis.com |
5871f2151c438d680e6b7ec9fa1d2014af3d58a4 | f98a1d31ab3e82be724a03de9e468d07a7c65b5e | /medium/sortColors.py | bc4a523cdf4809171ed15c02d6a1f825553ef29e | [] | no_license | chaochaocodes/leetcode | bd618973483a88d2aa1d9ba3d1463e8d152877d4 | 087b4780e7c95fc780afd3266129c4975c68b321 | refs/heads/master | 2023-06-05T06:04:38.863389 | 2021-06-17T04:07:49 | 2021-06-17T04:07:49 | 294,852,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | '''
https://leetcode.com/problems/sort-colors/
Given an array nums with n objects colored red, white, or blue, sort them in-place so that objects of the same color are adjacent, with the colors in the order red, white, and blue.
We will use the integers 0, 1, and 2 to represent the color red, white, and blue, respectively.
'''
nums1 = [2,0,2,1,1,0]
# Output: [0,0,1,1,2,2]
nums2 = [2,0,1]
# Output: [0,1,2]
nums3 = [0]
# Output: [0]
nums4 = [1]
# Output: [1]
# Approach 1: 3-Pointer approach with Python swap
# "Python swap" unpacks tuple with comma operator and accesses elements in constant time
# One pass, O(n) time, O(1) space
def sortColors(nums):
    """Sort a list of 0/1/2 values in place (Dutch national flag).

    Single pass, O(n) time, O(1) extra space.  Invariant: everything left
    of ``low`` is 0, everything right of ``high`` is 2, and ``mid`` scans
    the unknown middle region.  Also returns ``nums`` for convenience.
    """
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        value = nums[mid]
        if value == 0:
            nums[mid], nums[low] = nums[low], nums[mid]
            low += 1
            mid += 1
        elif value == 2:
            # Don't advance mid: the swapped-in value is still unexamined.
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1
        else:
            mid += 1
    return nums
# Approach 2: Python copy with slice syntax [:] to sort in-place
# One pass, O(n) time, O(1) space. Less efficient than 3-pointer because
# - slicing lists copies the references which costs you overhead memory.
# - concatenating two lists creates a new list in memory, complexity O(n+m)
def sortColors(nums):
    """Counting-sort variant: tally each color, then overwrite ``nums``
    in place via slice assignment.  Two passes over the data, O(n) time.
    Returns ``nums`` for convenience.
    """
    zeros = nums.count(0)
    ones = nums.count(1)
    twos = nums.count(2)
    nums[:] = [0] * zeros + [1] * ones + [2] * twos
    return nums
sortColors(nums1) | [
"57464564+chaochaocodes@users.noreply.github.com"
] | 57464564+chaochaocodes@users.noreply.github.com |
b143cf79d367b8b2843fc4dc1106a8f70c8df756 | 33c4bc9ca463ce0ec61945fca5841c9d8a18ab8e | /thrift/compiler/test/fixtures/qualified/gen-py3/module1/types.pyi | 3526c5f207a7649315a2d163c2aee5a44aaa8fc0 | [
"Apache-2.0"
] | permissive | gaurav1086/fbthrift | d54bb343bf1a8503dd329fbfcd0b46fe9f70754c | 68d1a8790bfd5b3974e1b966c8071f9c456b6c6a | refs/heads/master | 2020-12-27T22:41:09.452839 | 2020-02-03T23:56:20 | 2020-02-03T23:58:33 | 238,088,855 | 0 | 0 | Apache-2.0 | 2020-02-04T00:13:04 | 2020-02-04T00:13:03 | null | UTF-8 | Python | false | false | 2,465 | pyi | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import folly.iobuf as __iobuf
import thrift.py3.types
import thrift.py3.exceptions
from thrift.py3.types import NOTSET, NOTSETTYPE
import typing as _typing
import sys
import itertools
__property__ = property
class Enum(thrift.py3.types.Enum):
    # Generated stub for the thrift enum `Enum` (values ONE/TWO/THREE);
    # regenerate from the .thrift IDL rather than editing by hand.
    ONE: Enum = ...
    TWO: Enum = ...
    THREE: Enum = ...
class Struct(thrift.py3.types.Struct, _typing.Hashable, _typing.Iterable[_typing.Tuple[str, _typing.Any]]):
    # Generated stub for the immutable thrift struct `Struct`.  Instances
    # are hashable, ordered, and iterable as (field_name, value) pairs;
    # __call__ returns a modified copy (NOTSET keeps the current value).
    def __init__(
        self, *,
        first: _typing.Optional[int]=None,
        second: _typing.Optional[str]=None
    ) -> None: ...
    def __call__(
        self, *,
        first: _typing.Union[int, NOTSETTYPE, None]=NOTSET,
        second: _typing.Union[str, NOTSETTYPE, None]=NOTSET
    ) -> Struct: ...
    def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['Struct'], bytes]]: ...
    def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
    def __bool__(self) -> bool: ...
    def __hash__(self) -> int: ...
    def __repr__(self) -> str: ...
    def __lt__(self, other: 'Struct') -> bool: ...
    def __gt__(self, other: 'Struct') -> bool: ...
    def __le__(self, other: 'Struct') -> bool: ...
    def __ge__(self, other: 'Struct') -> bool: ...
    # Read-only field accessors.
    @__property__
    def first(self) -> int: ...
    @__property__
    def second(self) -> str: ...
_List__EnumT = _typing.TypeVar('_List__EnumT', bound=_typing.Sequence[Enum])
class List__Enum(_typing.Sequence[Enum], _typing.Hashable):
    # Generated stub for thrift's immutable, hashable `list<Enum>` container;
    # supports the full read-only Sequence protocol plus concatenation.
    def __init__(self, items: _typing.Sequence[Enum]=None) -> None: ...
    def __repr__(self) -> str: ...
    def __len__(self) -> int: ...
    def __hash__(self) -> int: ...
    def __contains__(self, x: object) -> bool: ...
    def __copy__(self) -> _typing.Sequence[Enum]: ...
    @_typing.overload
    def __getitem__(self, i: int) -> Enum: ...
    @_typing.overload
    def __getitem__(self, s: slice) -> _typing.Sequence[Enum]: ...
    def count(self, item: _typing.Any) -> int: ...
    def index(self, item: _typing.Any, start: int = ..., stop: int = ...) -> int: ...
    def __add__(self, other: _typing.Sequence[Enum]) -> 'List__Enum': ...
    def __radd__(self, other: _List__EnumT) -> _List__EnumT: ...
    def __reversed__(self) -> _typing.Iterator[Enum]: ...
    def __iter__(self) -> _typing.Iterator[Enum]: ...
c1: Struct = ...
e1s: List__Enum = ...
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
04f783b4a4df38fece8fdcee10e4b9afc7e09de2 | 0ec046d7ad5b66bc14d5afaac178466f9f8e7073 | /config.py | da9665e8b3c9ed60bece06604c8618383058e7b7 | [] | no_license | MarsStirner/vesta | b7e7b9da9b6028acf1ea0cd7d6088037e95fef93 | 891b26ddfddfaebe145cf4c4a220fdb8c9f74fe0 | refs/heads/master | 2020-12-03T00:34:12.420995 | 2014-10-01T08:21:37 | 2014-10-01T08:21:37 | 96,043,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | # -*- coding: utf-8 -*-
DEBUG = False
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 5000
SYSTEM_USER = 'vesta'
MODULE_NAME = 'vesta'
WTF_CSRF_ENABLED = True
SECRET_KEY = ''
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_USER = 'vesta_user'
MONGODB_PASSWORD = 'vesta_pwd'
MONGODB_DB = 'vesta'
SIMPLELOGS_URL = 'http://127.0.0.1:8080'
NSI_SOAP = 'http://nsi.rosminzdrav.ru/wsdl/SOAP-server.v2.php?wsdl'
NSI_TOKEN = ''
try:
from config_local import *
except ImportError:
# no local config found
pass
MONGODB_CONNECT_URI = 'mongodb://{user}:{password}@{host}/{database}'.format(user=MONGODB_USER,
password=MONGODB_PASSWORD,
host=MONGODB_HOST,
port=MONGODB_PORT,
database=MONGODB_DB) | [
"santipov@korusconsulting.ru"
] | santipov@korusconsulting.ru |
e675565a6db7627396edece81fd2bcaafcf387f4 | 8efb4caeafe2cfb024827ce194b5abae6fdfc9a4 | /test/functional/test_framework/siphash.py | 62d02f18535edfc6f28097327c5c332948720b6d | [
"MIT"
] | permissive | Worldcoin-Network/worldcoin | cd8ac9631154666cb11603d5f07e3a9dc2e1653a | 4f14d8baadda3f46363c26dc327a68b33f14e28c | refs/heads/master | 2022-03-04T01:50:14.783972 | 2021-10-26T15:21:47 | 2021-10-26T15:21:47 | 156,328,955 | 15 | 9 | MIT | 2021-05-10T16:58:07 | 2018-11-06T05:08:32 | C++ | UTF-8 | Python | false | false | 2,016 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Worldcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Specialized SipHash-2-4 implementations.
This implements SipHash-2-4 for 256-bit integers.
"""
def rotl64(n, b):
    """Rotate the 64-bit value *n* left by *b* bits (0 <= b < 64)."""
    mask = (1 << 64) - 1
    return ((n << b) & mask) | (n >> (64 - b))
def siphash_round(v0, v1, v2, v3):
    # One SipRound of the SipHash ARX (add-rotate-xor) mixing function over
    # the four 64-bit state words; all additions are taken modulo 2**64.
    v0 = (v0 + v1) & ((1 << 64) - 1)
    v1 = rotl64(v1, 13)
    v1 ^= v0
    v0 = rotl64(v0, 32)
    v2 = (v2 + v3) & ((1 << 64) - 1)
    v3 = rotl64(v3, 16)
    v3 ^= v2
    v0 = (v0 + v3) & ((1 << 64) - 1)
    v3 = rotl64(v3, 21)
    v3 ^= v0
    v2 = (v2 + v1) & ((1 << 64) - 1)
    v1 = rotl64(v1, 17)
    v1 ^= v2
    v2 = rotl64(v2, 32)
    return (v0, v1, v2, v3)
def siphash256(k0, k1, h):
    """SipHash-2-4 of a 256-bit integer *h* under the 128-bit key (k0, k1).

    The 256-bit input is split into four little-endian 64-bit words; each
    word is absorbed with 2 SipRounds (the "2" in 2-4), and finalization
    runs 4 SipRounds (the "4").
    """
    # Split h into four 64-bit message words, least-significant first.
    n0 = h & ((1 << 64) - 1)
    n1 = (h >> 64) & ((1 << 64) - 1)
    n2 = (h >> 128) & ((1 << 64) - 1)
    n3 = (h >> 192) & ((1 << 64) - 1)
    # State initialization with the standard SipHash constants
    # ("somepseudorandomlygeneratedbytes") XORed with the key; the first
    # message word is folded into v3 up front.
    v0 = 0x736f6d6570736575 ^ k0
    v1 = 0x646f72616e646f6d ^ k1
    v2 = 0x6c7967656e657261 ^ k0
    v3 = 0x7465646279746573 ^ k1 ^ n0
    # Absorb each word: 2 compression rounds, then XOR the word into v0 and
    # the next word into v3.
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n0
    v3 ^= n1
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n1
    v3 ^= n2
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n2
    v3 ^= n3
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n3
    # Final pseudo-word: the top byte 0x20 encodes the 32-byte input length.
    v3 ^= 0x2000000000000000
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= 0x2000000000000000
    # Finalization: XOR 0xFF into v2, then 4 rounds.
    v2 ^= 0xFF
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    return v0 ^ v1 ^ v2 ^ v3
| [
"quentin.neveu@hotmail.ca"
] | quentin.neveu@hotmail.ca |
05ec9e42a6699d7ed705714b7814116855fc61ac | a64d7e2814c296db3157f841f17ea73169d54405 | /minmarkets/models.py | d50f27284d6342b43dec2b0d053731836029e009 | [] | no_license | andrewseft/amjumfb | ad8ea0dc091355437cbf0984983550fe149300a5 | 6c027f47e320b7ad9c7e27d635335c9b260216a7 | refs/heads/master | 2023-05-04T23:19:07.942766 | 2021-05-25T15:39:16 | 2021-05-25T15:39:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | from cloudinary.models import CloudinaryField
from django.db import models
# Create your models here.
from accounts.models import Profile, upload_image_path
class LoanPackage(models.Model):
    """Marketplace listing for a loan software package."""
    name = models.CharField(max_length=300, blank=True, null=True)
    # Default listing price; units (currency) not specified here — confirm
    # against the checkout code.
    price = models.IntegerField(default=3000)
    premium_package = models.BooleanField(default=True)
    package_owner = models.CharField(max_length=300)
    description = models.TextField()
    product_code = models.CharField(null=True, blank=True, max_length=10)
    # Cloudinary-hosted image; upload_image_path (imported from accounts)
    # is passed as the field's first positional argument.
    image = CloudinaryField(upload_image_path, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    def __str__(self):
        # NOTE(review): returns None when name is blank/null — confirm name
        # is always populated in practice.
        return self.name
    def image_tag(self):
        # Inline <img> preview for the Django admin.
        from django.utils.html import mark_safe
        return mark_safe('<img src="%s" width="150" height="200" />' % self.image.url)
    image_tag.short_description = 'Package Image'
    # allow_tags is a legacy flag (ignored on modern Django; mark_safe
    # already marks the HTML as safe).
    image_tag.allow_tags = True
class LoanCalculators(models.Model):
    """Downloadable loan-calculator product; the download link is stored in `file`."""
    name = models.CharField(max_length=300, blank=True, null=True)
    price = models.IntegerField(default=3000)
    premium_package = models.BooleanField(default=True)
    package_owner = models.CharField(max_length=300)
    description = models.TextField()
    # plain CharField holding a URL, not a FileField -- the link is entered by hand
    file = models.CharField(max_length=300, blank=True, null=True, help_text="download link here!")
    product_code = models.CharField(null=True, blank=True, max_length=10)
    image = CloudinaryField(upload_image_path, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)  # set once on creation
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)  # refreshed on every save
    class Meta:
        verbose_name = 'Loan Calculator'
        verbose_name_plural = 'Loan Calculators'
    def __str__(self):
        return self.name
    def image_tag(self):
        """Render a 100x100 admin preview (the other package models use 150x200)."""
        from django.utils.html import mark_safe
        return mark_safe('<img src="%s" width="100" height="100" />' % self.image.url)
    image_tag.short_description = 'Package Image'
    image_tag.allow_tags = True
class LoanCollectionPackage(models.Model):
    """Loan-collection package; field-for-field mirror of LoanPackage."""
    name = models.CharField(max_length=300, blank=True, null=True)
    price = models.IntegerField(default=3000)
    premium_package = models.BooleanField(default=True)
    package_owner = models.CharField(max_length=300)
    description = models.TextField()
    product_code = models.CharField(null=True, blank=True, max_length=10)
    image = CloudinaryField(upload_image_path, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)  # set once on creation
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)  # refreshed on every save
    def __str__(self):
        return self.name
    def image_tag(self):
        """Render the package image as an <img> tag for the Django admin."""
        from django.utils.html import mark_safe
        return mark_safe('<img src="%s" width="150" height="200" />' % self.image.url)
    image_tag.short_description = 'Package Image'
    image_tag.allow_tags = True
| [
"mathegeniuse@gmail.com"
] | mathegeniuse@gmail.com |
0dffae8f000cbe0ea6a09206c93f17ba9c7e5ea7 | b6475b69ae89f5a2ffb3c03c21d747bc6fddbdd2 | /user/migrations/0002_auto_20201202_1712.py | cb7d7046b236d287d3782a1113ddd610830b1b72 | [] | no_license | LeeSuHa98/14-2nd-SHABANG-backend | 3718516abc1a423da7e97d9363c61bfc7dd5ec4f | 13cc50c80aca273277bae8d8b15a1623b860ce55 | refs/heads/main | 2023-02-18T05:57:27.863525 | 2021-01-19T04:47:20 | 2021-01-19T04:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # Generated by Django 3.1.3 on 2020-12-02 17:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename User.number to User.phone_number."""
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='number',
            new_name='phone_number',
        ),
    ]
| [
"fergith@naver.com"
] | fergith@naver.com |
9454dc45a60ab83de31a7d603a042931f4f0e03e | 8a49aafeea46ded564dd2482350f82b4334436ed | /network/Auto_Deeplab/__init__.py | fd7bea4ba5e386f29d3d212818c9f6babb91e440 | [] | no_license | yifuxiong/Deeplab_pytorch | 1f96cd69a5597edc2021c24a5b88e462f67cb738 | 530809110156625945dfabd9b6dec0b2c0190415 | refs/heads/master | 2022-06-24T19:55:28.687829 | 2019-02-19T08:22:09 | 2019-02-19T08:22:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | # -*- coding: utf-8 -*-
"""
@Time : 2019/2/17 22:15
@Author : Wang Xin
@Email : wangxin_buaa@163.com
""" | [
"wangxin_buaa@163.com"
] | wangxin_buaa@163.com |
11a63de740fb4d5f7772abdb589d20dc2321c2ae | 2a6f1afa7678e5d76efe01b1474eda59d442ae0f | /venv/Lib/site-packages/jesse/indicators/vwma.py | 3d884a9f64880ba84a80dbc72e1b1bce906a4afd | [] | no_license | cagridincel/CagriTrade | 6b50c785efc3eb43487724be59511a5850a92145 | 86839e6604eb18850f6410acf5f6993da59b74ec | refs/heads/master | 2023-03-03T09:16:29.965177 | 2021-02-16T13:01:18 | 2021-02-16T13:01:18 | 338,672,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | from typing import Union
import numpy as np
import tulipy as ti
from jesse.helpers import get_candle_source
def vwma(candles: np.ndarray, period=20, source_type="close", sequential=False) -> Union[float, np.ndarray]:
    """
    VWMA - Volume Weighted Moving Average

    :param candles: np.ndarray
    :param period: int - default: 20
    :param source_type: str - default: "close"
    :param sequential: bool - default=False
    :return: float | np.ndarray
    """
    # For a single-value result only a warm-up window of candles is needed.
    if not sequential and len(candles) > 240:
        candles = candles[-240:]

    prices = get_candle_source(candles, source_type=source_type)
    volumes = np.ascontiguousarray(candles[:, 5])  # column 5 holds volume
    values = ti.vwma(np.ascontiguousarray(prices), volumes, period=period)

    if sequential:
        # Left-pad with NaN so the output lines up with the input candles.
        pad = np.full((candles.shape[0] - values.shape[0]), np.nan)
        return np.concatenate((pad, values), axis=0)
    return values[-1]
| [
"cagridincel@gmail.com"
] | cagridincel@gmail.com |
e0ea6925e3b151389aae2796fe99d07db9bb45fe | 6a6bae69fb39e7b236c0ee0abfe581ee59bb68be | /urls.py | 821c6961ddb6a2e026e807c77324ba0537835d34 | [] | no_license | taddeimania/tfb | 46b6360e5b93f9d93dc4badf5bf28dc0ed7aba36 | dee60801300acf4ba654f9c69573a0a0f9e4a4d3 | refs/heads/master | 2016-09-16T16:16:16.403711 | 2012-11-22T03:19:22 | 2012-11-22T03:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from tfb import views as base_views
from tfb.messages import views as message_views
from tfb.matchup import views as matchup_views
from tfb.player_card import views as player_card_views
from tfb.top_player_list import views as top_player_list
from tfb.draft import views as draft_views
from tfb.profile import views as profile_views
admin.autodiscover()

# URL routing table for the fantasy-league app (old-style Django patterns()).
urlpatterns = patterns('',
    # landing / static pages
    url(r'^$', base_views.AboutView.as_view()),
    url(r'^about/$', base_views.AboutView.as_view()),
    url(r'^home/$', base_views.HomeView.as_view(), name='home'),
    url(r'^players/$', base_views.HomeView.as_view(), name='home'),
    url(r'^blue/$', base_views.BlankView.as_view(), name='blank'),
    # account / profile
    url(r'^delete_account/$', profile_views.DeleteAccountView.as_view(), name='delete'),
    url(r'^player/(?P<player_id>\w+)/$', player_card_views.PlayerPageView.as_view(), name='player'),
    url(r'^uteam/(?P<team_id>\w+)/$', base_views.NotMyTeamView.as_view(), name='uteam'),
    url(r'^uteam/$', base_views.MyTeamView.as_view()),
    url(r'^myteam/$', login_required(base_views.MyTeamView.as_view()), name="myteam"),
    url(r'^messages/$', message_views.MessageView.as_view(),name='message'),
    # league pages and admin
    url(r'^league/$', login_required(base_views.league_page),name='league'),
    url(r'^league/(?P<week>\w+)/$', login_required(base_views.league_page),name='league'),
    url(r'^leagueadmin/$', base_views.leagueadmin,name='leagueadmin'),
    url(r'^leagueadmin/(?P<arg>\w+)/$', base_views.leagueadmin,name='leagueadmin'),
    # auth
    url(r'^login/$', 'django.contrib.auth.views.login'),
    url(r'^logout/$', base_views.logout_user,name='logout_user'),
    url(r'^profile/$', login_required(profile_views.ProfileView.as_view()), name='ProfileView'),
    url(r'^profile/edit/$', profile_views.EditAccountView.as_view(), name='profileedit'),
    # roster moves
    url(r'^joinleague/$', base_views.joinleague,name='joinleague'),
    url(r'^pickup/(?P<posid>\w+)/$', base_views.pickup,name='pickup'),
    url(r'^list/(?P<posid>\w+)/$', base_views.list_player,name='list'),
    # draft; NOTE(review): 'draftpage' name is reused for two different views
    url(r'^draft/$', draft_views.draftpage, name='draftpage'),
    url(r'^drag/$', draft_views.drag_and_drop, name='draftpage'),
    url(r'^matchup/$', login_required(matchup_views.MatchupPageView.as_view()), name='matchup'),
    url(r'^matchup/(?P<matchup_id>\w+)/$', login_required(matchup_views.MatchupPageView.as_view()),name='matchup'),
    url(r'^sysadmin/$', base_views.sysadmin,name='sysadmin'),
    url(r'^sysadmin/(?P<arg>\w+)/(?P<argval>.*?)$', base_views.sysadmin,name='sysadmin'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^playerpage/$', top_player_list.playerpage),
    url(r'^playernotfound/$', top_player_list.PlayerNotFound.as_view()),
    url(r'^playerpage/(?P<arg>\w+)', top_player_list.playerpage),
    # NOTE(review): exact duplicate of the previous pattern -- likely unintended
    url(r'^playerpage/(?P<arg>\w+)', top_player_list.playerpage),
    url(r'^leaguelist/(?P<league_id>\w+)', base_views.league_list),
    url(r'^transactions/$', base_views.transactions_page),
    url(r'^accounts/', include('registration.backends.default.urls')),
)
| [
"jtaddei@gmail.com"
] | jtaddei@gmail.com |
b6c197d99eca65f0b1b77cd64e93e6af05231af1 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /explore/2020/august/Sort_Array_By_Parity.py | 599a6511b88e961531c076fcdd4fe199f8da353f | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | '''
https://leetcode.com/explore/featured/card/august-leetcoding-challenge/551/week-3-august-15th-august-21st/3431/
You are here!
Your runtime beats 96.99 % of python submissions.
'''
class Solution(object):
    def sortArrayByParity(self, A):
        """
        Return A with all even numbers first and all odd numbers after,
        preserving the relative order within each group (matching the
        original two-pass append loops).

        :type A: List[int]
        :rtype: List[int]
        """
        # Two comprehensions replace the manual double pass; both are stable.
        return [a for a in A if a % 2 == 0] + [a for a in A if a % 2 == 1]
if __name__ == '__main__':
    A = [3, 1, 2, 4]
    # Expected: [2,4,3,1]; [4,2,3,1], [2,4,1,3] and [4,2,1,3] are also accepted.
    # print() call form works on Python 2 and 3 (the original used the
    # Python-2-only print statement, a SyntaxError under Python 3).
    print(Solution().sortArrayByParity(A))
"838255715@qq.com"
] | 838255715@qq.com |
cbc6badcee7a608483c0c04aaa51e3dad1cd5c26 | 1388b4c7e7a896492c7953f8e4914b9818ad538c | /lessons_crawler/dao/lesson_dao.py | 5c88225ce83d82dc36ff822134571df3fb212f25 | [] | no_license | compcederj/lessons-crawler | e5b7658de4741ceb1c21f51a9835a19d4f8584fc | 2b4b0448f1fe3587d6a8f5af3254863c311ecb30 | refs/heads/main | 2023-01-21T01:50:06.337833 | 2020-11-29T23:13:33 | 2020-11-29T23:13:33 | 294,533,878 | 0 | 0 | null | 2020-11-29T23:12:01 | 2020-09-10T22:13:31 | Python | UTF-8 | Python | false | false | 1,401 | py | from datetime import timedelta
from typing import List
import click
from lessons_crawler.db import db
from lessons_crawler.models.lessons import Lesson
from lessons_crawler.models.subjects import Subject
class LessonDAO:
    """Data-access helpers for Lesson rows."""

    @staticmethod
    def create_or_update(
            subject: Subject, lesson_index: str, original_url: str, title: str, xml_file: str,
            index_file: str, sync_file: str, mp4_video_file: str, webm_video_file: str, thumbnail: str,
            length: timedelta = None) -> Lesson:
        """Upsert the lesson identified by (subject, lesson_index) and return it.

        All file/URL fields are overwritten unconditionally, whether the row
        existed or not.
        """
        # Look up by the raw lesson_index...
        lesson = (
            db.session.
            query(Lesson).
            filter(Lesson.subject_id == subject.id).
            filter(Lesson.lesson_index == lesson_index).
            first()
        )
        if not lesson:
            lesson = Lesson()
        # ...but store it with underscores replaced by spaces.
        # NOTE(review): this means a second call with the same raw index will
        # not find the stored row if the raw index contained "_" -- confirm.
        lesson.lesson_index = lesson_index.replace("_", " ")
        lesson.title = title
        lesson.length = length
        lesson.original_url = original_url
        lesson.subject_id = subject.id
        lesson.xml_file = xml_file
        lesson.index_file = index_file
        lesson.sync_file = sync_file
        lesson.mp4_video_file = mp4_video_file
        lesson.webm_video_file = webm_video_file
        lesson.thumbnail = thumbnail
        lesson.save()
        return lesson

    @staticmethod
    def get_all() -> List[Lesson]:
        """Return every Lesson row."""
        lessons = db.session.query(Lesson).all()
        return lessons
| [
"thiagoborges@id.uff.br"
] | thiagoborges@id.uff.br |
4bf04e81e4e02551cd90da3f5e55221a1a407668 | cfd2e1f12208dad79bc4b899e81ce1f7de84e80c | /Brian2_scripts/sim_brian_scratch/sim_brian_twenty_four_v1.py | 3245829707e2d1ef165c75d6fa0b22ddbeded811 | [] | no_license | zhouyanasd/DL-NC | 334adafdea1dd8c4c08c7efef3abc3b623344f0d | 396521096f65b27aa24efb1deda7b215876166b2 | refs/heads/master | 2023-03-22T04:57:19.790975 | 2023-03-14T08:57:01 | 2023-03-14T08:57:01 | 64,385,964 | 41 | 9 | null | 2023-02-15T17:52:34 | 2016-07-28T10:22:45 | Python | UTF-8 | Python | false | false | 12,301 | py | # ----------------------------------------
# new structure of LSM with inhibitory neuron STDP
# inhibitory neuron WTA and different spike patterns classification test
# multiple pre-train STDP and the distribution is different for different patterns
# local STDP and partial connection
# simulation 6--analysis 4
# ----------------------------------------
from brian2 import *
from brian2tools import *
from scipy.optimize import leastsq
import scipy as sp
from sklearn.preprocessing import MinMaxScaler
prefs.codegen.target = "numpy"  # numpy codegen runs faster here than the default "cython"
start_scope()
np.random.seed(102)  # fixed seed so the random pattern sequence is reproducible
# ------define function------------
def lms_train(p0, Zi, Data):
    """Fit linear readout parameters to the target Zi by least squares.

    p0   -- initial guess: one weight per row of Data plus a trailing bias.
    Zi   -- target vector.
    Data -- list of feature rows, each aligned with Zi.
    Returns the fitted parameter vector (weights..., bias).
    """
    def error(p, y, args):
        # prediction = bias (last parameter) + weighted sum of feature rows
        prediction = p[len(p) - 1]
        for weight, row in zip(p, args):
            prediction = prediction + weight * row
        return prediction - y

    result = leastsq(error, p0, args=(Zi, Data))
    return result[0]
def lms_test(M, p):
    """Evaluate the trained linear readout on the feature rows M.

    p holds one weight per row of M followed by a bias term; the result is
    bias + sum_i p[i] * M[i].  The original first copied M into a throwaway
    list; iterating it directly is equivalent and avoids the extra pass.
    """
    f = p[len(p) - 1]
    for weight, row in zip(p, M):
        f = f + weight * row
    return f
def readout(M, Z):
    """Train the linear readout mapping the rows of M onto the target Z.

    Returns (feature_rows, fitted_parameters); the parameter vector carries
    one weight per feature row followed by a bias (initial guess 0.1).
    """
    rows = [row for row in M]
    guess = [1] * len(M) + [0.1]
    params = lms_train(guess, Z, rows)
    return rows, params
def mse(y_test, y):
    """Root-mean-square error between y_test and y (name kept for callers).

    Uses NumPy directly: the scipy top-level aliases sp.sqrt / sp.mean used
    previously are deprecated and removed in modern SciPy; numerics identical.
    """
    return np.sqrt(np.mean((y_test - y) ** 2))
def patterns_classification(duration, patterns, neu=1, interval_l=10, interval_s=ms, percent=0.2):
    """Build a Brian2 spike generator that replays random pattern sequences.

    Each pattern occupies interval_l + patterns.shape[1] time steps of length
    interval_s; `percent` of the silent padding is placed before the pattern,
    the rest after.  Returns (SpikeGeneratorGroup, label array).
    """
    def tran_patterns(A, patterns, percent):
        # Expand the label sequence A into a flat 0/1 spike train with the
        # pattern sitting inside each slot's padding.
        trans = []
        for a in A:
            # the data is in the middle of a sequence
            for i in range(int(interval_l * percent)):
                trans.append(0)
            a_ = patterns[a]
            for i in a_:
                trans.append(int(i))
            for i in range(int(interval_l * (1 - percent))):
                trans.append(0)
        return np.asarray(trans)

    interval = interval_l + patterns.shape[1]
    if (duration / interval_s) % interval != 0:
        # Bug fix: `raise ("...")` raised a bare string, which is itself a
        # TypeError in Python 3.  Raise a proper exception, same message.
        raise ValueError("duration and interval+len(patterns) must be exacted division")
    n = int((duration / interval_s) / interval)
    label = np.random.randint(0, int(patterns.shape[0]), n)
    seq = tran_patterns(label, patterns, percent)
    times = where(seq == 1)[0] * interval_s
    indices = zeros(int(len(times)))
    P = SpikeGeneratorGroup(neu, indices, times)
    return P, label
def label_to_obj(label, obj):
    """One-vs-rest target encoding: 1 where the label equals obj, else 0."""
    # Comprehension replaces the manual append loop; values and dtype match.
    return np.asarray([1 if a == obj else 0 for a in label])
def classification(thea, data):
    """Min-max normalize `data` and threshold it at `thea`.

    Returns (binary_classes, normalized_scores).

    Bug fixed: the old helper normalized element-by-element *in place*, so
    np.min/np.max were recomputed over a partially-normalized array (and the
    caller's array was clobbered).  The bounds are now taken once on a float
    copy, and constant input no longer divides by zero.
    """
    def normalization_min_max(arr):
        arr = np.asarray(arr, dtype=float)
        lo = np.min(arr)
        hi = np.max(arr)
        if hi == lo:  # constant scores: define the normalization as all zeros
            return np.zeros_like(arr)
        return (arr - lo) / (hi - lo)

    data_n = normalization_min_max(data)
    data_class = [1 if a >= thea else 0 for a in data_n]
    return np.asarray(data_class), data_n
def ROC(y, scores, fig_title='ROC', pos_label=1):
    """Plot a ROC curve for `scores` against binary labels `y`.

    Scores are min-max normalized first.  Returns (figure, auc, thresholds).

    Bug fixed: the old normalizer wrote into `scores` in place while
    re-reading np.min/np.max of the partially-updated array, corrupting the
    normalization and the caller's data; it now works on a float copy.
    """
    def normalization_min_max(arr):
        arr = np.asarray(arr, dtype=float)
        lo = np.min(arr)
        hi = np.max(arr)
        if hi == lo:  # constant scores: avoid division by zero
            return np.zeros_like(arr)
        return (arr - lo) / (hi - lo)

    scores_n = normalization_min_max(scores)
    from sklearn import metrics
    fpr, tpr, thresholds = metrics.roc_curve(y, scores_n, pos_label=pos_label)
    roc_auc = metrics.auc(fpr, tpr)
    fig = plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange',
             lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(fig_title)
    plt.legend(loc="lower right")
    return fig, roc_auc, thresholds
# sample 5 times as default because the beginning is always '0'
# the states are Normalized
def get_states(input, interval, duration, sample=5):
    """Aggregate a monitor trace into one state vector per pattern slot.

    Sums every (interval/sample)-th column inside each slot of `interval`
    steps, then min-max scales the result (MinMaxScaler works column-wise).
    Returns (scaled_states, slot_start_times).
    NOTE(review): `input` and `sum` shadow builtins -- kept byte-identical.
    """
    n = int(duration / interval)
    t = np.arange(n) * interval  # slot start times, in time steps
    step = int(interval / sample)
    temp = []
    for i in range(n):
        # strided sum over the i-th slot
        sum = np.sum(input[:, i * interval:(i + 1) * interval:step], axis=1)
        temp.append(sum)
    return MinMaxScaler().fit_transform(np.asarray(temp).T), t
###############################################
# -----parameter and model setting-------
obj = 1  # index of the pattern treated as the positive class
patterns = np.array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                     [1, 1, 0, 1, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 0, 0, 1, 0, 1, 0],
                     [0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
                     [0, 0, 1, 1, 0, 0, 1, 0, 1, 0],
                     [0, 1, 0, 1, 1, 0, 1, 1, 0, 1],
                     [0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                     [0, 1, 1, 0, 1, 0, 1, 0, 1, 1],
                     [1, 1, 0, 0, 1, 0, 1, 1, 1, 0],
                     [1, 0, 1, 1, 0, 1, 0, 0, 0, 1]])
patterns_pre = patterns[obj][newaxis, :]  # only the target pattern drives STDP pre-training
n = 20  # reservoir size
pre_train_duration = 2000 * ms
duration = 2000 * ms
duration_test = 2000 * ms
pre_train_loop = 2
interval_l = 40  # silent time steps padding each pattern
interval_s = ms  # duration of one input time step
threshold = 0.5  # readout classification threshold
sample = 5
# number of pattern slots in the train run / in train+test together
t0 = int(duration / ((interval_l + patterns.shape[1]) * interval_s))
t1 = int((duration + duration_test) / ((interval_l + patterns.shape[1]) * interval_s))
# LIF-like neuron with a double-exponential synaptic trace (Brian2 equations)
equ = '''
r : 1
dv/dt = (I-v) / (3*ms*r) : 1 (unless refractory)
dg/dt = (-g)/(1.5*ms*r) : 1
dh/dt = (-h)/(1.45*ms*r) : 1
I = tanh(g-h)*20 : 1
'''
# readout neurons: synaptic filtering only, no spiking dynamics
equ_read = '''
dg/dt = (-g)/(1.5*ms) : 1
dh/dt = (-h)/(1.45*ms) : 1
I = tanh(g-h)*20 : 1
'''
on_pre = '''
h+=w
g+=w
'''
# pair-based STDP with per-synapse bounds and time constants
model_STDP = '''
w : 1
wmax : 1
wmin : 1
Apre : 1
Apost = -Apre * taupre / taupost * 1.2 : 1
taupre : second
taupost : second
dapre/dt = -apre/taupre : 1 (clock-driven)
dapost/dt = -apost/taupost : 1 (clock-driven)
'''
on_pre_STDP = '''
h+=w
g+=w
apre += Apre
w = clip(w+apost, wmin, wmax)
'''
on_post_STDP = '''
apost += Apost
w = clip(w+apre, wmin, wmax)
'''
# -----neurons and synapses setting-------
# P_plasticity feeds the STDP pre-training phase; P feeds train + test.
P_plasticity, label_plasticity = patterns_classification(pre_train_duration, patterns_pre,
                                                         interval_l=interval_l, interval_s=interval_s)
P, label = patterns_classification(duration + duration_test, patterns,
                                   interval_l=interval_l, interval_s=interval_s)
G = NeuronGroup(n, equ, threshold='v > 0.20', reset='v = 0', method='euler', refractory = 1 * ms,
                name='neurongroup')
G_lateral_inh = NeuronGroup(1, equ, threshold='v > 0.20', reset='v = 0', method='euler', refractory = 1 * ms,
                            name='neurongroup_la_inh')
G2 = NeuronGroup(round(n / 4), equ, threshold='v > 0.20', reset='v = 0', method='euler', refractory = 1 * ms,
                 name='neurongroup_1')
G_readout = NeuronGroup(n, equ_read, method='euler')
S = Synapses(P_plasticity, G, 'w : 1', on_pre=on_pre, method='linear', name='synapses')
S2 = Synapses(G2, G, 'w : 1', on_pre=on_pre, method='linear', name='synapses_1')
S3 = Synapses(P_plasticity, G_lateral_inh, 'w : 1', on_pre=on_pre, method='linear', name='synapses_2')
S4 = Synapses(G, G, model_STDP, on_pre=on_pre_STDP, on_post=on_post_STDP, method='linear', name='synapses_3')
S5 = Synapses(G, G2, model_STDP, on_pre=on_pre_STDP, on_post=on_post_STDP, method='linear', name='synapses_4')
S6 = Synapses(G_lateral_inh, G, 'w : 1', on_pre=on_pre, method='linear', name='synapses_5')
S_readout = Synapses(G, G_readout, 'w = 1 : 1', on_pre=on_pre, method='linear')
# -------network topology----------
S.connect(j='k for k in range(int(n*0.1))')   # input projects onto the first 10% of G
S2.connect(p=0.3)
S3.connect()
S4.connect(p=0.3, condition='i != j')         # recurrent, no self-connections
S5.connect(p=0.3)
S6.connect(j='k for k in range(int(n*0.1))')
S_readout.connect(j='i')                      # one-to-one readout taps
# randomized STDP bounds / time constants per synapse, then initial weights
S4.wmax = '0.5+rand()*0.4'
S5.wmax = '0.5+rand()*0.4'
S4.wmin = '0.2+rand()*0.3'
S5.wmin = '0.2+rand()*0.3'
S4.Apre = S5.Apre = '0.01'
S4.taupre = S4.taupost ='1*ms+rand()*9*ms'
S5.taupre = S5.taupost ='1*ms+rand()*9*ms'
S.w = '0.6+j*'+str(0.4/(n*0.1))
S2.w = '-0.4'
S3.w = '0.8'
S4.w = '0.3+rand()*0.5'
S5.w = '0.3+rand()*0.5'
S6.w = [-0.05,-0.2]
S4.delay = '3*ms'
S.delay = '3*ms'
G.r = '1'
G2.r = '1'
G_lateral_inh.r = '1'
# ------monitors setting----------------
m1 = StateMonitor(G_readout, ('I'), record=True, dt=ms)  # readout states for the classifier
m_w = StateMonitor(S5, 'w', record=True)
m_w2 = StateMonitor(S4, 'w', record=True)
m_s = SpikeMonitor(P)
m_g = StateMonitor(G, (['I', 'v']), record=True)
m_g2 = StateMonitor(G2, (['I', 'v']), record=True)
m_read = StateMonitor(G_readout, ('I'), record=True)
m_inh = StateMonitor(G_lateral_inh, ('v'), record=True)
# ------create network-------------
net = Network(collect())
net.store('first')  # snapshot of the untrained network state
# initial weight distributions before pre-training
fig00 = plt.figure(figsize=(4, 4))
brian_plot(S.w)
fig0 = plt.figure(figsize=(4, 4))
brian_plot(S4.w)
###############################################
# ------pre_train------------------
# Run STDP on the target pattern, then restore the untrained state while
# keeping only the learned STDP weights; repeated pre_train_loop times.
for loop in range(pre_train_loop):
    net.run(pre_train_duration, report= 'text')
    # ------plot the weight----------------
    fig2 = plt.figure(figsize=(10, 8))
    title('loop: ' + str(loop))
    subplot(211)
    plot(m_w.t / second, m_w.w.T)
    xlabel('Time (s)')
    ylabel('Weight / gmax')
    subplot(212)
    plot(m_w2.t / second, m_w2.w.T)
    xlabel('Time (s)')
    ylabel('Weight / gmax')
    net.store('second')
    net.restore('first')
    # carry the trained STDP weights over into the restored network
    # (reads Brian2's private stored-state dict -- version-sensitive)
    S4.w = net._stored_state['second']['synapses_3']['w'][0]
    S5.w = net._stored_state['second']['synapses_4']['w'][0]
# -------change the input source----------
# swap the pre-training spike generator for the full train/test generator
net.remove(P_plasticity)
S.source = P
S.pre.source = P
S._dependencies.remove(P_plasticity.id)
S.add_dependency(P)
S3.source = P
S3.pre.source = P
S3._dependencies.remove(P_plasticity.id)
S3.add_dependency(P)
# -------change the synapse model----------
# freeze plasticity: plain transmission from here on
S4.pre.code = '''
h+=w
g+=w
'''
S4.post.code = ''
S5.pre.code = '''
h+=w
g+=w
'''
S5.post.code = ''
###############################################
# ------run for lms_train-------
net.store('third')  # snapshot taken right after pre-training
net.run(duration, report='text')
# ------lms_train---------------
y = label_to_obj(label[:t0], obj)
states, _t_m = get_states(m1.I, int(interval_l + patterns.shape[1]), duration / interval_s, sample)
Data, para = readout(states, y)  # least-squares readout weights
#####################################
# ----run for test--------
# restart from the post-pre-training snapshot and run train+test in one go
net.restore('third')
net.run(duration + duration_test, report='text')
# -----lms_test-----------
obj_t = label_to_obj(label, obj)
states, t_m = get_states(m1.I, int(interval_l + patterns.shape[1]), (duration + duration_test) / interval_s, sample)
y_t = lms_test(states, para)
#####################################
# ------calculate results----
y_t_class, data_n = classification(threshold, y_t)
fig_roc_train, roc_auc_train, thresholds_train = ROC(obj_t[:t0], data_n[:t0], 'ROC for train')
print('ROC of train is %s for classification of %s' % (roc_auc_train, obj))
fig_roc_test, roc_auc_test, thresholds_test = ROC(obj_t[t0:], data_n[t0:], 'ROC for test')
print('ROC of test is %s for classification of %s' % (roc_auc_test, obj))
print(obj_t)
# ------vis of results----
# readout output vs. ground truth; the green vertical line is the
# train/test boundary, the red horizontal line the decision threshold
fig1 = plt.figure(figsize=(20, 8))
subplot(211)
plt.scatter(t_m, y_t_class, s=2, color="red", marker='o', alpha=0.6)
plt.scatter(t_m, obj_t, s=3, color="blue", marker='*', alpha=0.4)
plt.scatter(t_m, data_n, color="green")
axhline(threshold, ls='--', c='r', lw=1)
axvline(duration / ms, ls='--', c='green', lw=3)
subplot(212)
plot(m_s.t / ms, m_s.i, '.k')
ylim(-0.5, 0.5)
# reservoir membrane potentials and currents
fig3 = plt.figure(figsize=(20, 8))
subplot(211)
plt.plot(m_g.t / ms, m_g.v.T, label='v')
legend(labels=[('V_%s' % k) for k in range(n)], loc='upper right')
subplot(212)
plt.plot(m_g.t / ms, m_g.I.T, label='I')
legend(labels=[('I_%s' % k) for k in range(n)], loc='upper right')
# inhibitory pool and the lateral-inhibition neuron
fig4 = plt.figure(figsize=(20, 8))
subplot(311)
plt.plot(m_g2.t / ms, m_g2.v.T, label='v')
legend(labels=[('V_%s' % k) for k in range(n)], loc='upper right')
subplot(312)
plt.plot(m_g2.t / ms, m_g2.I.T, label='I')
legend(labels=[('I_%s' % k) for k in range(n)], loc='upper right')
subplot(313)
plt.plot(m_inh.t / ms, m_inh.v.T, label='v')
legend(labels=[('v_%s' % k) for k in range(n)], loc='upper right')
# readout input currents
fig5 = plt.figure(figsize=(20, 4))
plt.plot(m_read.t / ms, m_read.I.T, label='I')
legend(labels=[('I_%s' % k) for k in range(n)], loc='upper right')
# final recurrent weight matrix after training
fig6 = plt.figure(figsize=(4, 4))
brian_plot(S4.w)
show()
| [
"zhouyanasd@gmail.com"
] | zhouyanasd@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.